[Pkg-ceph-commits] [ceph] 01/03: New upstream version 10.2.6

James Downing Page jamespage at moszumanska.debian.org
Thu Mar 9 09:30:51 UTC 2017


This is an automated email from the git hooks/post-receive script.

jamespage pushed a commit to branch ubuntu/xenial
in repository ceph.

commit 585f53358b0fdbfb139e15801fe94b14d6b50f79
Author: James Page <james.page at ubuntu.com>
Date:   Wed Mar 8 14:31:29 2017 +0000

    New upstream version 10.2.6
---
 AUTHORS                                            |   41 +
 CMakeLists.txt                                     |    2 +-
 ChangeLog                                          | 4910 +++++++++++++++++++-
 ceph.spec                                          |    8 +-
 ceph.spec.in                                       |    6 +-
 configure                                          |   89 +-
 configure.ac                                       |    4 +-
 doc/Makefile                                       |    8 +-
 doc/dev/quick_guide.rst                            |    6 +-
 doc/man/8/rados.rst                                |   14 +-
 doc/rados/configuration/osd-config-ref.rst         |    2 +-
 doc/radosgw/config-ref.rst                         |   24 +-
 doc/radosgw/s3/commons.rst                         |    4 +
 doc/radosgw/upgrade_to_jewel.rst                   |   37 +
 install-deps.sh                                    |   33 +-
 man/ceph-authtool.8                                |    2 +-
 man/ceph-clsinfo.8                                 |    2 +-
 man/ceph-conf.8                                    |    2 +-
 man/ceph-create-keys.8                             |    2 +-
 man/ceph-debugpack.8                               |    2 +-
 man/ceph-dencoder.8                                |    2 +-
 man/ceph-deploy.8                                  |    2 +-
 man/ceph-detect-init.8                             |    2 +-
 man/ceph-disk.8                                    |    2 +-
 man/ceph-fuse.8                                    |    2 +-
 man/ceph-mds.8                                     |    2 +-
 man/ceph-mon.8                                     |    2 +-
 man/ceph-osd.8                                     |    2 +-
 man/ceph-post-file.8                               |    2 +-
 man/ceph-rbdnamer.8                                |    2 +-
 man/ceph-rest-api.8                                |    2 +-
 man/ceph-run.8                                     |    2 +-
 man/ceph-syn.8                                     |    2 +-
 man/ceph.8                                         |    2 +-
 man/cephfs.8                                       |    2 +-
 man/crushtool.8                                    |    2 +-
 man/librados-config.8                              |    2 +-
 man/monmaptool.8                                   |    2 +-
 man/mount.ceph.8                                   |    2 +-
 man/osdmaptool.8                                   |    2 +-
 man/rados.8                                        |   16 +-
 man/radosgw-admin.8                                |    2 +-
 man/radosgw.8                                      |    2 +-
 man/rbd-fuse.8                                     |    2 +-
 man/rbd-mirror.8                                   |    2 +-
 man/rbd-nbd.8                                      |    2 +-
 man/rbd-replay-many.8                              |    2 +-
 man/rbd-replay-prep.8                              |    2 +-
 man/rbd-replay.8                                   |    2 +-
 man/rbd.8                                          |    2 +-
 man/rbdmap.8                                       |    2 +-
 qa/.gitignore                                      |    5 +
 qa/README                                          |   52 +
 qa/archs/aarch64.yaml                              |    1 +
 qa/archs/armv7.yaml                                |    1 +
 qa/archs/i686.yaml                                 |    1 +
 qa/archs/x86_64.yaml                               |    1 +
 qa/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml  |    3 +
 .../disable_diff_journal_disk.yaml                 |    3 +
 .../enable_diff_journal_disk.yaml                  |    3 +
 .../enable_dmcrypt_diff_journal_disk.yaml          |    4 +
 qa/clusters/extra-client.yaml                      |    9 +
 qa/clusters/fixed-1.yaml                           |   10 +
 qa/clusters/fixed-2-ucephfs.yaml                   |   10 +
 qa/clusters/fixed-2.yaml                           |    7 +
 qa/clusters/fixed-3-cephfs.yaml                    |   11 +
 qa/clusters/fixed-3.yaml                           |    8 +
 qa/clusters/fixed-4.yaml                           |    5 +
 qa/config/rados.yaml                               |    7 +
 qa/config_options/cephdeploy_conf.yaml             |    6 +
 qa/debug/buildpackages.yaml                        |    6 +
 qa/debug/mds_client.yaml                           |    9 +
 qa/debug/openstack-15G.yaml                        |    3 +
 qa/debug/openstack-30G.yaml                        |    3 +
 qa/distros/a-supported-distro.yaml                 |    2 +
 qa/distros/all/centos.yaml                         |    2 +
 qa/distros/all/centos_6.3.yaml                     |    2 +
 qa/distros/all/centos_6.4.yaml                     |    2 +
 qa/distros/all/centos_6.5.yaml                     |    2 +
 qa/distros/all/centos_7.0.yaml                     |    2 +
 qa/distros/all/centos_7.1.yaml                     |    2 +
 qa/distros/all/centos_7.2.yaml                     |    2 +
 qa/distros/all/centos_7.3.yaml                     |    2 +
 qa/distros/all/debian_6.0.yaml                     |    2 +
 qa/distros/all/debian_7.0.yaml                     |    2 +
 qa/distros/all/debian_8.0.yaml                     |    2 +
 qa/distros/all/fedora_17.yaml                      |    2 +
 qa/distros/all/fedora_18.yaml                      |    2 +
 qa/distros/all/fedora_19.yaml                      |    2 +
 qa/distros/all/opensuse_12.2.yaml                  |    2 +
 qa/distros/all/opensuse_13.2.yaml                  |    2 +
 qa/distros/all/opensuse_42.1.yaml                  |    2 +
 qa/distros/all/opensuse_42.2.yaml                  |    2 +
 qa/distros/all/rhel_6.3.yaml                       |    2 +
 qa/distros/all/rhel_6.4.yaml                       |    2 +
 qa/distros/all/rhel_6.5.yaml                       |    2 +
 qa/distros/all/rhel_7.0.yaml                       |    2 +
 qa/distros/all/sle_12.2.yaml                       |    2 +
 qa/distros/all/ubuntu_12.04.yaml                   |    2 +
 qa/distros/all/ubuntu_12.10.yaml                   |    2 +
 qa/distros/all/ubuntu_14.04.yaml                   |    2 +
 qa/distros/all/ubuntu_14.04_aarch64.yaml           |    3 +
 qa/distros/all/ubuntu_14.04_i686.yaml              |    3 +
 qa/distros/all/ubuntu_16.04.yaml                   |    2 +
 qa/distros/supported/centos_7.3.yaml               |    2 +
 qa/distros/supported/ubuntu_14.04.yaml             |    2 +
 qa/erasure-code/ec-feature-plugins-v2.yaml         |   97 +
 qa/erasure-code/ec-feature-plugins-v3.yaml         |   97 +
 qa/erasure-code/ec-rados-default.yaml              |   19 +
 qa/erasure-code/ec-rados-parallel.yaml             |   20 +
 qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml   |   25 +
 .../ec-rados-plugin=jerasure-k=2-m=1.yaml          |   25 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   31 +
 .../ec-rados-plugin=lrc-k=4-m=2-l=3.yaml           |   25 +
 .../ec-rados-plugin=shec-k=4-m=3-c=2.yaml          |   25 +
 qa/erasure-code/ec-rados-sequential.yaml           |   20 +
 qa/fs/btrfs.yaml                                   |    7 +
 qa/fs/ext4.yaml                                    |    7 +
 qa/fs/xfs.yaml                                     |    6 +
 qa/machine_types/schedule_rados.sh                 |   23 +
 qa/machine_types/schedule_rados_ovh.sh             |   23 +
 qa/machine_types/vps.yaml                          |   16 +
 qa/overrides/2-size-1-min-size.yaml                |    6 +
 qa/overrides/2-size-2-min-size.yaml                |    6 +
 qa/overrides/3-size-2-min-size.yaml                |    8 +
 qa/overrides/short_pg_log.yaml                     |    6 +
 qa/overrides/whitelist_wrongly_marked_down.yaml    |   10 +
 qa/packages/packages.yaml                          |   45 +
 qa/releases/infernalis.yaml                        |    5 +
 qa/releases/jewel.yaml                             |    5 +
 qa/releases/kraken.yaml                            |    1 +
 qa/rgw_pool_type/ec-cache.yaml                     |    6 +
 qa/rgw_pool_type/ec-profile.yaml                   |   10 +
 qa/rgw_pool_type/ec.yaml                           |    5 +
 qa/rgw_pool_type/replicated.yaml                   |    3 +
 qa/suites/big/rados-thrash/%                       |    0
 qa/suites/big/rados-thrash/ceph/ceph.yaml          |    3 +
 qa/suites/big/rados-thrash/clusters/big.yaml       |   68 +
 qa/suites/big/rados-thrash/clusters/medium.yaml    |   22 +
 qa/suites/big/rados-thrash/clusters/small.yaml     |    6 +
 qa/suites/big/rados-thrash/fs/btrfs.yaml           |    7 +
 qa/suites/big/rados-thrash/fs/xfs.yaml             |    6 +
 qa/suites/big/rados-thrash/thrashers/default.yaml  |   10 +
 .../rados-thrash/workloads/snaps-few-objects.yaml  |   13 +
 qa/suites/buildpackages/any/%                      |    0
 qa/suites/buildpackages/any/distros/centos.yaml    |    2 +
 .../buildpackages/any/distros/centos_6.3.yaml      |    2 +
 .../buildpackages/any/distros/centos_6.4.yaml      |    2 +
 .../buildpackages/any/distros/centos_6.5.yaml      |    2 +
 .../buildpackages/any/distros/centos_7.0.yaml      |    2 +
 .../buildpackages/any/distros/centos_7.1.yaml      |    2 +
 .../buildpackages/any/distros/centos_7.2.yaml      |    2 +
 .../buildpackages/any/distros/centos_7.3.yaml      |    2 +
 .../buildpackages/any/distros/debian_6.0.yaml      |    2 +
 .../buildpackages/any/distros/debian_7.0.yaml      |    2 +
 .../buildpackages/any/distros/debian_8.0.yaml      |    2 +
 qa/suites/buildpackages/any/distros/fedora_17.yaml |    2 +
 qa/suites/buildpackages/any/distros/fedora_18.yaml |    2 +
 qa/suites/buildpackages/any/distros/fedora_19.yaml |    2 +
 .../buildpackages/any/distros/opensuse_12.2.yaml   |    2 +
 .../buildpackages/any/distros/opensuse_13.2.yaml   |    2 +
 .../buildpackages/any/distros/opensuse_42.1.yaml   |    2 +
 .../buildpackages/any/distros/opensuse_42.2.yaml   |    2 +
 qa/suites/buildpackages/any/distros/rhel_6.3.yaml  |    2 +
 qa/suites/buildpackages/any/distros/rhel_6.4.yaml  |    2 +
 qa/suites/buildpackages/any/distros/rhel_6.5.yaml  |    2 +
 qa/suites/buildpackages/any/distros/rhel_7.0.yaml  |    2 +
 qa/suites/buildpackages/any/distros/sle_12.2.yaml  |    2 +
 .../buildpackages/any/distros/ubuntu_12.04.yaml    |    2 +
 .../buildpackages/any/distros/ubuntu_12.10.yaml    |    2 +
 .../buildpackages/any/distros/ubuntu_14.04.yaml    |    2 +
 .../any/distros/ubuntu_14.04_aarch64.yaml          |    3 +
 .../any/distros/ubuntu_14.04_i686.yaml             |    3 +
 .../buildpackages/any/distros/ubuntu_16.04.yaml    |    2 +
 qa/suites/buildpackages/any/tasks/release.yaml     |    8 +
 qa/suites/buildpackages/tests/%                    |    0
 qa/suites/buildpackages/tests/distros/centos.yaml  |    2 +
 .../buildpackages/tests/distros/centos_6.3.yaml    |    2 +
 .../buildpackages/tests/distros/centos_6.4.yaml    |    2 +
 .../buildpackages/tests/distros/centos_6.5.yaml    |    2 +
 .../buildpackages/tests/distros/centos_7.0.yaml    |    2 +
 .../buildpackages/tests/distros/centos_7.1.yaml    |    2 +
 .../buildpackages/tests/distros/centos_7.2.yaml    |    2 +
 .../buildpackages/tests/distros/centos_7.3.yaml    |    2 +
 .../buildpackages/tests/distros/debian_6.0.yaml    |    2 +
 .../buildpackages/tests/distros/debian_7.0.yaml    |    2 +
 .../buildpackages/tests/distros/debian_8.0.yaml    |    2 +
 .../buildpackages/tests/distros/fedora_17.yaml     |    2 +
 .../buildpackages/tests/distros/fedora_18.yaml     |    2 +
 .../buildpackages/tests/distros/fedora_19.yaml     |    2 +
 .../buildpackages/tests/distros/opensuse_12.2.yaml |    2 +
 .../buildpackages/tests/distros/opensuse_13.2.yaml |    2 +
 .../buildpackages/tests/distros/opensuse_42.1.yaml |    2 +
 .../buildpackages/tests/distros/opensuse_42.2.yaml |    2 +
 .../buildpackages/tests/distros/rhel_6.3.yaml      |    2 +
 .../buildpackages/tests/distros/rhel_6.4.yaml      |    2 +
 .../buildpackages/tests/distros/rhel_6.5.yaml      |    2 +
 .../buildpackages/tests/distros/rhel_7.0.yaml      |    2 +
 .../buildpackages/tests/distros/sle_12.2.yaml      |    2 +
 .../buildpackages/tests/distros/ubuntu_12.04.yaml  |    2 +
 .../buildpackages/tests/distros/ubuntu_12.10.yaml  |    2 +
 .../buildpackages/tests/distros/ubuntu_14.04.yaml  |    2 +
 .../tests/distros/ubuntu_14.04_aarch64.yaml        |    3 +
 .../tests/distros/ubuntu_14.04_i686.yaml           |    3 +
 .../buildpackages/tests/distros/ubuntu_16.04.yaml  |    2 +
 qa/suites/buildpackages/tests/tasks/release.yaml   |   20 +
 qa/suites/calamari/%                               |    0
 qa/suites/calamari/clusters/osd-3.yaml             |    5 +
 qa/suites/calamari/distros/centos6.4.yaml          |    2 +
 qa/suites/calamari/distros/centos6.5.yaml          |    2 +
 qa/suites/calamari/distros/precise.yaml            |    2 +
 qa/suites/calamari/distros/rhel6.4.yaml            |    2 +
 qa/suites/calamari/distros/rhel6.5.yaml            |    2 +
 qa/suites/calamari/distros/rhel7.0.yaml            |    2 +
 qa/suites/calamari/distros/trusty.yaml             |    2 +
 qa/suites/calamari/distros/wheezy.yaml.disabled    |    2 +
 qa/suites/calamari/tasks/calamari.yaml             |   10 +
 qa/suites/ceph-ansible/smoke/basic/%               |    0
 .../smoke/basic/0-clusters/3-node.yaml             |   10 +
 .../smoke/basic/1-distros/centos_7.3.yaml          |    2 +
 .../smoke/basic/1-distros/ubuntu_16.04.yaml        |    2 +
 .../smoke/basic/2-config/ceph_ansible.yaml         |   22 +
 .../smoke/basic/3-tasks/ceph-admin-commands.yaml   |    7 +
 .../ceph-ansible/smoke/basic/3-tasks/cls.yaml      |    7 +
 .../smoke/basic/3-tasks/rbd_import_export.yaml     |    7 +
 qa/suites/ceph-deploy-release/%                    |    0
 .../ceph-deploy-release/distros/centos_7.3.yaml    |    2 +
 .../ceph-deploy-release/distros/ubuntu_14.04.yaml  |    2 +
 .../overrides/ceph_deploy_dmcrypt.yaml             |    3 +
 .../overrides/disable_diff_journal_disk.yaml       |    3 +
 .../tasks/release-install-test.yaml                |   40 +
 qa/suites/ceph-deploy/basic/%                      |    0
 .../ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml |    3 +
 .../disable_diff_journal_disk.yaml                 |    3 +
 .../enable_diff_journal_disk.yaml                  |    3 +
 .../enable_dmcrypt_diff_journal_disk.yaml          |    4 +
 .../basic/config_options/cephdeploy_conf.yaml      |    6 +
 .../ceph-deploy/basic/distros/centos_7.3.yaml      |    2 +
 .../ceph-deploy/basic/distros/ubuntu_14.04.yaml    |    2 +
 .../basic/tasks/ceph-admin-commands.yaml           |   25 +
 qa/suites/ceph-disk/basic/%                        |    0
 qa/suites/ceph-disk/basic/distros/centos_7.3.yaml  |    2 +
 .../ceph-disk/basic/distros/ubuntu_14.04.yaml      |    2 +
 qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml     |   37 +
 qa/suites/dummy/%                                  |    0
 qa/suites/dummy/all/nop.yaml                       |    9 +
 qa/suites/experimental/multimds/%                  |    0
 .../experimental/multimds/clusters/7-multimds.yaml |    8 +
 .../multimds/tasks/fsstress_thrash_subtrees.yaml   |   15 +
 qa/suites/fs/32bits/%                              |    0
 qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml  |   10 +
 qa/suites/fs/32bits/debug/mds_client.yaml          |    9 +
 qa/suites/fs/32bits/dirfrag/frag_enable.yaml       |   10 +
 qa/suites/fs/32bits/fs/btrfs.yaml                  |    7 +
 qa/suites/fs/32bits/mount/ceph-fuse.yaml           |    9 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    5 +
 .../fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml |    5 +
 qa/suites/fs/basic/%                               |    0
 qa/suites/fs/basic/clusters/fixed-2-ucephfs.yaml   |   10 +
 qa/suites/fs/basic/debug/mds_client.yaml           |    9 +
 qa/suites/fs/basic/dirfrag/frag_enable.yaml        |   10 +
 qa/suites/fs/basic/fs/btrfs.yaml                   |    7 +
 qa/suites/fs/basic/inline/no.yaml                  |    3 +
 qa/suites/fs/basic/inline/yes.yaml                 |    6 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 qa/suites/fs/basic/tasks/cephfs_scrub_tests.yaml   |   11 +
 .../tasks/cfuse_workunit_kernel_untar_build.yaml   |    6 +
 qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml  |    8 +
 .../tasks/cfuse_workunit_misc_test_o_trunc.yaml    |    6 +
 .../fs/basic/tasks/cfuse_workunit_norstats.yaml    |   13 +
 qa/suites/fs/basic/tasks/cfuse_workunit_quota.yaml |   13 +
 .../tasks/cfuse_workunit_suites_blogbench.yaml     |    6 +
 .../basic/tasks/cfuse_workunit_suites_dbench.yaml  |    6 +
 .../fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml |   11 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    6 +
 .../fs/basic/tasks/cfuse_workunit_suites_fsx.yaml  |    6 +
 .../basic/tasks/cfuse_workunit_suites_fsync.yaml   |    6 +
 .../basic/tasks/cfuse_workunit_suites_iogen.yaml   |    7 +
 .../basic/tasks/cfuse_workunit_suites_iozone.yaml  |    6 +
 .../fs/basic/tasks/cfuse_workunit_suites_pjd.yaml  |   15 +
 .../cfuse_workunit_suites_truncate_delay.yaml      |   15 +
 .../basic/tasks/cfuse_workunit_trivial_sync.yaml   |    5 +
 .../fs/basic/tasks/libcephfs_interface_tests.yaml  |    6 +
 qa/suites/fs/basic/tasks/libcephfs_java.yaml       |    6 +
 qa/suites/fs/basic/tasks/libcephfs_python.yaml     |    5 +
 qa/suites/fs/basic/tasks/mds_creation_retry.yaml   |    7 +
 qa/suites/fs/multiclient/%                         |    0
 .../fs/multiclient/clusters/three_clients.yaml     |   15 +
 qa/suites/fs/multiclient/clusters/two_clients.yaml |   14 +
 qa/suites/fs/multiclient/debug/mds_client.yaml     |    9 +
 qa/suites/fs/multiclient/dirfrag/frag_enable.yaml  |   10 +
 qa/suites/fs/multiclient/fs/btrfs.yaml             |    7 +
 qa/suites/fs/multiclient/mount/ceph-fuse.yaml      |    4 +
 .../fs/multiclient/mount/kclient.yaml.disabled     |    9 +
 .../fs/multiclient/tasks/cephfs_misc_tests.yaml    |    4 +
 .../fs/multiclient/tasks/fsx-mpi.yaml.disabled     |   20 +
 .../fs/multiclient/tasks/ior-shared-file.yaml      |   26 +
 qa/suites/fs/multiclient/tasks/mdtest.yaml         |   23 +
 qa/suites/fs/multifs/%                             |    0
 .../fs/multifs/clusters/2-remote-clients.yaml      |   10 +
 qa/suites/fs/multifs/debug/mds_client.yaml         |   15 +
 qa/suites/fs/multifs/dirfrag/frag_enable.yaml      |   10 +
 qa/suites/fs/multifs/mounts/ceph-fuse.yaml         |    8 +
 qa/suites/fs/multifs/tasks/failover.yaml           |    6 +
 qa/suites/fs/multifs/xfs.yaml                      |    6 +
 qa/suites/fs/permission/%                          |    0
 .../fs/permission/clusters/fixed-2-ucephfs.yaml    |   10 +
 qa/suites/fs/permission/debug/mds_client.yaml      |    9 +
 qa/suites/fs/permission/dirfrag/frag_enable.yaml   |   10 +
 qa/suites/fs/permission/fs/btrfs.yaml              |    7 +
 qa/suites/fs/permission/mount/ceph-fuse.yaml       |   10 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../tasks/cfuse_workunit_suites_pjd.yaml           |    5 +
 qa/suites/fs/recovery/%                            |    0
 .../fs/recovery/clusters/4-remote-clients.yaml     |   10 +
 qa/suites/fs/recovery/debug/mds_client.yaml        |   12 +
 qa/suites/fs/recovery/dirfrag/frag_enable.yaml     |   10 +
 qa/suites/fs/recovery/mounts/ceph-fuse.yaml        |   12 +
 qa/suites/fs/recovery/tasks/auto-repair.yaml       |   11 +
 qa/suites/fs/recovery/tasks/backtrace.yaml         |    5 +
 qa/suites/fs/recovery/tasks/cap-flush.yaml         |    5 +
 qa/suites/fs/recovery/tasks/client-limits.yaml     |   11 +
 qa/suites/fs/recovery/tasks/client-recovery.yaml   |   13 +
 qa/suites/fs/recovery/tasks/config-commands.yaml   |   11 +
 qa/suites/fs/recovery/tasks/damage.yaml            |   23 +
 qa/suites/fs/recovery/tasks/data-scan.yaml         |   15 +
 qa/suites/fs/recovery/tasks/forward-scrub.yaml     |    5 +
 qa/suites/fs/recovery/tasks/journal-repair.yaml    |   11 +
 qa/suites/fs/recovery/tasks/mds-flush.yaml         |    5 +
 qa/suites/fs/recovery/tasks/mds-full.yaml          |   25 +
 qa/suites/fs/recovery/tasks/pool-perm.yaml         |    5 +
 qa/suites/fs/recovery/tasks/sessionmap.yaml        |   10 +
 qa/suites/fs/recovery/tasks/strays.yaml            |    5 +
 qa/suites/fs/recovery/tasks/volume-client.yaml     |    5 +
 qa/suites/fs/recovery/xfs.yaml                     |    6 +
 qa/suites/fs/snaps/%                               |    0
 qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml   |   10 +
 qa/suites/fs/snaps/dirfrag/frag_enable.yaml        |   10 +
 qa/suites/fs/snaps/fs/btrfs.yaml                   |    6 +
 qa/suites/fs/snaps/mount/ceph-fuse.yaml            |    4 +
 qa/suites/fs/snaps/tasks/snaptests.yaml            |    5 +
 qa/suites/fs/standbyreplay/%                       |    0
 .../fs/standbyreplay/clusters/standby-replay.yaml  |   17 +
 .../fs/standbyreplay/dirfrag/frag_enable.yaml      |   10 +
 qa/suites/fs/standbyreplay/mount/fuse.yaml         |    5 +
 qa/suites/fs/standbyreplay/tasks/migration.yaml    |    5 +
 qa/suites/fs/standbyreplay/xfs.yaml                |    6 +
 qa/suites/fs/thrash/%                              |    0
 qa/suites/fs/thrash/ceph-thrash/default.yaml       |    2 +
 qa/suites/fs/thrash/ceph/base.yaml                 |    3 +
 .../fs/thrash/clusters/mds-1active-1standby.yaml   |   10 +
 qa/suites/fs/thrash/debug/mds_client.yaml          |    9 +
 qa/suites/fs/thrash/dirfrag/frag_enable.yaml       |   10 +
 qa/suites/fs/thrash/fs/xfs.yaml                    |    6 +
 qa/suites/fs/thrash/msgr-failures/none.yaml        |    0
 .../fs/thrash/msgr-failures/osd-mds-delay.yaml     |    8 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../fs/thrash/tasks/cfuse_workunit_snaptests.yaml  |    6 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    6 +
 .../fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml |    6 +
 .../thrash/tasks/cfuse_workunit_trivial_sync.yaml  |    5 +
 qa/suites/fs/traceless/%                           |    0
 .../fs/traceless/clusters/fixed-2-ucephfs.yaml     |   10 +
 qa/suites/fs/traceless/debug/mds_client.yaml       |    9 +
 qa/suites/fs/traceless/dirfrag/frag_enable.yaml    |   10 +
 qa/suites/fs/traceless/fs/btrfs.yaml               |    7 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../tasks/cfuse_workunit_suites_blogbench.yaml     |    8 +
 .../tasks/cfuse_workunit_suites_dbench.yaml        |    8 +
 .../tasks/cfuse_workunit_suites_ffsb.yaml          |   11 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    8 +
 qa/suites/fs/traceless/traceless/50pc.yaml         |    5 +
 qa/suites/fs/verify/%                              |    0
 qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml  |   10 +
 qa/suites/fs/verify/debug/+                        |    0
 qa/suites/fs/verify/debug/mds_client.yaml          |    9 +
 qa/suites/fs/verify/debug/mon.yaml                 |    6 +
 qa/suites/fs/verify/dirfrag/frag_enable.yaml       |   10 +
 qa/suites/fs/verify/fs/btrfs.yaml                  |    7 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../verify/tasks/cfuse_workunit_suites_dbench.yaml |   12 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    8 +
 .../fs/verify/tasks/libcephfs_interface_tests.yaml |    8 +
 qa/suites/fs/verify/validater/lockdep.yaml         |    5 +
 qa/suites/fs/verify/validater/valgrind.yaml        |   16 +
 qa/suites/hadoop/basic/%                           |    0
 qa/suites/hadoop/basic/clusters/fixed-3.yaml       |   13 +
 qa/suites/hadoop/basic/tasks/repl.yaml             |    8 +
 qa/suites/hadoop/basic/tasks/terasort.yaml         |   10 +
 qa/suites/hadoop/basic/tasks/wordcount.yaml        |    8 +
 qa/suites/hadoop/basic/xfs.yaml                    |    6 +
 qa/suites/kcephfs/cephfs/%                         |    0
 .../kcephfs/cephfs/clusters/fixed-3-cephfs.yaml    |   11 +
 qa/suites/kcephfs/cephfs/conf.yaml                 |    7 +
 qa/suites/kcephfs/cephfs/fs/btrfs.yaml             |    7 +
 qa/suites/kcephfs/cephfs/inline/no.yaml            |    3 +
 qa/suites/kcephfs/cephfs/inline/yes.yaml           |    6 +
 .../cephfs/tasks/kclient_workunit_direct_io.yaml   |    7 +
 .../tasks/kclient_workunit_kernel_untar_build.yaml |    6 +
 .../cephfs/tasks/kclient_workunit_misc.yaml        |    6 +
 .../cephfs/tasks/kclient_workunit_o_trunc.yaml     |    7 +
 .../cephfs/tasks/kclient_workunit_snaps.yaml       |    6 +
 .../tasks/kclient_workunit_suites_dbench.yaml      |    6 +
 .../cephfs/tasks/kclient_workunit_suites_ffsb.yaml |    6 +
 .../tasks/kclient_workunit_suites_fsstress.yaml    |    6 +
 .../cephfs/tasks/kclient_workunit_suites_fsx.yaml  |    6 +
 .../tasks/kclient_workunit_suites_fsync.yaml       |    6 +
 .../tasks/kclient_workunit_suites_iozone.yaml      |    6 +
 .../cephfs/tasks/kclient_workunit_suites_pjd.yaml  |    6 +
 .../tasks/kclient_workunit_trivial_sync.yaml       |    5 +
 qa/suites/kcephfs/mixed-clients/%                  |    0
 .../kcephfs/mixed-clients/clusters/2-clients.yaml  |    9 +
 qa/suites/kcephfs/mixed-clients/conf.yaml          |    7 +
 qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml      |    7 +
 .../kernel_cfuse_workunits_dbench_iozone.yaml      |   20 +
 ...ernel_cfuse_workunits_untarbuild_blogbench.yaml |   20 +
 qa/suites/kcephfs/thrash/%                         |    0
 .../kcephfs/thrash/clusters/fixed-3-cephfs.yaml    |   11 +
 qa/suites/kcephfs/thrash/conf.yaml                 |    7 +
 qa/suites/kcephfs/thrash/fs/btrfs.yaml             |    7 +
 qa/suites/kcephfs/thrash/thrashers/default.yaml    |    7 +
 qa/suites/kcephfs/thrash/thrashers/mds.yaml        |    4 +
 qa/suites/kcephfs/thrash/thrashers/mon.yaml        |    6 +
 .../workloads/kclient_workunit_suites_ffsb.yaml    |   11 +
 .../workloads/kclient_workunit_suites_iozone.yaml  |    6 +
 qa/suites/knfs/basic/%                             |    0
 qa/suites/knfs/basic/ceph/base.yaml                |   13 +
 qa/suites/knfs/basic/clusters/extra-client.yaml    |    9 +
 qa/suites/knfs/basic/fs/btrfs.yaml                 |    7 +
 qa/suites/knfs/basic/mount/v3.yaml                 |    5 +
 qa/suites/knfs/basic/mount/v4.yaml                 |    5 +
 .../tasks/nfs-workunit-kernel-untar-build.yaml     |    6 +
 qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml  |   11 +
 .../basic/tasks/nfs_workunit_suites_blogbench.yaml |    5 +
 .../basic/tasks/nfs_workunit_suites_dbench.yaml    |    5 +
 .../knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml |   10 +
 .../basic/tasks/nfs_workunit_suites_fsstress.yaml  |    5 +
 .../basic/tasks/nfs_workunit_suites_iozone.yaml    |    5 +
 qa/suites/krbd/rbd-nomount/%                       |    0
 qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml   |    8 +
 qa/suites/krbd/rbd-nomount/conf.yaml               |    7 +
 qa/suites/krbd/rbd-nomount/fs/btrfs.yaml           |    7 +
 qa/suites/krbd/rbd-nomount/install/ceph.yaml       |    3 +
 qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml  |    5 +
 qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml |    5 +
 .../krbd/rbd-nomount/tasks/rbd_concurrent.yaml     |   10 +
 .../krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml   |    5 +
 .../krbd/rbd-nomount/tasks/rbd_image_read.yaml     |   15 +
 qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml   |    5 +
 qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml     |   11 +
 .../rbd-nomount/tasks/rbd_map_snapshot_io.yaml     |    5 +
 .../krbd/rbd-nomount/tasks/rbd_map_unmap.yaml      |    5 +
 .../krbd/rbd-nomount/tasks/rbd_simple_big.yaml     |    6 +
 qa/suites/krbd/rbd/%                               |    0
 qa/suites/krbd/rbd/clusters/fixed-3.yaml           |    8 +
 qa/suites/krbd/rbd/conf.yaml                       |    7 +
 qa/suites/krbd/rbd/fs/btrfs.yaml                   |    7 +
 qa/suites/krbd/rbd/msgr-failures/few.yaml          |    5 +
 qa/suites/krbd/rbd/msgr-failures/many.yaml         |    5 +
 qa/suites/krbd/rbd/tasks/rbd_fio.yaml              |   11 +
 .../rbd/tasks/rbd_workunit_kernel_untar_build.yaml |    9 +
 .../krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml |    9 +
 .../krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml   |   10 +
 .../rbd/tasks/rbd_workunit_suites_fsstress.yaml    |    9 +
 .../tasks/rbd_workunit_suites_fsstress_btrfs.yaml  |   10 +
 .../tasks/rbd_workunit_suites_fsstress_ext4.yaml   |   10 +
 .../krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml    |    9 +
 .../krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml |   10 +
 .../krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml  |    8 +
 qa/suites/krbd/singleton/%                         |    0
 qa/suites/krbd/singleton/conf.yaml                 |    7 +
 qa/suites/krbd/singleton/fs/btrfs.yaml             |    7 +
 qa/suites/krbd/singleton/msgr-failures/few.yaml    |    5 +
 qa/suites/krbd/singleton/msgr-failures/many.yaml   |    5 +
 qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml   |   23 +
 qa/suites/krbd/thrash/%                            |    0
 qa/suites/krbd/thrash/clusters/fixed-3.yaml        |    8 +
 qa/suites/krbd/thrash/conf.yaml                    |    7 +
 qa/suites/krbd/thrash/fs/btrfs.yaml                |    7 +
 qa/suites/krbd/thrash/thrashers/default.yaml       |    7 +
 qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml  |    6 +
 qa/suites/krbd/thrash/workloads/rbd_fio.yaml       |    9 +
 .../thrash/workloads/rbd_workunit_suites_ffsb.yaml |    8 +
 .../rbd_workunit_suites_iozone.yaml.disabled       |    8 +
 qa/suites/krbd/unmap/%                             |    0
 qa/suites/krbd/unmap/ceph/ceph.yaml                |    6 +
 qa/suites/krbd/unmap/clusters/separate-client.yaml |   16 +
 qa/suites/krbd/unmap/conf.yaml                     |    5 +
 qa/suites/krbd/unmap/kernels/pre-single-major.yaml |   10 +
 qa/suites/krbd/unmap/kernels/single-major-off.yaml |    6 +
 qa/suites/krbd/unmap/kernels/single-major-on.yaml  |    6 +
 qa/suites/krbd/unmap/tasks/unmap.yaml              |    5 +
 qa/suites/krbd/unmap/xfs.yaml                      |    6 +
 qa/suites/marginal/basic/%                         |    0
 qa/suites/marginal/basic/clusters/fixed-3.yaml     |    4 +
 qa/suites/marginal/basic/fs/btrfs.yaml             |    7 +
 .../tasks/kclient_workunit_suites_blogbench.yaml   |    8 +
 .../basic/tasks/kclient_workunit_suites_fsx.yaml   |    8 +
 qa/suites/marginal/fs-misc/%                       |    0
 .../marginal/fs-misc/clusters/two_clients.yaml     |    4 +
 qa/suites/marginal/fs-misc/fs/btrfs.yaml           |    7 +
 qa/suites/marginal/fs-misc/tasks/locktest.yaml     |    5 +
 qa/suites/marginal/mds_restart/%                   |    0
 .../marginal/mds_restart/clusters/one_mds.yaml     |    4 +
 .../tasks/restart-workunit-backtraces.yaml         |   11 +
 qa/suites/marginal/multimds/%                      |    0
 .../marginal/multimds/clusters/3-node-3-mds.yaml   |    5 +
 .../marginal/multimds/clusters/3-node-9-mds.yaml   |    5 +
 qa/suites/marginal/multimds/fs/btrfs.yaml          |    7 +
 qa/suites/marginal/multimds/mounts/ceph-fuse.yaml  |    7 +
 qa/suites/marginal/multimds/mounts/kclient.yaml    |    4 +
 .../marginal/multimds/tasks/workunit_misc.yaml     |    5 +
 .../multimds/tasks/workunit_suites_blogbench.yaml  |    5 +
 .../multimds/tasks/workunit_suites_dbench.yaml     |    5 +
 .../multimds/tasks/workunit_suites_fsstress.yaml   |    5 +
 .../multimds/tasks/workunit_suites_fsync.yaml      |    5 +
 .../multimds/tasks/workunit_suites_pjd.yaml        |   10 +
 .../tasks/workunit_suites_truncate_delay.yaml      |   15 +
 qa/suites/marginal/multimds/thrash/exports.yaml    |    5 +
 qa/suites/marginal/multimds/thrash/normal.yaml     |    0
 .../mixed-clients/basic/clusters/fixed-3.yaml      |    4 +
 qa/suites/mixed-clients/basic/fs/btrfs.yaml        |    7 +
 .../kernel_cfuse_workunits_dbench_iozone.yaml      |   26 +
 ...ernel_cfuse_workunits_untarbuild_blogbench.yaml |   26 +
 qa/suites/multimds/basic/%                         |    0
 qa/suites/multimds/basic/ceph/base.yaml            |    6 +
 qa/suites/multimds/basic/clusters/3-mds.yaml       |    4 +
 qa/suites/multimds/basic/clusters/9-mds.yaml       |    4 +
 qa/suites/multimds/basic/debug/mds_client.yaml     |    9 +
 qa/suites/multimds/basic/fs/btrfs.yaml             |    7 +
 qa/suites/multimds/basic/inline/no.yaml            |    0
 qa/suites/multimds/basic/inline/yes.yaml           |    4 +
 qa/suites/multimds/basic/mount/cfuse.yaml          |    2 +
 qa/suites/multimds/basic/mount/kclient.yaml        |    7 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../multimds/basic/tasks/kernel_untar_build.yaml   |   10 +
 qa/suites/multimds/basic/tasks/misc.yaml           |    6 +
 .../multimds/basic/tasks/misc_test_o_trunc.yaml    |    5 +
 .../multimds/basic/tasks/suites_blogbench.yaml     |    5 +
 qa/suites/multimds/basic/tasks/suites_dbench.yaml  |    5 +
 qa/suites/multimds/basic/tasks/suites_ffsb.yaml    |   10 +
 .../multimds/basic/tasks/suites_fsstress.yaml      |    5 +
 qa/suites/multimds/basic/tasks/suites_fsx.yaml     |    5 +
 qa/suites/multimds/basic/tasks/suites_fsync.yaml   |    5 +
 qa/suites/multimds/basic/tasks/suites_iogen.yaml   |    5 +
 qa/suites/multimds/basic/tasks/suites_iozone.yaml  |    5 +
 qa/suites/multimds/basic/tasks/suites_pjd.yaml     |   14 +
 .../basic/tasks/suites_truncate_delay.yaml         |   14 +
 qa/suites/multimds/basic/tasks/trivial_sync.yaml   |    4 +
 qa/suites/multimds/libcephfs/%                     |    0
 qa/suites/multimds/libcephfs/ceph/base.yaml        |    6 +
 qa/suites/multimds/libcephfs/clusters/3-mds.yaml   |    4 +
 qa/suites/multimds/libcephfs/clusters/9-mds.yaml   |    3 +
 qa/suites/multimds/libcephfs/debug/mds_client.yaml |    9 +
 qa/suites/multimds/libcephfs/fs/btrfs.yaml         |    7 +
 qa/suites/multimds/libcephfs/inline/no.yaml        |    0
 qa/suites/multimds/libcephfs/inline/yes.yaml       |    4 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../libcephfs/tasks/libcephfs_interface_tests.yaml |    6 +
 .../multimds/libcephfs/tasks/libcephfs_java.yaml   |    6 +
 .../libcephfs/tasks/mds_creation_retry.yaml        |    6 +
 qa/suites/multimds/verify/%                        |    0
 qa/suites/multimds/verify/ceph/base.yaml           |    6 +
 qa/suites/multimds/verify/clusters/3-mds.yaml      |    3 +
 qa/suites/multimds/verify/clusters/9-mds.yaml      |    3 +
 qa/suites/multimds/verify/debug/mds_client.yaml    |    9 +
 qa/suites/multimds/verify/fs/btrfs.yaml            |    7 +
 .../overrides/whitelist_wrongly_marked_down.yaml   |   10 +
 .../verify/tasks/cfuse_workunit_suites_dbench.yaml |    6 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    6 +
 .../verify/tasks/libcephfs_interface_tests.yaml    |    6 +
 qa/suites/multimds/verify/validater/lockdep.yaml   |    5 +
 qa/suites/multimds/verify/validater/valgrind.yaml  |   16 +
 qa/suites/powercycle/osd/%                         |    0
 .../powercycle/osd/clusters/3osd-1per-target.yaml  |    5 +
 qa/suites/powercycle/osd/fs/btrfs.yaml             |    7 +
 qa/suites/powercycle/osd/fs/xfs.yaml               |    6 +
 qa/suites/powercycle/osd/powercycle/default.yaml   |    7 +
 .../osd/tasks/admin_socket_objecter_requests.yaml  |   13 +
 .../tasks/cfuse_workunit_kernel_untar_build.yaml   |   12 +
 .../powercycle/osd/tasks/cfuse_workunit_misc.yaml  |    7 +
 .../osd/tasks/cfuse_workunit_suites_ffsb.yaml      |   14 +
 .../osd/tasks/cfuse_workunit_suites_fsstress.yaml  |    6 +
 .../osd/tasks/cfuse_workunit_suites_fsx.yaml       |    7 +
 .../osd/tasks/cfuse_workunit_suites_fsync.yaml     |    6 +
 .../osd/tasks/cfuse_workunit_suites_pjd.yaml       |    6 +
 .../cfuse_workunit_suites_truncate_delay.yaml      |   15 +
 .../powercycle/osd/tasks/rados_api_tests.yaml      |   10 +
 qa/suites/powercycle/osd/tasks/radosbench.yaml     |   26 +
 qa/suites/powercycle/osd/tasks/readwrite.yaml      |    9 +
 .../powercycle/osd/tasks/snaps-few-objects.yaml    |   13 +
 .../powercycle/osd/tasks/snaps-many-objects.yaml   |   13 +
 qa/suites/rados/basic/%                            |    0
 qa/suites/rados/basic/clusters/+                   |    0
 qa/suites/rados/basic/clusters/fixed-2.yaml        |    7 +
 qa/suites/rados/basic/clusters/openstack.yaml      |    4 +
 qa/suites/rados/basic/fs/btrfs.yaml                |    7 +
 qa/suites/rados/basic/fs/xfs.yaml                  |    6 +
 qa/suites/rados/basic/msgr-failures/few.yaml       |    5 +
 qa/suites/rados/basic/msgr-failures/many.yaml      |    5 +
 qa/suites/rados/basic/msgr/async.yaml              |    6 +
 qa/suites/rados/basic/msgr/random.yaml             |    6 +
 qa/suites/rados/basic/msgr/simple.yaml             |    5 +
 qa/suites/rados/basic/rados.yaml                   |    7 +
 qa/suites/rados/basic/tasks/rados_api_tests.yaml   |   14 +
 qa/suites/rados/basic/tasks/rados_cls_all.yaml     |    7 +
 qa/suites/rados/basic/tasks/rados_python.yaml      |    9 +
 .../rados/basic/tasks/rados_stress_watch.yaml      |    7 +
 .../basic/tasks/rados_workunit_loadgen_big.yaml    |   11 +
 .../basic/tasks/rados_workunit_loadgen_mix.yaml    |   11 +
 .../tasks/rados_workunit_loadgen_mostlyread.yaml   |   11 +
 qa/suites/rados/basic/tasks/repair_test.yaml       |   11 +
 qa/suites/rados/basic/tasks/scrub_test.yaml        |   19 +
 qa/suites/rados/monthrash/%                        |    0
 qa/suites/rados/monthrash/ceph/ceph.yaml           |    9 +
 qa/suites/rados/monthrash/clusters/3-mons.yaml     |    7 +
 qa/suites/rados/monthrash/clusters/9-mons.yaml     |    7 +
 qa/suites/rados/monthrash/fs/xfs.yaml              |    6 +
 qa/suites/rados/monthrash/msgr-failures/few.yaml   |    5 +
 .../rados/monthrash/msgr-failures/mon-delay.yaml   |    9 +
 qa/suites/rados/monthrash/msgr/async.yaml          |    6 +
 qa/suites/rados/monthrash/msgr/random.yaml         |    6 +
 qa/suites/rados/monthrash/msgr/simple.yaml         |    5 +
 qa/suites/rados/monthrash/rados.yaml               |    7 +
 .../rados/monthrash/thrashers/force-sync-many.yaml |    6 +
 qa/suites/rados/monthrash/thrashers/many.yaml      |   13 +
 qa/suites/rados/monthrash/thrashers/one.yaml       |    4 +
 qa/suites/rados/monthrash/thrashers/sync-many.yaml |   11 +
 qa/suites/rados/monthrash/thrashers/sync.yaml      |   10 +
 .../monthrash/workloads/pool-create-delete.yaml    |   56 +
 .../rados/monthrash/workloads/rados_5925.yaml      |    4 +
 .../rados/monthrash/workloads/rados_api_tests.yaml |   14 +
 .../monthrash/workloads/rados_mon_workunits.yaml   |   13 +
 .../monthrash/workloads/snaps-few-objects.yaml     |   13 +
 qa/suites/rados/multimon/%                         |    0
 qa/suites/rados/multimon/clusters/21.yaml          |    8 +
 qa/suites/rados/multimon/clusters/3.yaml           |    6 +
 qa/suites/rados/multimon/clusters/6.yaml           |    7 +
 qa/suites/rados/multimon/clusters/9.yaml           |    8 +
 qa/suites/rados/multimon/fs/xfs.yaml               |    6 +
 qa/suites/rados/multimon/msgr-failures/few.yaml    |    5 +
 qa/suites/rados/multimon/msgr-failures/many.yaml   |    5 +
 qa/suites/rados/multimon/msgr/async.yaml           |    6 +
 qa/suites/rados/multimon/msgr/random.yaml          |    6 +
 qa/suites/rados/multimon/msgr/simple.yaml          |    5 +
 qa/suites/rados/multimon/rados.yaml                |    7 +
 .../rados/multimon/tasks/mon_clock_no_skews.yaml   |    9 +
 .../rados/multimon/tasks/mon_clock_with_skews.yaml |   15 +
 qa/suites/rados/multimon/tasks/mon_recovery.yaml   |    4 +
 qa/suites/rados/objectstore/alloc-hint.yaml        |   21 +
 .../rados/objectstore/ceph_objectstore_tool.yaml   |   16 +
 qa/suites/rados/objectstore/filejournal.yaml       |   13 +
 .../filestore-idempotent-aio-journal.yaml          |   14 +
 .../rados/objectstore/filestore-idempotent.yaml    |   11 +
 qa/suites/rados/objectstore/fusestore.yaml         |    9 +
 qa/suites/rados/objectstore/keyvaluedb.yaml        |    8 +
 .../rados/objectstore/objectcacher-stress.yaml     |   14 +
 qa/suites/rados/objectstore/objectstore.yaml       |   12 +
 qa/suites/rados/singleton-nomsgr/%                 |    0
 qa/suites/rados/singleton-nomsgr/all/11429.yaml    |  135 +
 qa/suites/rados/singleton-nomsgr/all/16113.yaml    |  103 +
 .../rados/singleton-nomsgr/all/cache-fs-trunc.yaml |   42 +
 .../rados/singleton-nomsgr/all/ceph-post-file.yaml |    8 +
 .../singleton-nomsgr/all/export-after-evict.yaml   |   29 +
 .../rados/singleton-nomsgr/all/full-tiering.yaml   |   26 +
 .../singleton-nomsgr/all/lfn-upgrade-hammer.yaml   |   97 +
 .../all/lfn-upgrade-infernalis.yaml                |   97 +
 qa/suites/rados/singleton-nomsgr/all/msgr.yaml     |   21 +
 .../all/multi-backfill-reject.yaml                 |   32 +
 .../rados/singleton-nomsgr/all/valgrind-leaks.yaml |   21 +
 qa/suites/rados/singleton-nomsgr/rados.yaml        |    7 +
 qa/suites/rados/singleton/%                        |    0
 qa/suites/rados/singleton/all/admin-socket.yaml    |   24 +
 qa/suites/rados/singleton/all/cephtool.yaml        |   25 +
 .../rados/singleton/all/divergent_priors.yaml      |   21 +
 .../rados/singleton/all/divergent_priors2.yaml     |   21 +
 qa/suites/rados/singleton/all/dump-stuck.yaml      |   14 +
 .../singleton/all/ec-lost-unfound-upgrade.yaml     |   30 +
 qa/suites/rados/singleton/all/ec-lost-unfound.yaml |   18 +
 .../singleton/all/lost-unfound-delete-upgrade.yaml |   29 +
 .../rados/singleton/all/lost-unfound-delete.yaml   |   17 +
 .../rados/singleton/all/lost-unfound-upgrade.yaml  |   29 +
 qa/suites/rados/singleton/all/lost-unfound.yaml    |   17 +
 qa/suites/rados/singleton/all/mon-config-keys.yaml |   19 +
 qa/suites/rados/singleton/all/mon-thrasher.yaml    |   25 +
 qa/suites/rados/singleton/all/osd-backfill.yaml    |   20 +
 .../singleton/all/osd-recovery-incomplete.yaml     |   21 +
 qa/suites/rados/singleton/all/osd-recovery.yaml    |   20 +
 qa/suites/rados/singleton/all/peer.yaml            |   20 +
 .../singleton/all/pg-removal-interruption.yaml     |   31 +
 qa/suites/rados/singleton/all/radostool.yaml       |   21 +
 qa/suites/rados/singleton/all/reg11184.yaml        |   21 +
 qa/suites/rados/singleton/all/rest-api.yaml        |   29 +
 qa/suites/rados/singleton/all/thrash-rados.yaml    |   26 +
 .../all/thrash_cache_writeback_proxy_none.yaml     |   66 +
 .../singleton/all/watch-notify-same-primary.yaml   |   26 +
 qa/suites/rados/singleton/fs/xfs.yaml              |    6 +
 qa/suites/rados/singleton/msgr-failures/few.yaml   |    5 +
 qa/suites/rados/singleton/msgr-failures/many.yaml  |    5 +
 qa/suites/rados/singleton/msgr/async.yaml          |    6 +
 qa/suites/rados/singleton/msgr/random.yaml         |    6 +
 qa/suites/rados/singleton/msgr/simple.yaml         |    5 +
 qa/suites/rados/singleton/rados.yaml               |    7 +
 qa/suites/rados/thrash-erasure-code-big/%          |    0
 qa/suites/rados/thrash-erasure-code-big/cluster/+  |    0
 .../thrash-erasure-code-big/cluster/12-osds.yaml   |    5 +
 .../thrash-erasure-code-big/cluster/openstack.yaml |    4 +
 .../rados/thrash-erasure-code-big/fs/btrfs.yaml    |    7 +
 .../rados/thrash-erasure-code-big/fs/xfs.yaml      |    6 +
 .../msgr-failures/fastclose.yaml                   |    6 +
 .../thrash-erasure-code-big/msgr-failures/few.yaml |    7 +
 .../msgr-failures/osd-delay.yaml                   |    9 +
 qa/suites/rados/thrash-erasure-code-big/rados.yaml |    7 +
 .../thrash-erasure-code-big/thrashers/default.yaml |   18 +
 .../thrashers/fastread.yaml                        |   19 +
 .../thrash-erasure-code-big/thrashers/mapgap.yaml  |   22 +
 .../thrashers/morepggrow.yaml                      |   16 +
 .../thrash-erasure-code-big/thrashers/pggrow.yaml  |   15 +
 .../workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml |   25 +
 qa/suites/rados/thrash-erasure-code-isa/%          |    0
 .../rados/thrash-erasure-code-isa/arch/x86_64.yaml |    1 +
 qa/suites/rados/thrash-erasure-code-isa/clusters/+ |    0
 .../thrash-erasure-code-isa/clusters/fixed-2.yaml  |    7 +
 .../clusters/openstack.yaml                        |    4 +
 .../rados/thrash-erasure-code-isa/fs/btrfs.yaml    |    7 +
 .../rados/thrash-erasure-code-isa/fs/xfs.yaml      |    6 +
 .../msgr-failures/fastclose.yaml                   |    6 +
 .../thrash-erasure-code-isa/msgr-failures/few.yaml |    7 +
 .../msgr-failures/osd-delay.yaml                   |    9 +
 qa/suites/rados/thrash-erasure-code-isa/rados.yaml |    7 +
 .../supported/centos_7.3.yaml                      |    2 +
 .../supported/ubuntu_14.04.yaml                    |    2 +
 .../thrash-erasure-code-isa/thrashers/default.yaml |   16 +
 .../thrash-erasure-code-isa/thrashers/mapgap.yaml  |   21 +
 .../thrashers/morepggrow.yaml                      |   22 +
 .../thrash-erasure-code-isa/thrashers/pggrow.yaml  |   15 +
 .../workloads/ec-rados-plugin=isa-k=2-m=1.yaml     |   25 +
 qa/suites/rados/thrash-erasure-code-shec/%         |    0
 .../rados/thrash-erasure-code-shec/clusters/+      |    0
 .../thrash-erasure-code-shec/clusters/fixed-4.yaml |    5 +
 .../clusters/openstack.yaml                        |    4 +
 .../rados/thrash-erasure-code-shec/fs/xfs.yaml     |    6 +
 .../msgr-failures/fastclose.yaml                   |    6 +
 .../msgr-failures/few.yaml                         |    7 +
 .../msgr-failures/osd-delay.yaml                   |    9 +
 .../rados/thrash-erasure-code-shec/rados.yaml      |    7 +
 .../thrashers/default.yaml                         |   18 +
 .../ec-rados-plugin=shec-k=4-m=3-c=2.yaml          |   25 +
 qa/suites/rados/thrash-erasure-code/%              |    0
 qa/suites/rados/thrash-erasure-code/clusters/+     |    0
 .../thrash-erasure-code/clusters/fixed-2.yaml      |    7 +
 .../thrash-erasure-code/clusters/openstack.yaml    |    4 +
 qa/suites/rados/thrash-erasure-code/fs/btrfs.yaml  |    7 +
 qa/suites/rados/thrash-erasure-code/fs/xfs.yaml    |    6 +
 .../msgr-failures/fastclose.yaml                   |    6 +
 .../thrash-erasure-code/msgr-failures/few.yaml     |    7 +
 .../msgr-failures/osd-delay.yaml                   |    9 +
 qa/suites/rados/thrash-erasure-code/rados.yaml     |    7 +
 .../thrash-erasure-code/thrashers/default.yaml     |   17 +
 .../thrash-erasure-code/thrashers/fastread.yaml    |   19 +
 .../thrash-erasure-code/thrashers/mapgap.yaml      |   22 +
 .../thrash-erasure-code/thrashers/morepggrow.yaml  |   16 +
 .../thrash-erasure-code/thrashers/pggrow.yaml      |   15 +
 .../ec-rados-plugin=jerasure-k=2-m=1.yaml          |   25 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   31 +
 .../workloads/ec-radosbench.yaml                   |   27 +
 .../workloads/ec-small-objects-fast-read.yaml      |   21 +
 .../workloads/ec-small-objects.yaml                |   20 +
 qa/suites/rados/thrash/%                           |    0
 .../2-size-1-min-size.yaml                         |    6 +
 .../2-size-2-min-size.yaml                         |    6 +
 .../3-size-2-min-size.yaml                         |    8 +
 .../thrash/1-pg-log-overrides/normal_pg_log.yaml   |    0
 .../thrash/1-pg-log-overrides/short_pg_log.yaml    |    6 +
 qa/suites/rados/thrash/clusters/+                  |    0
 qa/suites/rados/thrash/clusters/fixed-2.yaml       |    7 +
 qa/suites/rados/thrash/clusters/openstack.yaml     |    4 +
 qa/suites/rados/thrash/fs/btrfs.yaml               |    7 +
 qa/suites/rados/thrash/fs/xfs.yaml                 |    6 +
 qa/suites/rados/thrash/hobj-sort.yaml              |    5 +
 .../rados/thrash/msgr-failures/fastclose.yaml      |    6 +
 qa/suites/rados/thrash/msgr-failures/few.yaml      |    7 +
 .../rados/thrash/msgr-failures/osd-delay.yaml      |    9 +
 qa/suites/rados/thrash/msgr/async.yaml             |    6 +
 qa/suites/rados/thrash/msgr/random.yaml            |    6 +
 qa/suites/rados/thrash/msgr/simple.yaml            |    5 +
 qa/suites/rados/thrash/rados.yaml                  |    7 +
 qa/suites/rados/thrash/thrashers/default.yaml      |   16 +
 qa/suites/rados/thrash/thrashers/mapgap.yaml       |   21 +
 qa/suites/rados/thrash/thrashers/morepggrow.yaml   |   22 +
 qa/suites/rados/thrash/thrashers/pggrow.yaml       |   15 +
 .../workloads/admin_socket_objecter_requests.yaml  |   13 +
 .../rados/thrash/workloads/cache-agent-big.yaml    |   29 +
 .../rados/thrash/workloads/cache-agent-small.yaml  |   29 +
 .../workloads/cache-pool-snaps-readproxy.yaml      |   33 +
 .../rados/thrash/workloads/cache-pool-snaps.yaml   |   35 +
 qa/suites/rados/thrash/workloads/cache-snaps.yaml  |   33 +
 qa/suites/rados/thrash/workloads/cache.yaml        |   30 +
 .../thrash/workloads/pool-snaps-few-objects.yaml   |   14 +
 .../rados/thrash/workloads/rados_api_tests.yaml    |   15 +
 qa/suites/rados/thrash/workloads/radosbench.yaml   |   24 +
 qa/suites/rados/thrash/workloads/readwrite.yaml    |   12 +
 qa/suites/rados/thrash/workloads/rgw_snaps.yaml    |   25 +
 .../rados/thrash/workloads/small-objects.yaml      |   21 +
 .../rados/thrash/workloads/snaps-few-objects.yaml  |   13 +
 .../thrash/workloads/write_fadvise_dontneed.yaml   |    8 +
 qa/suites/rados/upgrade/%                          |    0
 qa/suites/rados/upgrade/hammer-x-singleton/%       |    0
 .../rados/upgrade/hammer-x-singleton/0-cluster/+   |    0
 .../hammer-x-singleton/0-cluster/openstack.yaml    |    4 +
 .../hammer-x-singleton/0-cluster/start.yaml        |   16 +
 .../1-hammer-install/hammer.yaml                   |    7 +
 .../2-partial-upgrade/firsthalf.yaml               |    8 +
 .../hammer-x-singleton/3-thrash/default.yaml       |   13 +
 .../upgrade/hammer-x-singleton/4-mon/mona.yaml     |    6 +
 .../rados/upgrade/hammer-x-singleton/5-workload/+  |    0
 .../hammer-x-singleton/5-workload/rbd-cls.yaml     |    7 +
 .../5-workload/rbd-import-export.yaml              |    9 +
 .../hammer-x-singleton/5-workload/readwrite.yaml   |   11 +
 .../5-workload/snaps-few-objects.yaml              |   14 +
 .../hammer-x-singleton/6-next-mon/monb.yaml        |    6 +
 .../rados/upgrade/hammer-x-singleton/7-workload/+  |    0
 .../hammer-x-singleton/7-workload/radosbench.yaml  |   18 +
 .../hammer-x-singleton/7-workload/rbd_api.yaml     |    7 +
 .../hammer-x-singleton/8-next-mon/monc.yaml        |    8 +
 .../rados/upgrade/hammer-x-singleton/9-workload/+  |    0
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   31 +
 .../hammer-x-singleton/9-workload/rbd-python.yaml  |    7 +
 .../hammer-x-singleton/9-workload/rgw-swift.yaml   |    9 +
 .../9-workload/snaps-many-objects.yaml             |   13 +
 .../9-workload/test_cache-pool-snaps.yaml          |   36 +
 qa/suites/rados/upgrade/rados.yaml                 |    7 +
 qa/suites/rados/verify/%                           |    0
 qa/suites/rados/verify/1thrash/default.yaml        |   10 +
 qa/suites/rados/verify/1thrash/none.yaml           |    3 +
 qa/suites/rados/verify/clusters/+                  |    0
 qa/suites/rados/verify/clusters/fixed-2.yaml       |    7 +
 qa/suites/rados/verify/clusters/openstack.yaml     |    4 +
 qa/suites/rados/verify/fs/btrfs.yaml               |    7 +
 qa/suites/rados/verify/msgr-failures/few.yaml      |    5 +
 qa/suites/rados/verify/msgr/async.yaml             |    6 +
 qa/suites/rados/verify/msgr/random.yaml            |    6 +
 qa/suites/rados/verify/msgr/simple.yaml            |    5 +
 qa/suites/rados/verify/rados.yaml                  |    7 +
 qa/suites/rados/verify/tasks/mon_recovery.yaml     |    2 +
 qa/suites/rados/verify/tasks/rados_api_tests.yaml  |   16 +
 qa/suites/rados/verify/tasks/rados_cls_all.yaml    |    5 +
 qa/suites/rados/verify/validater/lockdep.yaml      |    5 +
 qa/suites/rados/verify/validater/valgrind.yaml     |   13 +
 qa/suites/rbd/basic/%                              |    0
 qa/suites/rbd/basic/base/install.yaml              |    3 +
 qa/suites/rbd/basic/cachepool/none.yaml            |    0
 qa/suites/rbd/basic/cachepool/small.yaml           |   11 +
 qa/suites/rbd/basic/clusters/+                     |    0
 qa/suites/rbd/basic/clusters/fixed-1.yaml          |   10 +
 qa/suites/rbd/basic/clusters/openstack.yaml        |    4 +
 qa/suites/rbd/basic/fs/xfs.yaml                    |    6 +
 qa/suites/rbd/basic/msgr-failures/few.yaml         |    5 +
 qa/suites/rbd/basic/msgr-failures/many.yaml        |    5 +
 .../rbd/basic/tasks/rbd_api_tests_old_format.yaml  |    5 +
 qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml       |    6 +
 qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml       |    5 +
 qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml  |    5 +
 .../tasks/rbd_python_api_tests_old_format.yaml     |    5 +
 qa/suites/rbd/cli/%                                |    0
 qa/suites/rbd/cli/base/install.yaml                |    3 +
 qa/suites/rbd/cli/cachepool/none.yaml              |    0
 qa/suites/rbd/cli/cachepool/small.yaml             |   11 +
 qa/suites/rbd/cli/clusters/+                       |    0
 qa/suites/rbd/cli/clusters/fixed-1.yaml            |   10 +
 qa/suites/rbd/cli/clusters/openstack.yaml          |    4 +
 qa/suites/rbd/cli/features/defaults.yaml           |    6 +
 qa/suites/rbd/cli/features/format-1.yaml           |    5 +
 qa/suites/rbd/cli/features/journaling.yaml         |    6 +
 qa/suites/rbd/cli/features/layering.yaml           |    6 +
 qa/suites/rbd/cli/fs/xfs.yaml                      |    6 +
 qa/suites/rbd/cli/msgr-failures/few.yaml           |    5 +
 qa/suites/rbd/cli/msgr-failures/many.yaml          |    5 +
 qa/suites/rbd/cli/workloads/rbd_cli_copy.yaml      |    5 +
 .../rbd/cli/workloads/rbd_cli_import_export.yaml   |    5 +
 qa/suites/rbd/librbd/%                             |    0
 qa/suites/rbd/librbd/cache/none.yaml               |    6 +
 qa/suites/rbd/librbd/cache/writeback.yaml          |    6 +
 qa/suites/rbd/librbd/cache/writethrough.yaml       |    7 +
 qa/suites/rbd/librbd/cachepool/none.yaml           |    0
 qa/suites/rbd/librbd/cachepool/small.yaml          |   11 +
 qa/suites/rbd/librbd/clusters/+                    |    0
 qa/suites/rbd/librbd/clusters/fixed-3.yaml         |    8 +
 qa/suites/rbd/librbd/clusters/openstack.yaml       |    4 +
 qa/suites/rbd/librbd/copy-on-read/off.yaml         |    5 +
 qa/suites/rbd/librbd/copy-on-read/on.yaml          |    5 +
 qa/suites/rbd/librbd/fs/xfs.yaml                   |    6 +
 qa/suites/rbd/librbd/msgr-failures/few.yaml        |    7 +
 qa/suites/rbd/librbd/workloads/c_api_tests.yaml    |    7 +
 .../workloads/c_api_tests_with_defaults.yaml       |    7 +
 .../workloads/c_api_tests_with_journaling.yaml     |    7 +
 qa/suites/rbd/librbd/workloads/fsx.yaml            |    4 +
 .../rbd/librbd/workloads/python_api_tests.yaml     |    7 +
 .../workloads/python_api_tests_with_defaults.yaml  |    7 +
 .../python_api_tests_with_journaling.yaml          |    7 +
 qa/suites/rbd/librbd/workloads/rbd_fio.yaml        |   10 +
 qa/suites/rbd/maintenance/%                        |    0
 qa/suites/rbd/maintenance/base/install.yaml        |    3 +
 qa/suites/rbd/maintenance/clusters/+               |    0
 qa/suites/rbd/maintenance/clusters/fixed-3.yaml    |    8 +
 qa/suites/rbd/maintenance/clusters/openstack.yaml  |    8 +
 qa/suites/rbd/maintenance/qemu/xfstests.yaml       |   13 +
 .../maintenance/workloads/dynamic_features.yaml    |    8 +
 .../maintenance/workloads/rebuild_object_map.yaml  |    8 +
 qa/suites/rbd/maintenance/xfs.yaml                 |    6 +
 qa/suites/rbd/mirror/%                             |    0
 qa/suites/rbd/mirror/base/install.yaml             |    9 +
 qa/suites/rbd/mirror/cluster/+                     |    0
 qa/suites/rbd/mirror/cluster/2-node.yaml           |   19 +
 qa/suites/rbd/mirror/cluster/openstack.yaml        |    4 +
 qa/suites/rbd/mirror/fs/xfs.yaml                   |    6 +
 qa/suites/rbd/mirror/msgr-failures/few.yaml        |    5 +
 qa/suites/rbd/mirror/msgr-failures/many.yaml       |    5 +
 .../rbd/mirror/rbd-mirror/one-per-cluster.yaml     |   14 +
 .../workloads/rbd-mirror-stress-workunit.yaml      |   12 +
 .../rbd/mirror/workloads/rbd-mirror-workunit.yaml  |   11 +
 qa/suites/rbd/qemu/%                               |    0
 qa/suites/rbd/qemu/cache/none.yaml                 |    6 +
 qa/suites/rbd/qemu/cache/writeback.yaml            |    6 +
 qa/suites/rbd/qemu/cache/writethrough.yaml         |    7 +
 qa/suites/rbd/qemu/cachepool/ec-cache.yaml         |   14 +
 qa/suites/rbd/qemu/cachepool/none.yaml             |    0
 qa/suites/rbd/qemu/cachepool/small.yaml            |   11 +
 qa/suites/rbd/qemu/clusters/+                      |    0
 qa/suites/rbd/qemu/clusters/fixed-3.yaml           |    8 +
 qa/suites/rbd/qemu/clusters/openstack.yaml         |    8 +
 qa/suites/rbd/qemu/features/defaults.yaml          |    6 +
 qa/suites/rbd/qemu/features/journaling.yaml        |    6 +
 qa/suites/rbd/qemu/fs/xfs.yaml                     |    6 +
 qa/suites/rbd/qemu/msgr-failures/few.yaml          |    7 +
 qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml      |    6 +
 qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml    |    6 +
 .../rbd/qemu/workloads/qemu_iozone.yaml.disabled   |    6 +
 qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml    |    8 +
 qa/suites/rbd/singleton/%                          |    0
 qa/suites/rbd/singleton/all/admin_socket.yaml      |    9 +
 qa/suites/rbd/singleton/all/formatted-output.yaml  |   10 +
 qa/suites/rbd/singleton/all/merge_diff.yaml        |    9 +
 qa/suites/rbd/singleton/all/permissions.yaml       |    9 +
 .../rbd/singleton/all/qemu-iotests-no-cache.yaml   |   13 +
 .../rbd/singleton/all/qemu-iotests-writeback.yaml  |   13 +
 .../singleton/all/qemu-iotests-writethrough.yaml   |   14 +
 .../rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml  |   14 +
 qa/suites/rbd/singleton/all/rbd_mirror.yaml        |    9 +
 qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml |    7 +
 .../rbd/singleton/all/read-flags-no-cache.yaml     |   12 +
 .../rbd/singleton/all/read-flags-writeback.yaml    |   12 +
 .../rbd/singleton/all/read-flags-writethrough.yaml |   13 +
 qa/suites/rbd/singleton/all/verify_pool.yaml       |    9 +
 qa/suites/rbd/singleton/openstack.yaml             |    4 +
 qa/suites/rbd/thrash/%                             |    0
 qa/suites/rbd/thrash/base/install.yaml             |    3 +
 qa/suites/rbd/thrash/clusters/+                    |    0
 qa/suites/rbd/thrash/clusters/fixed-2.yaml         |    7 +
 qa/suites/rbd/thrash/clusters/openstack.yaml       |    8 +
 qa/suites/rbd/thrash/fs/xfs.yaml                   |    6 +
 qa/suites/rbd/thrash/msgr-failures/few.yaml        |    5 +
 qa/suites/rbd/thrash/thrashers/cache.yaml          |   18 +
 qa/suites/rbd/thrash/thrashers/default.yaml        |    8 +
 qa/suites/rbd/thrash/workloads/journal.yaml        |    5 +
 qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml  |    7 +
 .../workloads/rbd_api_tests_copy_on_read.yaml      |   12 +
 .../thrash/workloads/rbd_api_tests_journaling.yaml |    7 +
 .../thrash/workloads/rbd_api_tests_no_locking.yaml |    7 +
 .../thrash/workloads/rbd_fsx_cache_writeback.yaml  |    9 +
 .../workloads/rbd_fsx_cache_writethrough.yaml      |   10 +
 .../rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml |   10 +
 .../rbd/thrash/workloads/rbd_fsx_journal.yaml      |    5 +
 qa/suites/rbd/thrash/workloads/rbd_fsx_nbd.yaml    |   15 +
 .../rbd/thrash/workloads/rbd_fsx_nocache.yaml      |    9 +
 qa/suites/rbd/thrash/workloads/rbd_nbd.yaml        |   10 +
 qa/suites/rbd/valgrind/%                           |    0
 qa/suites/rbd/valgrind/base/install.yaml           |    3 +
 qa/suites/rbd/valgrind/clusters/+                  |    0
 qa/suites/rbd/valgrind/clusters/fixed-1.yaml       |   10 +
 qa/suites/rbd/valgrind/clusters/openstack.yaml     |    4 +
 qa/suites/rbd/valgrind/fs/xfs.yaml                 |    6 +
 qa/suites/rbd/valgrind/validator/memcheck.yaml     |   10 +
 qa/suites/rbd/valgrind/workloads/c_api_tests.yaml  |    7 +
 .../workloads/c_api_tests_with_defaults.yaml       |    7 +
 .../workloads/c_api_tests_with_journaling.yaml     |    7 +
 qa/suites/rbd/valgrind/workloads/fsx.yaml          |    4 +
 .../rbd/valgrind/workloads/python_api_tests.yaml   |    7 +
 .../workloads/python_api_tests_with_defaults.yaml  |    7 +
 .../python_api_tests_with_journaling.yaml          |    7 +
 qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml   |    5 +
 qa/suites/rest/basic/tasks/rest_test.yaml          |   32 +
 qa/suites/rgw/multifs/%                            |    0
 qa/suites/rgw/multifs/clusters/fixed-2.yaml        |    7 +
 qa/suites/rgw/multifs/frontend/apache.yaml         |    3 +
 qa/suites/rgw/multifs/frontend/civetweb.yaml       |    3 +
 qa/suites/rgw/multifs/fs/btrfs.yaml                |    7 +
 qa/suites/rgw/multifs/fs/xfs.yaml                  |    6 +
 qa/suites/rgw/multifs/overrides.yaml               |    7 +
 qa/suites/rgw/multifs/rgw_pool_type/ec-cache.yaml  |    6 +
 .../rgw/multifs/rgw_pool_type/ec-profile.yaml      |   10 +
 qa/suites/rgw/multifs/rgw_pool_type/ec.yaml        |    5 +
 .../rgw/multifs/rgw_pool_type/replicated.yaml      |    3 +
 qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml  |   10 +
 .../rgw/multifs/tasks/rgw_multipart_upload.yaml    |   10 +
 qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml     |   16 +
 qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml     |   16 +
 qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml       |    8 +
 qa/suites/rgw/multifs/tasks/rgw_swift.yaml         |    7 +
 qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml    |   10 +
 qa/suites/rgw/singleton/%                          |    0
 .../rgw/singleton/all/radosgw-admin-data-sync.yaml |   64 +
 .../singleton/all/radosgw-admin-multi-region.yaml  |   67 +
 qa/suites/rgw/singleton/all/radosgw-admin.yaml     |   20 +
 .../singleton/all/radosgw-convert-to-region.yaml   |   81 +
 qa/suites/rgw/singleton/frontend/apache.yaml       |    3 +
 qa/suites/rgw/singleton/frontend/civetweb.yaml     |    3 +
 qa/suites/rgw/singleton/fs/xfs.yaml                |    6 +
 qa/suites/rgw/singleton/overrides.yaml             |    7 +
 .../rgw/singleton/rgw_pool_type/ec-cache.yaml      |    6 +
 .../rgw/singleton/rgw_pool_type/ec-profile.yaml    |   10 +
 qa/suites/rgw/singleton/rgw_pool_type/ec.yaml      |    5 +
 .../rgw/singleton/rgw_pool_type/replicated.yaml    |    3 +
 qa/suites/rgw/singleton/xfs.yaml                   |    6 +
 qa/suites/rgw/verify/%                             |    0
 qa/suites/rgw/verify/clusters/fixed-2.yaml         |    7 +
 qa/suites/rgw/verify/frontend/apache.yaml          |    3 +
 qa/suites/rgw/verify/frontend/civetweb.yaml        |    3 +
 qa/suites/rgw/verify/fs/btrfs.yaml                 |    7 +
 qa/suites/rgw/verify/msgr-failures/few.yaml        |    5 +
 qa/suites/rgw/verify/overrides.yaml                |    7 +
 qa/suites/rgw/verify/rgw_pool_type/ec-cache.yaml   |    6 +
 qa/suites/rgw/verify/rgw_pool_type/ec-profile.yaml |   10 +
 qa/suites/rgw/verify/rgw_pool_type/ec.yaml         |    5 +
 qa/suites/rgw/verify/rgw_pool_type/replicated.yaml |    3 +
 qa/suites/rgw/verify/tasks/rgw_s3tests.yaml        |   12 +
 .../rgw/verify/tasks/rgw_s3tests_multiregion.yaml  |   63 +
 qa/suites/rgw/verify/tasks/rgw_swift.yaml          |   11 +
 qa/suites/rgw/verify/validater/lockdep.yaml        |    7 +
 qa/suites/rgw/verify/validater/valgrind.yaml       |   13 +
 qa/suites/samba/%                                  |    0
 qa/suites/samba/clusters/samba-basic.yaml          |    7 +
 qa/suites/samba/fs/btrfs.yaml                      |    7 +
 qa/suites/samba/install/install.yaml               |    9 +
 qa/suites/samba/mount/fuse.yaml                    |    6 +
 qa/suites/samba/mount/kclient.yaml                 |   14 +
 qa/suites/samba/mount/native.yaml                  |    2 +
 qa/suites/samba/mount/noceph.yaml                  |    5 +
 qa/suites/samba/workload/cifs-dbench.yaml          |    8 +
 qa/suites/samba/workload/cifs-fsstress.yaml        |    8 +
 .../samba/workload/cifs-kernel-build.yaml.disabled |    9 +
 qa/suites/samba/workload/smbtorture.yaml           |   39 +
 qa/suites/smoke/1node/%                            |    0
 qa/suites/smoke/1node/clusters/+                   |    0
 qa/suites/smoke/1node/clusters/fixed-1.yaml        |   10 +
 qa/suites/smoke/1node/clusters/openstack.yaml      |    8 +
 qa/suites/smoke/1node/tasks/ceph-deploy.yaml       |    7 +
 qa/suites/smoke/basic/%                            |    0
 qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml |   11 +
 qa/suites/smoke/basic/fs/btrfs.yaml                |    7 +
 .../tasks/cfuse_workunit_suites_blogbench.yaml     |    9 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml      |    9 +
 .../basic/tasks/cfuse_workunit_suites_iozone.yaml  |    9 +
 .../basic/tasks/cfuse_workunit_suites_pjd.yaml     |   16 +
 .../basic/tasks/kclient_workunit_direct_io.yaml    |   14 +
 .../tasks/kclient_workunit_suites_dbench.yaml      |   14 +
 .../tasks/kclient_workunit_suites_fsstress.yaml    |   14 +
 .../basic/tasks/kclient_workunit_suites_pjd.yaml   |   14 +
 .../basic/tasks/libcephfs_interface_tests.yaml     |   18 +
 qa/suites/smoke/basic/tasks/mon_thrash.yaml        |   23 +
 qa/suites/smoke/basic/tasks/rados_api_tests.yaml   |   16 +
 qa/suites/smoke/basic/tasks/rados_bench.yaml       |   36 +
 qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml |   41 +
 qa/suites/smoke/basic/tasks/rados_cls_all.yaml     |    8 +
 qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml    |   31 +
 qa/suites/smoke/basic/tasks/rados_python.yaml      |   11 +
 .../basic/tasks/rados_workunit_loadgen_mix.yaml    |    9 +
 qa/suites/smoke/basic/tasks/rbd_api_tests.yaml     |   11 +
 .../smoke/basic/tasks/rbd_cli_import_export.yaml   |   11 +
 qa/suites/smoke/basic/tasks/rbd_fsx.yaml           |   17 +
 .../smoke/basic/tasks/rbd_python_api_tests.yaml    |   11 +
 .../basic/tasks/rbd_workunit_suites_iozone.yaml    |   18 +
 qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml    |   13 +
 qa/suites/smoke/basic/tasks/rgw_s3tests.yaml       |    8 +
 qa/suites/smoke/basic/tasks/rgw_swift.yaml         |    8 +
 qa/suites/smoke/systemd/distro/centos.yaml         |    2 +
 qa/suites/stress/bench/%                           |    0
 .../stress/bench/clusters/fixed-3-cephfs.yaml      |   11 +
 .../stress/bench/tasks/cfuse_workunit_snaps.yaml   |    8 +
 .../bench/tasks/kclient_workunit_suites_fsx.yaml   |    8 +
 qa/suites/stress/thrash/%                          |    0
 qa/suites/stress/thrash/clusters/16-osd.yaml       |   18 +
 .../stress/thrash/clusters/3-osd-1-machine.yaml    |    3 +
 qa/suites/stress/thrash/clusters/8-osd.yaml        |   10 +
 qa/suites/stress/thrash/fs/btrfs.yaml              |    7 +
 qa/suites/stress/thrash/fs/none.yaml               |    0
 qa/suites/stress/thrash/fs/xfs.yaml                |    6 +
 qa/suites/stress/thrash/thrashers/default.yaml     |    7 +
 qa/suites/stress/thrash/thrashers/fast.yaml        |    9 +
 qa/suites/stress/thrash/thrashers/more-down.yaml   |    8 +
 .../stress/thrash/workloads/bonnie_cfuse.yaml      |    6 +
 .../stress/thrash/workloads/iozone_cfuse.yaml      |    6 +
 qa/suites/stress/thrash/workloads/radosbench.yaml  |    4 +
 qa/suites/stress/thrash/workloads/readwrite.yaml   |    9 +
 qa/suites/teuthology/buildpackages/%               |    0
 .../buildpackages/distros/centos_7.3.yaml          |    2 +
 .../buildpackages/distros/ubuntu_14.04.yaml        |    2 +
 .../teuthology/buildpackages/tasks/branch.yaml     |   10 +
 .../teuthology/buildpackages/tasks/default.yaml    |   14 +
 qa/suites/teuthology/buildpackages/tasks/tag.yaml  |   11 +
 qa/suites/teuthology/ceph/%                        |    0
 qa/suites/teuthology/ceph/clusters/single.yaml     |    2 +
 qa/suites/teuthology/ceph/distros/centos_7.3.yaml  |    2 +
 .../teuthology/ceph/distros/ubuntu_14.04.yaml      |    2 +
 qa/suites/teuthology/ceph/tasks/teuthology.yaml    |    3 +
 qa/suites/teuthology/integration.yaml              |    2 +
 qa/suites/teuthology/multi-cluster/%               |    0
 qa/suites/teuthology/multi-cluster/all/ceph.yaml   |   23 +
 .../teuthology/multi-cluster/all/thrashosds.yaml   |   20 +
 .../teuthology/multi-cluster/all/upgrade.yaml      |   49 +
 .../teuthology/multi-cluster/all/workunit.yaml     |   21 +
 qa/suites/teuthology/multi-cluster/fs/xfs.yaml     |    6 +
 qa/suites/teuthology/no-ceph/%                     |    0
 qa/suites/teuthology/no-ceph/clusters/single.yaml  |    2 +
 qa/suites/teuthology/no-ceph/tasks/teuthology.yaml |    2 +
 qa/suites/teuthology/nop/%                         |    0
 qa/suites/teuthology/nop/all/nop.yaml              |    3 +
 qa/suites/teuthology/rgw/%                         |    0
 qa/suites/teuthology/rgw/distros/centos_7.3.yaml   |    2 +
 qa/suites/teuthology/rgw/distros/ubuntu_14.04.yaml |    2 +
 .../teuthology/rgw/tasks/s3tests-civetweb.yaml     |   23 +
 .../teuthology/rgw/tasks/s3tests-fastcgi.yaml      |   23 +
 qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml   |   25 +
 qa/suites/teuthology/workunits/yes.yaml            |    8 +
 qa/suites/tgt/basic/%                              |    1 +
 qa/suites/tgt/basic/clusters/fixed-3.yaml          |    4 +
 qa/suites/tgt/basic/fs/btrfs.yaml                  |    6 +
 qa/suites/tgt/basic/msgr-failures/few.yaml         |    5 +
 qa/suites/tgt/basic/msgr-failures/many.yaml        |    5 +
 qa/suites/tgt/basic/tasks/blogbench.yaml           |    9 +
 qa/suites/tgt/basic/tasks/bonnie.yaml              |    9 +
 qa/suites/tgt/basic/tasks/dbench-short.yaml        |    9 +
 qa/suites/tgt/basic/tasks/dbench.yaml              |    9 +
 qa/suites/tgt/basic/tasks/ffsb.yaml                |    9 +
 qa/suites/tgt/basic/tasks/fio.yaml                 |    9 +
 qa/suites/tgt/basic/tasks/fsstress.yaml            |    9 +
 qa/suites/tgt/basic/tasks/fsx.yaml                 |    9 +
 qa/suites/tgt/basic/tasks/fsync-tester.yaml        |    9 +
 qa/suites/tgt/basic/tasks/iogen.yaml               |    9 +
 qa/suites/tgt/basic/tasks/iozone-sync.yaml         |    9 +
 qa/suites/tgt/basic/tasks/iozone.yaml              |    9 +
 qa/suites/tgt/basic/tasks/pjd.yaml                 |    9 +
 .../client-upgrade/firefly-client-x/basic/%        |    0
 .../firefly-client-x/basic/0-cluster/start.yaml    |   18 +
 .../basic/1-install/firefly-client-x.yaml          |   10 +
 .../basic/2-workload/rbd_cli_import_export.yaml    |    9 +
 .../firefly-client-x/basic/distros/centos_7.2.yaml |    2 +
 .../basic/distros/ubuntu_14.04.yaml                |    2 +
 .../upgrade/client-upgrade/hammer-client-x/basic/% |    0
 .../hammer-client-x/basic/0-cluster/start.yaml     |   14 +
 .../basic/1-install/hammer-client-x.yaml           |   10 +
 .../basic/2-workload/rbd_api_tests.yaml            |   26 +
 .../basic/2-workload/rbd_cli_import_export.yaml    |   13 +
 .../hammer-client-x/basic/distros/centos_7.2.yaml  |    2 +
 .../basic/distros/ubuntu_14.04.yaml                |    2 +
 .../upgrade/client-upgrade/hammer-client-x/rbd/%   |    0
 .../hammer-client-x/rbd/0-cluster/start.yaml       |   16 +
 .../rbd/1-install/hammer-client-x.yaml             |   10 +
 .../rbd/2-workload/rbd_notification_tests.yaml     |   21 +
 .../hammer-client-x/rbd/distros/centos_7.2.yaml    |    2 +
 .../hammer-client-x/rbd/distros/ubuntu_14.04.yaml  |    2 +
 .../client-upgrade/infernalis-client-x/basic/%     |    0
 .../infernalis-client-x/basic/0-cluster/start.yaml |   12 +
 .../basic/1-install/infernalis-client-x.yaml       |   10 +
 .../basic/2-workload/rbd_api_tests.yaml            |   21 +
 .../basic/2-workload/rbd_cli_import_export.yaml    |   13 +
 .../basic/distros/centos_7.2.yaml                  |    2 +
 .../basic/distros/ubuntu_14.04.yaml                |    2 +
 .../client-upgrade/infernalis-client-x/rbd/%       |    0
 .../infernalis-client-x/rbd/0-cluster/start.yaml   |   16 +
 .../rbd/1-install/infernalis-client-x.yaml         |   10 +
 .../rbd/2-workload/rbd_notification_tests.yaml     |   21 +
 .../rbd/distros/centos_7.2.yaml                    |    2 +
 .../rbd/distros/ubuntu_14.04.yaml                  |    2 +
 qa/suites/upgrade/firefly-hammer-x/parallel/%      |    0
 .../firefly-hammer-x/parallel/0-cluster/start.yaml |   21 +
 .../1-firelfy-hammer-install/firefly-hammer.yaml   |   14 +
 .../upgrade/firefly-hammer-x/parallel/2-workload/+ |    0
 .../parallel/2-workload/rados_api.yaml             |    9 +
 .../parallel/2-workload/rados_loadgenbig.yaml      |    8 +
 .../parallel/2-workload/test_rbd_api.yaml          |    8 +
 .../parallel/2-workload/test_rbd_python.yaml       |    8 +
 .../parallel/3-upgrade-sequence/upgrade-all.yaml   |   10 +
 .../3-upgrade-sequence/upgrade-mon-osd-mds.yaml    |   39 +
 .../firefly-hammer-x.yaml                          |    8 +
 .../upgrade/firefly-hammer-x/parallel/5-workload/+ |    0
 .../parallel/5-workload/rados_api.yaml             |   13 +
 .../parallel/5-workload/rados_loadgenbig.yaml      |    8 +
 .../parallel/5-workload/test_rbd_api.yaml          |    8 +
 .../parallel/5-workload/test_rbd_python.yaml       |    8 +
 .../parallel/6-upgrade-sequence/upgrade-all.yaml   |    8 +
 .../6-upgrade-sequence/upgrade-by-daemon.yaml      |   35 +
 .../firefly-hammer-x/parallel/7-final-workload/+   |    0
 .../ec-rados-plugin=jerasure-k=2-m=1.yaml          |   25 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   31 +
 .../7-final-workload/rados-snaps-few-objects.yaml  |   13 +
 .../7-final-workload/rados_loadgenmix.yaml         |    6 +
 .../7-final-workload/rados_mon_thrash.yaml         |   11 +
 .../parallel/7-final-workload/rbd_cls.yaml         |    7 +
 .../7-final-workload/rbd_import_export.yaml        |    7 +
 .../parallel/7-final-workload/rgw_s3tests.yaml     |    5 +
 .../parallel/distros/centos_7.3.yaml               |    2 +
 .../parallel/distros/ubuntu_14.04.yaml             |    2 +
 qa/suites/upgrade/firefly-hammer-x/stress-split/%  |    0
 .../stress-split/00-cluster/start.yaml             |   19 +
 .../stress-split/01-firefly-install/firefly.yaml   |    6 +
 .../02-partial-upgrade-hammer/firsthalf.yaml       |    8 +
 .../stress-split/03-workload/rbd.yaml              |    6 +
 .../stress-split/04-mona-upgrade-hammer/mona.yaml  |    6 +
 .../firefly-hammer-x/stress-split/05-workload/+    |    0
 .../stress-split/05-workload/rbd-cls.yaml          |    6 +
 .../stress-split/05-workload/readwrite.yaml        |   11 +
 .../stress-split/06-monb-upgrade-hammer/monb.yaml  |    6 +
 .../firefly-hammer-x/stress-split/07-workload/+    |    0
 .../stress-split/07-workload/radosbench.yaml       |   33 +
 .../stress-split/07-workload/rbd_api.yaml          |    6 +
 .../stress-split/08-monc-upgrade-hammer/monc.yaml  |    8 +
 .../stress-split/09-workload/rbd-python.yaml       |    8 +
 .../10-osds-upgrade-hammer/secondhalf.yaml         |    8 +
 .../11-workload/snaps-few-objects.yaml             |   14 +
 .../stress-split/12-partial-upgrade-x/first.yaml   |    7 +
 .../13-workload/rados_loadgen_big.yaml             |    6 +
 .../stress-split/14-mona-upgrade-x/mona.yaml       |    6 +
 .../15-workload/rbd-import-export.yaml             |    8 +
 .../stress-split/16-monb-upgrade-x/monb.yaml       |    6 +
 .../stress-split/17-workload/readwrite.yaml        |   11 +
 .../stress-split/18-monc-upgrade-x/monc.yaml       |    8 +
 .../stress-split/19-workload/radosbench.yaml       |   33 +
 .../20-osds-upgrade-x/osds_secondhalf.yaml         |    7 +
 .../stress-split/21-final-workload/+               |    0
 .../21-final-workload/rados_stress_watch.yaml      |    5 +
 .../21-final-workload/rbd_cls_tests.yaml           |    5 +
 .../stress-split/21-final-workload/rgw-swift.yaml  |    8 +
 .../stress-split/distros/centos_7.3.yaml           |    2 +
 .../stress-split/distros/ubuntu_14.04.yaml         |    2 +
 .../hammer-jewel-x/parallel/distros/centos.yaml    |    2 +
 .../stress-split/distros/centos.yaml               |    2 +
 qa/suites/upgrade/hammer-x/f-h-x-offline/%         |    0
 .../upgrade/hammer-x/f-h-x-offline/0-install.yaml  |   13 +
 .../upgrade/hammer-x/f-h-x-offline/1-pre.yaml      |    6 +
 .../upgrade/hammer-x/f-h-x-offline/2-upgrade.yaml  |   18 +
 .../upgrade/hammer-x/f-h-x-offline/3-jewel.yaml    |    5 +
 .../upgrade/hammer-x/f-h-x-offline/4-after.yaml    |    5 +
 qa/suites/upgrade/hammer-x/f-h-x-offline/README    |    4 +
 .../hammer-x/f-h-x-offline/ubuntu_14.04.yaml       |    2 +
 qa/suites/upgrade/hammer-x/parallel/%              |    0
 .../upgrade/hammer-x/parallel/0-cluster/start.yaml |   31 +
 .../upgrade/hammer-x/parallel/0-tz-eastern.yaml    |    4 +
 .../hammer-x/parallel/1-hammer-install/hammer.yaml |   17 +
 qa/suites/upgrade/hammer-x/parallel/2-workload/+   |    0
 .../hammer-x/parallel/2-workload/blogbench.yaml    |   14 +
 .../parallel/2-workload/ec-rados-default.yaml      |   20 +
 .../hammer-x/parallel/2-workload/rados_api.yaml    |    8 +
 .../parallel/2-workload/rados_loadgenbig.yaml      |    8 +
 .../hammer-x/parallel/2-workload/test_rbd_api.yaml |    8 +
 .../parallel/2-workload/test_rbd_python.yaml       |    8 +
 .../parallel/3-upgrade-sequence/upgrade-all.yaml   |   17 +
 .../3-upgrade-sequence/upgrade-osd-mds-mon.yaml    |   38 +
 qa/suites/upgrade/hammer-x/parallel/4-jewel.yaml   |    5 +
 .../upgrade/hammer-x/parallel/5-final-workload/+   |    0
 .../parallel/5-final-workload/blogbench.yaml       |   13 +
 .../5-final-workload/rados-snaps-few-objects.yaml  |   14 +
 .../5-final-workload/rados_loadgenmix.yaml         |    6 +
 .../5-final-workload/rados_mon_thrash.yaml         |   14 +
 .../parallel/5-final-workload/rbd_cls.yaml         |    6 +
 .../5-final-workload/rbd_import_export.yaml        |    8 +
 .../parallel/5-final-workload/rgw_swift.yaml       |   10 +
 .../hammer-x/parallel/distros/centos_7.3.yaml      |    2 +
 .../hammer-x/parallel/distros/ubuntu_14.04.yaml    |    2 +
 .../hammer-x/stress-split-erasure-code-x86_64/%    |    0
 .../stress-split-erasure-code-x86_64/0-cluster/+   |    0
 .../0-cluster/openstack.yaml                       |    3 +
 .../0-cluster/start.yaml                           |   17 +
 .../0-tz-eastern.yaml                              |    4 +
 .../stress-split-erasure-code-x86_64/0-x86_64.yaml |    1 +
 .../1-hammer-install/hammer.yaml                   |    7 +
 .../2-partial-upgrade/firsthalf.yaml               |    7 +
 .../3-thrash/default.yaml                          |   16 +
 .../4-mon/mona.yaml                                |    6 +
 .../5-workload/ec-rados-default.yaml               |   19 +
 .../6-next-mon/monb.yaml                           |    6 +
 .../8-finish-upgrade/last-osds-and-monc.yaml       |   20 +
 .../9-workload/ec-rados-plugin=isa-k=2-m=1.yaml    |   25 +
 .../upgrade/hammer-x/stress-split-erasure-code/%   |    0
 .../hammer-x/stress-split-erasure-code/0-cluster/+ |    0
 .../0-cluster/openstack.yaml                       |    3 +
 .../stress-split-erasure-code/0-cluster/start.yaml |   17 +
 .../stress-split-erasure-code/0-tz-eastern.yaml    |    4 +
 .../1-hammer-install/hammer.yaml                   |    7 +
 .../2-partial-upgrade/firsthalf.yaml               |    7 +
 .../3-thrash/default.yaml                          |   18 +
 .../stress-split-erasure-code/4-mon/mona.yaml      |    6 +
 .../5-workload/ec-no-shec.yaml                     |    9 +
 .../5-workload/ec-rados-default.yaml               |   19 +
 .../stress-split-erasure-code/6-next-mon/monb.yaml |    6 +
 .../8-finish-upgrade/last-osds-and-monc.yaml       |   20 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   31 +
 .../distros/centos_7.3.yaml                        |    2 +
 .../distros/ubuntu_14.04.yaml                      |    2 +
 qa/suites/upgrade/hammer-x/stress-split/%          |    0
 .../upgrade/hammer-x/stress-split/0-cluster/+      |    0
 .../hammer-x/stress-split/0-cluster/openstack.yaml |    3 +
 .../hammer-x/stress-split/0-cluster/start.yaml     |   17 +
 .../hammer-x/stress-split/0-tz-eastern.yaml        |    4 +
 .../stress-split/1-hammer-install/hammer.yaml      |    7 +
 .../stress-split/2-partial-upgrade/firsthalf.yaml  |    7 +
 .../hammer-x/stress-split/3-thrash/default.yaml    |   19 +
 .../upgrade/hammer-x/stress-split/4-mon/mona.yaml  |    6 +
 .../upgrade/hammer-x/stress-split/5-workload/+     |    0
 .../hammer-x/stress-split/5-workload/rbd-cls.yaml  |    7 +
 .../stress-split/5-workload/rbd-import-export.yaml |    9 +
 .../stress-split/5-workload/readwrite.yaml         |   12 +
 .../stress-split/5-workload/snaps-few-objects.yaml |   15 +
 .../hammer-x/stress-split/6-next-mon/monb.yaml     |    6 +
 .../upgrade/hammer-x/stress-split/7-workload/+     |    0
 .../stress-split/7-workload/radosbench.yaml        |   36 +
 .../hammer-x/stress-split/7-workload/rbd_api.yaml  |    7 +
 .../8-finish-upgrade/last-osds-and-monc.yaml       |   20 +
 .../upgrade/hammer-x/stress-split/9-workload/+     |    0
 .../stress-split/9-workload/rbd-python.yaml        |    7 +
 .../stress-split/9-workload/rgw-swift.yaml         |    9 +
 .../9-workload/snaps-many-objects.yaml             |   13 +
 .../hammer-x/stress-split/distros/centos_7.3.yaml  |    2 +
 .../stress-split/distros/ubuntu_14.04.yaml         |    2 +
 qa/suites/upgrade/hammer-x/tiering/%               |    0
 .../upgrade/hammer-x/tiering/0-cluster/start.yaml  |   22 +
 .../hammer-x/tiering/1-hammer-install/hammer.yaml  |    7 +
 .../hammer-x/tiering/2-setup-cache-tiering/%       |    0
 .../0-create-base-tier/create-ec-pool.yaml         |    5 +
 .../0-create-base-tier/create-replicated-pool.yaml |    4 +
 .../1-create-cache-tier/create-cache-tier.yaml     |   10 +
 .../hammer-x/tiering/3-upgrade/upgrade.yaml        |   90 +
 .../tiering/4-finish-upgrade/flip-success.yaml     |   28 +
 .../hammer-x/tiering/distros/centos_7.3.yaml       |    2 +
 .../hammer-x/tiering/distros/ubuntu_14.04.yaml     |    2 +
 qa/suites/upgrade/hammer-x/v0-94-4-stop/+          |    0
 .../hammer-x/v0-94-4-stop/distros/centos_7.3.yaml  |    2 +
 .../v0-94-4-stop/distros/ubuntu_14.04.yaml         |    2 +
 .../upgrade/hammer-x/v0-94-4-stop/ignore.yaml      |    8 +
 .../hammer-x/v0-94-4-stop/v0-94-4-stop.yaml        |  110 +
 qa/suites/upgrade/infernalis-x/parallel/%          |    0
 .../upgrade/infernalis-x/parallel/0-cluster/+      |    0
 .../infernalis-x/parallel/0-cluster/openstack.yaml |    4 +
 .../infernalis-x/parallel/0-cluster/start.yaml     |   30 +
 .../parallel/1-infernalis-install/infernalis.yaml  |   22 +
 .../upgrade/infernalis-x/parallel/2-workload/+     |    0
 .../parallel/2-workload/blogbench.yaml             |   14 +
 .../parallel/2-workload/ec-rados-default.yaml      |   24 +
 .../parallel/2-workload/rados_api.yaml             |   11 +
 .../parallel/2-workload/rados_loadgenbig.yaml      |   11 +
 .../parallel/2-workload/test_rbd_api.yaml          |   11 +
 .../parallel/2-workload/test_rbd_python.yaml       |   11 +
 .../parallel/3-upgrade-sequence/upgrade-all.yaml   |   16 +
 .../3-upgrade-sequence/upgrade-mon-osd-mds.yaml    |   43 +
 .../upgrade/infernalis-x/parallel/4-jewel.yaml     |    5 +
 .../infernalis-x/parallel/5-final-workload/+       |    0
 .../parallel/5-final-workload/blogbench.yaml       |   13 +
 .../5-final-workload/rados-snaps-few-objects.yaml  |   17 +
 .../5-final-workload/rados_loadgenmix.yaml         |    9 +
 .../5-final-workload/rados_mon_thrash.yaml         |   17 +
 .../parallel/5-final-workload/rbd_cls.yaml         |    9 +
 .../5-final-workload/rbd_import_export.yaml        |   11 +
 .../parallel/5-final-workload/rgw_swift.yaml       |   13 +
 .../infernalis-x/parallel/distros/centos_7.3.yaml  |    2 +
 .../parallel/distros/ubuntu_14.04.yaml             |    2 +
 qa/suites/upgrade/infernalis-x/point-to-point-x/%  |    0
 .../point-to-point-x/distros/centos_7.3.yaml       |    2 +
 .../point-to-point-x/distros/ubuntu_14.04.yaml     |    2 +
 .../point-to-point-x/point-to-point.yaml           |  204 +
 .../stress-split-erasure-code-x86_64/%             |    0
 .../stress-split-erasure-code-x86_64/0-cluster/+   |    0
 .../0-cluster/openstack.yaml                       |    6 +
 .../0-cluster/start.yaml                           |   23 +
 .../stress-split-erasure-code-x86_64/0-x86_64.yaml |    1 +
 .../1-infernalis-install/infernalis.yaml           |    8 +
 .../2-partial-upgrade/firsthalf.yaml               |   12 +
 .../3-thrash/default.yaml                          |   20 +
 .../4-mon/mona.yaml                                |    9 +
 .../5-workload/ec-rados-default.yaml               |   22 +
 .../6-next-mon/monb.yaml                           |    9 +
 .../8-next-mon/monc.yaml                           |   12 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   35 +
 .../infernalis-x/stress-split-erasure-code/%       |    0
 .../stress-split-erasure-code/0-cluster/+          |    0
 .../0-cluster/openstack.yaml                       |    6 +
 .../stress-split-erasure-code/0-cluster/start.yaml |   23 +
 .../1-infernalis-install/infernalis.yaml           |    8 +
 .../2-partial-upgrade/firsthalf.yaml               |   12 +
 .../3-thrash/default.yaml                          |   20 +
 .../stress-split-erasure-code/4-mon/mona.yaml      |    9 +
 .../5-workload/ec-rados-default.yaml               |   22 +
 .../stress-split-erasure-code/6-next-mon/monb.yaml |    9 +
 .../stress-split-erasure-code/8-next-mon/monc.yaml |   12 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   35 +
 .../distros/centos_7.3.yaml                        |    2 +
 .../distros/ubuntu_14.04.yaml                      |    2 +
 qa/suites/upgrade/infernalis-x/stress-split/%      |    0
 .../upgrade/infernalis-x/stress-split/0-cluster/+  |    0
 .../stress-split/0-cluster/openstack.yaml          |    6 +
 .../infernalis-x/stress-split/0-cluster/start.yaml |   23 +
 .../1-infernalis-install/infernalis.yaml           |    8 +
 .../stress-split/2-partial-upgrade/firsthalf.yaml  |   12 +
 .../stress-split/3-thrash/default.yaml             |   19 +
 .../infernalis-x/stress-split/4-mon/mona.yaml      |    9 +
 .../upgrade/infernalis-x/stress-split/5-workload/+ |    0
 .../stress-split/5-workload/rbd-cls.yaml           |   10 +
 .../stress-split/5-workload/rbd-import-export.yaml |   12 +
 .../stress-split/5-workload/readwrite.yaml         |   16 +
 .../stress-split/5-workload/snaps-few-objects.yaml |   18 +
 .../infernalis-x/stress-split/6-next-mon/monb.yaml |    9 +
 .../upgrade/infernalis-x/stress-split/7-workload/+ |    0
 .../stress-split/7-workload/radosbench.yaml        |   40 +
 .../stress-split/7-workload/rbd_api.yaml           |   10 +
 .../infernalis-x/stress-split/8-next-mon/monc.yaml |   12 +
 .../upgrade/infernalis-x/stress-split/9-workload/+ |    0
 .../stress-split/9-workload/rbd-python.yaml        |   10 +
 .../stress-split/9-workload/rgw-swift.yaml         |   12 +
 .../9-workload/snaps-many-objects.yaml             |   16 +
 .../stress-split/distros/centos_7.3.yaml           |    2 +
 .../stress-split/distros/ubuntu_14.04.yaml         |    2 +
 qa/suites/upgrade/jewel-x/parallel/%               |    0
 qa/suites/upgrade/jewel-x/parallel/0-cluster/+     |    0
 .../jewel-x/parallel/0-cluster/openstack.yaml      |    4 +
 .../upgrade/jewel-x/parallel/0-cluster/start.yaml  |   28 +
 .../jewel-x/parallel/1-jewel-install/jewel.yaml    |   22 +
 qa/suites/upgrade/jewel-x/parallel/2-workload/+    |    0
 .../jewel-x/parallel/2-workload/blogbench.yaml     |   14 +
 .../parallel/2-workload/ec-rados-default.yaml      |   24 +
 .../jewel-x/parallel/2-workload/rados_api.yaml     |   11 +
 .../parallel/2-workload/rados_loadgenbig.yaml      |   11 +
 .../jewel-x/parallel/2-workload/test_rbd_api.yaml  |   11 +
 .../parallel/2-workload/test_rbd_python.yaml       |   11 +
 .../parallel/3-upgrade-sequence/upgrade-all.yaml   |    7 +
 .../3-upgrade-sequence/upgrade-mon-osd-mds.yaml    |   37 +
 .../upgrade/jewel-x/parallel/5-final-workload/+    |    0
 .../parallel/5-final-workload/blogbench.yaml       |   13 +
 .../5-final-workload/rados-snaps-few-objects.yaml  |   17 +
 .../5-final-workload/rados_loadgenmix.yaml         |    9 +
 .../5-final-workload/rados_mon_thrash.yaml         |   17 +
 .../jewel-x/parallel/5-final-workload/rbd_cls.yaml |    9 +
 .../5-final-workload/rbd_import_export.yaml        |   11 +
 .../parallel/5-final-workload/rgw_swift.yaml       |   13 +
 .../jewel-x/parallel/distros/centos_7.3.yaml       |    2 +
 .../jewel-x/parallel/distros/ubuntu_14.04.yaml     |    2 +
 qa/suites/upgrade/jewel-x/parallel/kraken.yaml     |    1 +
 qa/suites/upgrade/jewel-x/point-to-point-x/%       |    0
 .../jewel-x/point-to-point-x/distros/centos.yaml   |    2 +
 .../point-to-point-x/point-to-point-upgrade.yaml   |  214 +
 .../jewel-x/stress-split-erasure-code-x86_64/%     |    0
 .../stress-split-erasure-code-x86_64/0-cluster/+   |    0
 .../0-cluster/openstack.yaml                       |    6 +
 .../0-cluster/start.yaml                           |   23 +
 .../stress-split-erasure-code-x86_64/0-x86_64.yaml |    1 +
 .../1-jewel-install/jewel.yaml                     |    8 +
 .../2-partial-upgrade/firsthalf.yaml               |   12 +
 .../3-thrash/default.yaml                          |   18 +
 .../4-mon/mona.yaml                                |    9 +
 .../5-workload/ec-rados-default.yaml               |   22 +
 .../6-next-mon/monb.yaml                           |    9 +
 .../8-next-mon/monc.yaml                           |   12 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   35 +
 .../upgrade/jewel-x/stress-split-erasure-code/%    |    0
 .../jewel-x/stress-split-erasure-code/0-cluster/+  |    0
 .../0-cluster/openstack.yaml                       |    6 +
 .../stress-split-erasure-code/0-cluster/start.yaml |   23 +
 .../1-jewel-install/jewel.yaml                     |    8 +
 .../2-partial-upgrade/firsthalf.yaml               |   12 +
 .../3-thrash/default.yaml                          |   18 +
 .../stress-split-erasure-code/4-mon/mona.yaml      |    9 +
 .../5-workload/ec-rados-default.yaml               |   22 +
 .../stress-split-erasure-code/6-next-mon/monb.yaml |    9 +
 .../stress-split-erasure-code/8-next-mon/monc.yaml |   12 +
 .../ec-rados-plugin=jerasure-k=3-m=1.yaml          |   35 +
 .../distros/centos_7.3.yaml                        |    2 +
 .../distros/ubuntu_14.04.yaml                      |    2 +
 qa/suites/upgrade/jewel-x/stress-split/%           |    0
 qa/suites/upgrade/jewel-x/stress-split/0-cluster/+ |    0
 .../jewel-x/stress-split/0-cluster/openstack.yaml  |    6 +
 .../jewel-x/stress-split/0-cluster/start.yaml      |   23 +
 .../stress-split/1-jewel-install/jewel.yaml        |    8 +
 .../stress-split/2-partial-upgrade/firsthalf.yaml  |   12 +
 .../jewel-x/stress-split/3-thrash/default.yaml     |   17 +
 .../upgrade/jewel-x/stress-split/4-mon/mona.yaml   |    9 +
 .../upgrade/jewel-x/stress-split/5-workload/+      |    0
 .../jewel-x/stress-split/5-workload/rbd-cls.yaml   |   10 +
 .../stress-split/5-workload/rbd-import-export.yaml |   12 +
 .../jewel-x/stress-split/5-workload/readwrite.yaml |   16 +
 .../stress-split/5-workload/snaps-few-objects.yaml |   18 +
 .../jewel-x/stress-split/6-next-mon/monb.yaml      |    9 +
 .../upgrade/jewel-x/stress-split/7-workload/+      |    0
 .../stress-split/7-workload/radosbench.yaml        |   40 +
 .../jewel-x/stress-split/7-workload/rbd_api.yaml   |   10 +
 .../jewel-x/stress-split/8-next-mon/monc.yaml      |   12 +
 .../upgrade/jewel-x/stress-split/9-workload/+      |    0
 .../stress-split/9-workload/rbd-python.yaml        |   10 +
 .../jewel-x/stress-split/9-workload/rgw-swift.yaml |   12 +
 .../9-workload/snaps-many-objects.yaml             |   16 +
 .../jewel-x/stress-split/distros/centos_7.3.yaml   |    2 +
 .../jewel-x/stress-split/distros/ubuntu_14.04.yaml |    2 +
 qa/tasks/__init__.py                               |    6 +
 qa/tasks/admin_socket.py                           |  199 +
 qa/tasks/apache.conf.template                      |   48 +
 qa/tasks/autotest.py                               |  166 +
 qa/tasks/blktrace.py                               |   96 +
 qa/tasks/boto.cfg.template                         |    2 +
 qa/tasks/calamari_nosetests.py                     |  289 ++
 qa/tasks/calamari_setup.py                         |  467 ++
 qa/tasks/ceph.py                                   | 1531 ++++++
 qa/tasks/ceph_client.py                            |   42 +
 qa/tasks/ceph_deploy.py                            |  694 +++
 qa/tasks/ceph_fuse.py                              |  145 +
 qa/tasks/ceph_manager.py                           | 2014 ++++++++
 qa/tasks/ceph_objectstore_tool.py                  |  670 +++
 qa/tasks/cephfs/__init__.py                        |    0
 qa/tasks/cephfs/cephfs_test_case.py                |  411 ++
 qa/tasks/cephfs/filesystem.py                      |  905 ++++
 qa/tasks/cephfs/fuse_mount.py                      |  404 ++
 qa/tasks/cephfs/kernel_mount.py                    |  246 +
 qa/tasks/cephfs/mount.py                           |  585 +++
 qa/tasks/cephfs/test_auto_repair.py                |   90 +
 qa/tasks/cephfs/test_backtrace.py                  |   82 +
 qa/tasks/cephfs/test_cap_flush.py                  |   63 +
 qa/tasks/cephfs/test_client_limits.py              |  219 +
 qa/tasks/cephfs/test_client_recovery.py            |  432 ++
 qa/tasks/cephfs/test_config_commands.py            |   63 +
 qa/tasks/cephfs/test_damage.py                     |  534 +++
 qa/tasks/cephfs/test_data_scan.py                  |  511 ++
 qa/tasks/cephfs/test_failover.py                   |  465 ++
 qa/tasks/cephfs/test_flush.py                      |  113 +
 qa/tasks/cephfs/test_forward_scrub.py              |  196 +
 qa/tasks/cephfs/test_full.py                       |  410 ++
 qa/tasks/cephfs/test_journal_migration.py          |   89 +
 qa/tasks/cephfs/test_journal_repair.py             |  439 ++
 qa/tasks/cephfs/test_misc.py                       |   33 +
 qa/tasks/cephfs/test_pool_perm.py                  |  117 +
 qa/tasks/cephfs/test_scrub_checks.py               |  245 +
 qa/tasks/cephfs/test_sessionmap.py                 |  235 +
 qa/tasks/cephfs/test_strays.py                     |  684 +++
 qa/tasks/cephfs/test_volume_client.py              |  896 ++++
 qa/tasks/cephfs/vstart_runner.py                   |  917 ++++
 qa/tasks/cephfs_test_runner.py                     |  188 +
 qa/tasks/cifs_mount.py                             |  137 +
 qa/tasks/cram.py                                   |  155 +
 qa/tasks/create_verify_lfn_objects.py              |   83 +
 qa/tasks/devstack.py                               |  382 ++
 qa/tasks/die_on_err.py                             |   70 +
 qa/tasks/divergent_priors.py                       |  171 +
 qa/tasks/divergent_priors2.py                      |  207 +
 qa/tasks/dump_stuck.py                             |  146 +
 qa/tasks/ec_lost_unfound.py                        |  167 +
 qa/tasks/filestore_idempotent.py                   |   81 +
 qa/tasks/kclient.py                                |  107 +
 qa/tasks/locktest.py                               |  134 +
 qa/tasks/logrotate.conf                            |   13 +
 qa/tasks/lost_unfound.py                           |  183 +
 qa/tasks/manypools.py                              |   73 +
 qa/tasks/mds_creation_failure.py                   |   85 +
 qa/tasks/mds_thrash.py                             |  415 ++
 qa/tasks/metadata.yaml                             |    2 +
 qa/tasks/mod_fastcgi.conf.template                 |   17 +
 qa/tasks/mod_proxy_fcgi.tcp.conf.template          |   16 +
 qa/tasks/mod_proxy_fcgi.uds.conf.template          |   14 +
 qa/tasks/mon_clock_skew_check.py                   |  261 ++
 qa/tasks/mon_recovery.py                           |   80 +
 qa/tasks/mon_thrash.py                             |  343 ++
 qa/tasks/multibench.py                             |   57 +
 qa/tasks/object_source_down.py                     |  104 +
 qa/tasks/omapbench.py                              |   83 +
 qa/tasks/osd_backfill.py                           |  105 +
 qa/tasks/osd_failsafe_enospc.py                    |  218 +
 qa/tasks/osd_recovery.py                           |  208 +
 qa/tasks/peer.py                                   |   97 +
 qa/tasks/peering_speed_test.py                     |   87 +
 qa/tasks/populate_rbd_pool.py                      |   82 +
 qa/tasks/qemu.py                                   |  473 ++
 qa/tasks/rados.py                                  |  250 +
 qa/tasks/radosbench.py                             |  104 +
 qa/tasks/radosgw_admin.py                          | 1034 +++++
 qa/tasks/radosgw_admin_rest.py                     |  668 +++
 qa/tasks/radosgw_agent.py                          |  211 +
 qa/tasks/rbd.py                                    |  598 +++
 qa/tasks/rbd_fio.py                                |  214 +
 qa/tasks/rbd_fsx.py                                |  102 +
 qa/tasks/rbd_mirror.py                             |  117 +
 qa/tasks/recovery_bench.py                         |  208 +
 qa/tasks/reg11184.py                               |  242 +
 qa/tasks/rep_lost_unfound_delete.py                |  184 +
 qa/tasks/repair_test.py                            |  304 ++
 qa/tasks/rest_api.py                               |  184 +
 qa/tasks/restart.py                                |  163 +
 qa/tasks/rgw.py                                    | 1377 ++++++
 qa/tasks/rgw_logsocket.py                          |  161 +
 qa/tasks/s3readwrite.py                            |  346 ++
 qa/tasks/s3roundtrip.py                            |  302 ++
 qa/tasks/s3tests.py                                |  449 ++
 qa/tasks/samba.py                                  |  245 +
 qa/tasks/scrub.py                                  |  117 +
 qa/tasks/scrub_test.py                             |  383 ++
 qa/tasks/tests/__init__.py                         |    0
 qa/tasks/tests/test_buildpackages.py               |  170 +
 qa/tasks/tests/test_devstack.py                    |   48 +
 qa/tasks/tests/test_radosgw_admin.py               |   31 +
 qa/tasks/teuthology_integration.py                 |   19 +
 qa/tasks/tgt.py                                    |  177 +
 qa/tasks/thrash_pool_snaps.py                      |   61 +
 qa/tasks/thrashosds.py                             |  161 +
 qa/tasks/userdata_setup.yaml                       |   25 +
 qa/tasks/userdata_teardown.yaml                    |   11 +
 qa/tasks/util/__init__.py                          |   26 +
 qa/tasks/util/rados.py                             |   79 +
 qa/tasks/util/rgw.py                               |  181 +
 qa/tasks/util/test/__init__.py                     |    0
 qa/tasks/util/test/test_rados.py                   |   40 +
 qa/tasks/watch_notify_same_primary.py              |  134 +
 qa/tasks/watch_notify_stress.py                    |   69 +
 qa/tasks/workunit.py                               |  428 ++
 qa/timezone/eastern.yaml                           |    4 +
 qa/timezone/pacific.yaml                           |    4 +
 qa/timezone/random.yaml                            |    5 +
 qa/tox.ini                                         |    8 +
 qa/workunits/ceph-helpers.sh                       |    4 +-
 qa/workunits/fs/test_python.sh                     |    6 +-
 qa/workunits/rados/test_cache_pool.sh              |   31 +
 qa/workunits/rados/test_python.sh                  |    6 +-
 qa/workunits/rados/test_rados_tool.sh              |   23 +-
 qa/workunits/rbd/notify_master.sh                  |    6 +-
 qa/workunits/rbd/notify_slave.sh                   |    6 +-
 qa/workunits/rbd/qemu-iotests.sh                   |   15 +-
 qa/workunits/rbd/rbd-nbd.sh                        |    4 +
 qa/workunits/rbd/rbd_mirror.sh                     |   57 +-
 qa/workunits/rbd/rbd_mirror_helpers.sh             |   36 +-
 qa/workunits/rbd/rbd_mirror_stress.sh              |   22 +-
 qa/workunits/rbd/test_librbd_python.sh             |    9 +-
 qa/workunits/rbd/test_lock_fence.sh                |    6 +-
 qa/workunits/suites/pjd.sh                         |    1 -
 selinux/ceph.te                                    |    1 +
 src/.git_version                                   |    4 +-
 src/Makefile.in                                    | 1480 +++---
 src/ceph-create-keys                               |   23 +-
 src/ceph-disk/ceph_disk/main.py                    |   41 +-
 src/ceph_mds.cc                                    |    5 +-
 src/client/Client.cc                               |  121 +-
 src/client/Client.h                                |    2 +-
 src/cls/Makefile-client.am                         |   12 +-
 src/cls/Makefile-server.am                         |    3 +
 src/cls/lock/cls_lock.cc                           |   92 +
 src/cls/lock/cls_lock_client.cc                    |   16 +
 src/cls/lock/cls_lock_client.h                     |    5 +
 src/cls/lock/cls_lock_ops.cc                       |   21 +
 src/cls/lock/cls_lock_ops.h                        |   36 +
 src/cls/rgw/cls_rgw.cc                             |  196 +-
 src/cls/rgw/cls_rgw_client.cc                      |   20 +
 src/cls/rgw/cls_rgw_client.h                       |    4 +
 src/cls/rgw/cls_rgw_ops.cc                         |   23 +
 src/cls/rgw/cls_rgw_ops.h                          |   24 +
 src/cls/rgw/cls_rgw_types.cc                       |   32 +
 src/cls/rgw/cls_rgw_types.h                        |   15 +
 src/common/Makefile.am                             |    6 +-
 src/common/ceph_hash.cc                            |    2 +-
 src/common/config_opts.h                           |   12 +
 src/common/util.cc                                 |   77 +-
 src/crush/CrushCompiler.cc                         |    8 +-
 src/global/global_init.cc                          |   10 +
 src/include/ceph_fs.h                              |    1 -
 src/include/filepath.h                             |    1 +
 src/include/rados/rgw_file.h                       |   12 +-
 src/include/rbd/librbd.h                           |   28 +-
 src/include/rbd/librbd.hpp                         |    8 +
 src/include/str_list.h                             |    8 +
 src/include/utime.h                                |    4 +-
 src/journal/FutureImpl.cc                          |    9 +-
 src/journal/FutureImpl.h                           |    1 +
 src/journal/JournalMetadata.cc                     |    4 +
 src/librados/RadosClient.cc                        |    6 +
 src/librados/librados.cc                           |    4 +-
 src/librbd/AioImageRequestWQ.cc                    |   16 +-
 src/librbd/AioObjectRequest.cc                     |   45 +-
 src/librbd/BlockGuard.h                            |  172 +
 src/librbd/CMakeLists.txt                          |    5 +
 src/librbd/CopyupRequest.cc                        |   10 +-
 src/librbd/DiffIterate.cc                          |    9 +-
 src/librbd/ExclusiveLock.cc                        |  195 +-
 src/librbd/ExclusiveLock.h                         |   26 +-
 src/librbd/ImageCtx.cc                             |   91 +-
 src/librbd/ImageCtx.h                              |   11 +-
 src/librbd/ImageState.cc                           |   88 +-
 src/librbd/ImageState.h                            |   27 +-
 src/librbd/ImageWatcher.cc                         |  155 +-
 src/librbd/ImageWatcher.h                          |    7 +-
 src/librbd/Journal.cc                              |  190 +-
 src/librbd/Journal.h                               |    2 +
 src/librbd/LibrbdWriteback.cc                      |    7 +-
 src/librbd/Makefile.am                             |   12 +
 src/librbd/ObjectMap.cc                            |  190 +-
 src/librbd/ObjectMap.h                             |   94 +-
 src/librbd/Operations.cc                           |    9 +-
 src/librbd/exclusive_lock/AcquireRequest.cc        |  309 +-
 src/librbd/exclusive_lock/AcquireRequest.h         |   67 +-
 src/librbd/exclusive_lock/AutomaticPolicy.cc       |   29 +
 src/librbd/exclusive_lock/AutomaticPolicy.h        |   34 +
 src/librbd/exclusive_lock/BreakRequest.cc          |  184 +
 src/librbd/exclusive_lock/BreakRequest.h           |   95 +
 src/librbd/exclusive_lock/GetLockerRequest.cc      |  124 +
 src/librbd/exclusive_lock/GetLockerRequest.h       |   53 +
 src/librbd/exclusive_lock/Policy.h                 |    3 +-
 src/librbd/exclusive_lock/ReacquireRequest.cc      |   72 +
 src/librbd/exclusive_lock/ReacquireRequest.h       |   63 +
 src/librbd/exclusive_lock/ReleaseRequest.cc        |   89 +-
 src/librbd/exclusive_lock/ReleaseRequest.h         |   19 +-
 src/librbd/exclusive_lock/StandardPolicy.cc        |   12 +-
 src/librbd/exclusive_lock/StandardPolicy.h         |    8 +-
 src/librbd/exclusive_lock/Types.h                  |   23 +
 src/librbd/image/OpenRequest.cc                    |    8 +-
 src/librbd/image/OpenRequest.h                     |    8 +-
 src/librbd/image/RefreshParentRequest.cc           |    2 +-
 src/librbd/image/RefreshRequest.cc                 |    7 +-
 src/librbd/image/RefreshRequest.h                  |    9 +-
 src/librbd/image/SetSnapRequest.cc                 |    5 +-
 src/librbd/image/SetSnapRequest.h                  |    4 +-
 src/librbd/image_watcher/RewatchRequest.cc         |  126 +
 src/librbd/image_watcher/RewatchRequest.h          |   78 +
 src/librbd/internal.cc                             |  219 +-
 src/librbd/internal.h                              |    6 +
 src/librbd/librbd.cc                               |  174 +-
 src/librbd/object_map/LockRequest.cc               |   10 +-
 src/librbd/object_map/RefreshRequest.cc            |    8 +-
 src/librbd/object_map/RefreshRequest.h             |    6 +
 src/librbd/object_map/Request.cc                   |    5 +-
 src/librbd/object_map/ResizeRequest.cc             |    6 +-
 src/librbd/object_map/SnapshotCreateRequest.cc     |    6 +-
 src/librbd/object_map/SnapshotRemoveRequest.cc     |    6 +-
 src/librbd/object_map/SnapshotRollbackRequest.cc   |    5 +-
 src/librbd/object_map/UnlockRequest.cc             |    2 +-
 src/librbd/object_map/UnlockRequest.h              |    4 +
 src/librbd/object_map/UpdateRequest.cc             |   39 +-
 src/librbd/object_map/UpdateRequest.h              |   16 +-
 src/librbd/operation/ResizeRequest.cc              |    2 +-
 src/librbd/operation/SnapshotRollbackRequest.cc    |    2 +-
 src/librbd/operation/TrimRequest.cc                |   44 +-
 src/mds/Beacon.cc                                  |    7 +-
 src/mds/CDentry.cc                                 |   30 +-
 src/mds/CDentry.h                                  |    4 +-
 src/mds/CDir.cc                                    |    2 +-
 src/mds/CDir.h                                     |    1 +
 src/mds/CInode.cc                                  |   62 +-
 src/mds/CInode.h                                   |    6 +-
 src/mds/FSMap.cc                                   |   99 +-
 src/mds/FSMap.h                                    |   32 +-
 src/mds/Locker.cc                                  |   19 +-
 src/mds/MDCache.cc                                 |  117 +-
 src/mds/MDCache.h                                  |   11 +-
 src/mds/MDLog.cc                                   |   34 +-
 src/mds/MDSDaemon.cc                               |   23 +-
 src/mds/MDSRank.cc                                 |   71 +-
 src/mds/MDSRank.h                                  |    2 +-
 src/mds/ScatterLock.h                              |    8 +
 src/mds/Server.cc                                  |   28 +-
 src/mds/SessionMap.cc                              |   20 +-
 src/mds/SessionMap.h                               |    4 +-
 src/mds/SimpleLock.h                               |    4 +-
 src/mds/locks.c                                    |    1 +
 src/mds/locks.h                                    |    1 +
 src/mon/MDSMonitor.cc                              |   23 +-
 src/mon/MonCap.cc                                  |    6 +
 src/mon/Monitor.cc                                 |   30 +-
 src/mon/Monitor.h                                  |    3 -
 src/mon/MonmapMonitor.cc                           |   28 +
 src/mon/MonmapMonitor.h                            |    7 +-
 src/mon/OSDMonitor.cc                              |   74 +-
 src/mon/OSDMonitor.h                               |    2 +-
 src/mon/PGMap.cc                                   |   13 +-
 src/mon/PGMonitor.cc                               |    3 +-
 src/msg/Message.h                                  |    4 +-
 src/msg/async/AsyncConnection.h                    |    4 +-
 src/msg/simple/Pipe.cc                             |    9 +-
 src/msg/simple/Pipe.h                              |    2 +-
 src/msg/simple/SimpleMessenger.cc                  |    4 +
 src/msg/xio/XioConnection.h                        |   22 +-
 src/os/filestore/DBObjectMap.cc                    |    2 +-
 src/os/filestore/FileStore.cc                      |   94 +-
 src/os/filestore/FileStore.h                       |    3 +-
 src/osd/OSD.cc                                     |   45 +-
 src/osd/OSD.h                                      |    1 +
 src/osd/PG.cc                                      |   38 +-
 src/osd/PGBackend.cc                               |    6 +-
 src/osd/ReplicatedBackend.cc                       |    4 +-
 src/osd/ReplicatedPG.cc                            |   14 +-
 src/osdc/ObjectCacher.cc                           |   11 +-
 src/pybind/ceph_volume_client.py                   |    9 +-
 src/pybind/rbd/rbd.pyx                             |  165 +-
 src/rgw/Makefile.am                                |   11 +-
 src/rgw/librgw.cc                                  |   16 +-
 src/rgw/rgw_admin.cc                               |  828 +++-
 src/rgw/rgw_bucket.cc                              |  207 +-
 src/rgw/rgw_bucket.h                               |   11 +-
 src/rgw/rgw_civetweb.cc                            |   11 +-
 src/rgw/rgw_common.h                               |   29 +-
 src/rgw/rgw_coroutine.cc                           |   14 +-
 src/rgw/rgw_coroutine.h                            |    4 +-
 src/rgw/rgw_cors.cc                                |    3 +-
 src/rgw/rgw_cr_rados.h                             |   36 +-
 src/rgw/rgw_data_sync.cc                           |   17 +-
 src/rgw/rgw_data_sync.h                            |    3 +-
 src/rgw/rgw_env.cc                                 |    2 +
 src/rgw/rgw_file.cc                                |   86 +-
 src/rgw/rgw_file.h                                 |   55 +-
 src/rgw/rgw_http_client.cc                         |  148 +-
 src/rgw/rgw_http_errors.h                          |    1 +
 src/rgw/rgw_json_enc.cc                            |   16 +-
 src/rgw/rgw_ldap.h                                 |    2 +-
 src/rgw/rgw_lib_frontend.h                         |    7 +
 src/rgw/rgw_log.cc                                 |   27 +-
 src/rgw/rgw_log.h                                  |   21 +-
 src/rgw/rgw_main.cc                                |   63 +-
 src/rgw/rgw_metadata.cc                            |  289 +-
 src/rgw/rgw_metadata.h                             |   45 +-
 src/rgw/rgw_op.cc                                  |   24 +-
 src/rgw/rgw_op.h                                   |   63 +
 src/rgw/rgw_period_history.cc                      |    2 +
 src/rgw/rgw_process.cc                             |    3 +-
 src/rgw/rgw_rados.cc                               |  387 +-
 src/rgw/rgw_rados.h                                |   38 +-
 src/rgw/rgw_rest.cc                                |   41 +-
 src/rgw/rgw_rest.h                                 |   45 +
 src/rgw/rgw_rest_log.cc                            |    2 +-
 src/rgw/rgw_rest_s3.cc                             |   11 +-
 src/rgw/rgw_rest_swift.cc                          |  224 +-
 src/rgw/rgw_rest_swift.h                           |  193 +
 src/rgw/rgw_swift.cc                               |   24 +-
 src/rgw/rgw_swift_auth.cc                          |   19 +-
 src/rgw/rgw_sync.cc                                |   71 +-
 src/rgw/rgw_sync.h                                 |    4 +-
 src/rgw/rgw_tools.cc                               |    2 +-
 src/rgw/rgw_tools.h                                |    2 +-
 src/test/Makefile-client.am                        |    9 +-
 src/test/Makefile.am                               |   10 +-
 src/test/centos-6/ceph.spec.in                     |    6 +-
 src/test/centos-6/install-deps.sh                  |   33 +-
 src/test/centos-7/ceph.spec.in                     |    6 +-
 src/test/centos-7/install-deps.sh                  |   33 +-
 .../cli/crushtool/compile-decompile-recompile.t    |    4 +
 src/test/cli/crushtool/missing-bucket.crushmap.txt |   39 +
 src/test/cli/radosgw-admin/help.t                  |   27 +-
 src/test/cli/rbd/help.t                            |    5 +-
 src/test/cls_lock/test_cls_lock.cc                 |   51 +
 src/test/common/test_util.cc                       |   15 +
 src/test/debian-jessie/install-deps.sh             |   33 +-
 src/test/encoding/types.h                          |    1 +
 src/test/fedora-21/ceph.spec.in                    |    6 +-
 src/test/fedora-21/install-deps.sh                 |   33 +-
 src/test/libcephfs/flock.cc                        |    7 +-
 src/test/libcephfs/test.cc                         |   38 +
 src/test/librbd/CMakeLists.txt                     |    6 +
 .../exclusive_lock/test_mock_AcquireRequest.cc     |  559 +--
 .../exclusive_lock/test_mock_BreakRequest.cc       |  249 +
 .../exclusive_lock/test_mock_GetLockerRequest.cc   |  216 +
 .../exclusive_lock/test_mock_ReacquireRequest.cc   |  101 +
 .../exclusive_lock/test_mock_ReleaseRequest.cc     |   92 +-
 src/test/librbd/image/test_mock_RefreshRequest.cc  |   77 +-
 .../image_watcher/test_mock_RewatchRequest.cc      |  215 +
 src/test/librbd/mock/MockExclusiveLock.h           |    2 +
 src/test/librbd/mock/MockImageCtx.h                |    3 +-
 src/test/librbd/mock/MockImageState.h              |    4 +-
 src/test/librbd/mock/MockObjectMap.h               |   21 +-
 .../librbd/object_map/test_mock_LockRequest.cc     |    9 +-
 .../librbd/object_map/test_mock_RefreshRequest.cc  |    8 +-
 .../librbd/object_map/test_mock_ResizeRequest.cc   |    2 +-
 .../object_map/test_mock_SnapshotCreateRequest.cc  |   10 +-
 .../object_map/test_mock_SnapshotRemoveRequest.cc  |    6 +-
 .../test_mock_SnapshotRollbackRequest.cc           |   10 +-
 .../librbd/object_map/test_mock_UnlockRequest.cc   |    3 +-
 .../librbd/object_map/test_mock_UpdateRequest.cc   |   18 +-
 .../librbd/operation/test_mock_ResizeRequest.cc    |    4 +-
 .../operation/test_mock_SnapshotRollbackRequest.cc |    4 +-
 src/test/librbd/test_BlockGuard.cc                 |   98 +
 src/test/librbd/test_ObjectMap.cc                  |   12 +-
 src/test/librbd/test_fixture.cc                    |    2 +-
 src/test/librbd/test_internal.cc                   |   56 +-
 src/test/librbd/test_librbd.cc                     |  252 +
 src/test/librbd/test_mock_ExclusiveLock.cc         |  122 +-
 src/test/librbd/test_mock_fixture.cc               |    2 -
 src/test/librbd/test_support.cc                    |    9 +-
 src/test/objectstore/store_test.cc                 |   21 +-
 src/test/opensuse-13.2/ceph.spec.in                |    6 +-
 src/test/opensuse-13.2/install-deps.sh             |   33 +-
 .../image_replayer/test_mock_BootstrapRequest.cc   |   71 +-
 .../image_replayer/test_mock_CreateImageRequest.cc |    2 +-
 .../image_sync/test_mock_ObjectCopyRequest.cc      |   21 +-
 .../image_sync/test_mock_SnapshotCreateRequest.cc  |    4 +-
 src/test/rbd_mirror/test_ImageDeleter.cc           |   20 +-
 src/test/rbd_mirror/test_ImageReplayer.cc          |   57 +-
 src/test/rbd_mirror/test_PoolWatcher.cc            |    2 +-
 src/test/rbd_mirror/test_fixture.cc                |    2 +-
 src/test/rbd_mirror/test_mock_ImageReplayer.cc     |    1 +
 src/test/ubuntu-12.04/install-deps.sh              |   33 +-
 src/test/ubuntu-14.04/install-deps.sh              |   33 +-
 src/tools/Makefile-client.am                       |    2 +
 src/tools/cephfs/DataScan.cc                       |    4 +-
 src/tools/cephfs/RoleSelector.cc                   |    4 +-
 src/tools/rados/rados.cc                           |  253 +-
 src/tools/rbd/Utils.cc                             |   13 +-
 src/tools/rbd/action/BenchWrite.cc                 |    5 +
 src/tools/rbd/action/DiskUsage.cc                  |    6 +
 src/tools/rbd/action/Info.cc                       |    4 +-
 src/tools/rbd/action/Journal.cc                    |    4 +-
 src/tools/rbd/action/Nbd.cc                        |   12 +-
 src/tools/rbd/action/Status.cc                     |   16 +-
 src/tools/rbd/action/Watch.cc                      |   12 +-
 src/tools/rbd_mirror/ImageDeleter.cc               |    2 +-
 src/tools/rbd_mirror/ImageReplayer.cc              |   20 +-
 src/tools/rbd_mirror/ImageReplayer.h               |    1 +
 .../rbd_mirror/image_replayer/BootstrapRequest.cc  |   66 +-
 .../rbd_mirror/image_replayer/BootstrapRequest.h   |   15 +-
 .../rbd_mirror/image_replayer/IsPrimaryRequest.cc  |  122 +
 .../rbd_mirror/image_replayer/IsPrimaryRequest.h   |   67 +
 .../rbd_mirror/image_replayer/OpenImageRequest.cc  |    2 +-
 .../image_replayer/OpenLocalImageRequest.cc        |   73 +-
 .../image_replayer/OpenLocalImageRequest.h         |    7 +
 .../rbd_mirror/image_sync/ObjectCopyRequest.cc     |    9 +-
 .../rbd_mirror/image_sync/SnapshotCopyRequest.cc   |   10 +-
 .../rbd_mirror/image_sync/SnapshotCreateRequest.cc |    4 +-
 src/tools/rbd_nbd/rbd-nbd.cc                       |   34 +-
 src/tracing/librbd.tp                              |   78 +
 systemd/ceph-disk at .service                         |    4 +-
 systemd/ceph-mon at .service                          |    3 +-
 systemd/ceph-osd at .service                          |    3 +-
 1937 files changed, 60162 insertions(+), 3417 deletions(-)

diff --git a/AUTHORS b/AUTHORS
index 57b2e4d..383d165 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -42,6 +42,8 @@ Andre Noll <maan at systemlinux.org>
 Andrew Bartlett <abartlet at catalyst.net.nz>
 Andrew Farmer <andrewf at hq.newdream.net>
 Andrew Leung <aleung at cs.ucsc.edu>
+Andrew Schoen <andrew.schoen at gmail.com>
+Andrew Schoen <aschoen at redhat.com>
 Andrew Woodward <awoodward at mirantis.com>
 Andrey Kuznetsov <Andrey_Kuznetsov at epam.com>
 Andrey Stepachev <octo at yandex-team.ru>
@@ -80,6 +82,7 @@ Brian Chrisman <brchrisman at gmail.com>
 Brian Felton <bjfelton at gmail.com>
 Brian Rak <dn at devicenull.org>
 Brown, David M JR <david.brown at pnl.gov>
+buck <buck at vm-buck-ceph-bindings.(none)>
 Burkhard Linke <Burkhard.Linke at computational.bio.uni-giessen.de>
 Caleb Miles <caleb.miles at inktank.com>
 Carlos Maltzahn <carlosm at cs.ucsc.edu>
@@ -101,6 +104,7 @@ Chengyuan Li <chengyli at ebay.com>
 Chris Dunlop <chris at onthe.net.au>
 Chris Glass <tribaal at gmail.com>
 Chris Holcombe <chris.holcombe at nebula.com>
+Chris Lee <chris.lee at dreamhost.com>
 Christian Brunner <christian at brunner-muc.de>
 Christian Marie <pingu at anchor.net.au>
 Christian Theune <ct at gocept.com>
@@ -144,6 +148,7 @@ Donghai Xu <xu.donghai at h3c.com>
 Dongmao Zhang <deanraccoon at gmail.com>
 Dongsheng Yang <dongsheng.yang at easystack.cn>
 Dongsu Park <dpark1978 at gmail.com>
+Dong Wu <archer.wudong at gmail.com>
 Dong Yuan <yuandong1222 at gmail.com>
 Douglas Fuller <dfuller at redhat.com>
 Drunkard Zhang <gongfan193 at gmail.com>
@@ -191,9 +196,13 @@ Gerhard Muntingh <gerhard at warpnet.nl>
 Germain Chipaux <germain.chipaux at gmail.com>
 Greg Farnum <gfarnum at redhat.com>
 Greg Farnum <greg at inktank.com>
+GregMeno <greg.meno at gmail.com>
+Gregory Farnum <gfarnum at gregory-farnums-mac-mini.local>
 Gregory Meno <gmeno at redhat.com>
+Gregory Meno <gregory.meno at inktank.com>
 Guangliang Zhao <guangliang at unitedstack.com>
 Guang Yang <yguang at yahoo-inc.com>
+Gui Hecheng <guihecheng at cmss.chinamobile.com>
 Guilhem Lettron <guilhem at lettron.fr>
 Gu Zhongyan <guzhongyan at 360.cn>
 Haifeng Liu <haifeng at yahoo-inc.com>
@@ -224,11 +233,13 @@ Ira Cooper <ira at redhat.com>
 Ira Cooper <ira at samba.org>
 Ismael Serrano <ismael.serrano at gmail.com>
 Ivan Grcic <igrcic at gmail.com>
+Ivo Jimenez <ivo.jimenez at gmail.com>
 Jacek J. Łakis <jacek.lakis at intel.com>
 Jacek J. Lakis <jacek.lakis at intel.com>
 James Liu <james.liu at ssi.samsung.com>
 James Page <james.page at ubuntu.com>
 James Ryan Cresawn <jrcresawn at gmail.com>
+Jan Fajerski <jfajerski at suse.com>
 Jan Harkes <jaharkes at cs.cmu.edu>
 Janne Grunau <j at jannau.net>
 Jashan Kamboj <jashank42 at gmail.com>
@@ -268,6 +279,7 @@ João Eduardo Luís <joao at redhat.com>
 João Eduardo Luís <joao at suse.de>
 Joaquim Rocha <joaquim.rocha at cern.ch>
 Joe Buck <jbbuck at gmail.com>
+Joe Buck <joe.buck at inktank.com>
 Joe Handzik <joseph.t.handzik at hp.com>
 Joe Julian <jjulian at io.com>
 Johannes Erdfelt <johannes at erdfelt.com>
@@ -290,6 +302,7 @@ Josh Durgin <jdurgin at redhat.com>
 Josh Durgin <josh.durgin at inktank.com>
 Josh Pieper <jjp at pobox.com>
 JP François <francoisjp at gmail.com>
+jtlayton <jlayton at redhat.com>
 Juan A. Suarez Romero <jasuarez at igalia.com>
 JuanJose 'JJ' Galvez <jgalvez at redhat.com>
 Kacper Kowalik <xarthisius at gentoo.org>
@@ -299,6 +312,7 @@ Karel Striegel <karel.striegel at ipc.be>
 Karl Eichwalder <ke at suse.de>
 Karol Mroz <kmroz at suse.com>
 Kévin Caradant <kevin.caradant at gmail.com>
+kawaguchi-s <kawaguchi.s at jp.fujitsu.com>
 Kefu Chai <kchai at redhat.com>
 Ken Dreyer <kdreyer at redhat.com>
 Ken Dreyer <ken.dreyer at inktank.com>
@@ -311,6 +325,7 @@ Kiseleva Alyona <akiselyova at mirantis.com>
 Kongming Wu <wu.kongming at h3c.com>
 Kris Jurka <kjurka at locatortechnologies.com>
 Krzysztof Kosiński <krzysztof.kosinski at intel.com>
+ksharma <ksharma at suse.com>
 Kuan Kai Chiu <big.chiu at bigtera.com>
 Kun Huang <academicgareth at gmail.com>
 Kyle Bader <kyle.bader at dreamhost.com>
@@ -325,6 +340,7 @@ Liam Monahan <liam at umiacs.umd.edu>
 Li Peng <lip at dtdream.com>
 Li Tianqing <tianqing at unitedstack.com>
 Liu Peiyan <liu.peiyang at h3c.com>
+LiuYang <yippeetry at gmail.com>
 Li Wang <li.wang at kylin-cloud.com>
 Lluis Pamies-Juarez <lluis.pamies-juarez at hgst.com>
 Loic Dachary <ldachary at redhat.com>
@@ -344,8 +360,10 @@ Marco Garcês <marco.garces at bci.co.mz>
 Marcus Sorensen <shadowsor at gmail.com>
 Marcus Watts <mwatts at redhat.com>
 Mark Kampe <mark.kampe at dreamhost.com>
+Mark Nelson <mark.nelson at dreamhost.com>
 Mark Nelson <mark.nelson at inktank.com>
 Mark Nelson <mnelson at redhat.com>
+Mark Nelson <nhm at clusterfaq.org>
 Markus Elfring <elfring at users.sourceforge.net>
 marnberg <marnberg at 29311d96-e01e-0410-9327-a35deaab8ce9>
 Martin Ettl <ettl.martin at gmx.de>
@@ -371,6 +389,7 @@ Milan Broz <mbroz at redhat.com>
 Min Chen <chenmin at xsky.com>
 Min Chen <minchen at ubuntukylin.com>
 MingXin Liu <mingxin.liu at kylin-cloud.com>
+Mingxin Liu <mingxin at xsky.com>
 Mingyue Zhao <zhao.mingyue at h3c.com>
 Mohammad Salehe <salehe+dev at gmail.com>
 Moritz Möller <mm at mxs.de>
@@ -388,12 +407,14 @@ Nilamdyuti Goswami <ngoswami at redhat.com>
 Ning Yao <yaoning at ruijie.com.cn>
 Ning Yao <yaoning at unitedstack.com>
 Nishtha Rai <nishtha3rai at gmail.com>
+Nitin A Kamble <Nitin.Kamble at Teradata.com>
 Noah Watkins <nwatkins at redhat.com>
 (no author) <(no author)@29311d96-e01e-0410-9327-a35deaab8ce9>
 Oleh Prypin <oleh at pryp.in>
 Orit Wasserman <owasserm at redhat.com>
 Owen Synge <osynge at suse.com>
 Padraig O'Sullivan <posulliv at umd.edu>
+Pan Liu <pan.liu at istuary.com>
 Pascal de Bruijn <pascal at unilogicnetworks.net>
 Patience Warnick <patience at cranium.pelton.net>
 Patrick Donnelly <batrick at batbytes.com>
@@ -412,6 +433,7 @@ Pete Zaitcev <zaitcev at redhat.com>
 Petr Machata <pmachata at redhat.com>
 Pierre Chaumont <pierre.chaumont31 at gmail.com>
 Pierre Rognant <prognant at oodrive.com>
+Piotr Dałek <piotr.dalek at corp.ovh.com>
 Piotr Dałek <piotr.dalek at ts.fujitsu.com>
 Pritha Srivastava <prsrivas at redhat.com>
 Qiankun Zheng <zheng.qiankun at h3c.com>
@@ -441,9 +463,11 @@ Rohan Mars <code at rohanmars.com>
 Roi Dayan <roid at mellanox.com>
 Roland Mechler <rmechler at cisco.com>
 Roman Haritonov <reclosedev at gmail.com>
+Ronak Jain <ronakjain at outlook.in>
 Ron Allred <rallred at itrefined.com>
 Rongze Zhu <zrzhit at gmail.com>
 root <root at ceph-node1.homeoffice.wal-mart.com>
+root <root at korea-fg-ceph-57-117.120.120.117>
 root <root at phenom.dyweni.com>
 Ross Turk <ross.turk at inktank.com>
 Ross Turk <rturk at redhat.com>
@@ -460,6 +484,9 @@ Sahithi R V <tansy.rv at gmail.com>
 Sam Lang <sam.lang at inktank.com>
 Samuel Just <sam.just at inktank.com>
 Samuel Just <sjust at redhat.com>
+Samuel Matzek <smatzek at us.ibm.com>
+Sam Zaydel <szaydel at gmail.com>
+Sander Pool <sander.pool at inktank.com>
 Sandon Van Ness <sandon at inktank.com>
 Sandon Van Ness <svanness at redhat.com>
 Sangdi Xu <xu.sangdi at h3c.com>
@@ -492,6 +519,7 @@ Stefan Eilemann <Stefan.Eilemann at epfl.ch>
 Stephan Renatus <s.renatus at x-ion.de>
 Stephen F Taylor <steveftaylor at gmail.com>
 Stephen Jahl <stephenjahl at gmail.com>
+Stephon Striplin <stephon.striplin at dreamhost.com>
 Steve Capper <steve.capper at linaro.org>
 Steve MacGregor <grape at lapgoat-0.(none)>
 Steve Stock <steve at technolope.org>
@@ -506,8 +534,10 @@ Sylvain Munaut <s.munaut at whatever-company.com>
 Takanori Nakao <nakao.takanori at jp.fujitsu.com>
 Takeshi Miyamae <miyamae.takeshi at jp.fujitsu.com>
 Takuya ASADA <syuu at dokukino.com>
+Tamilarasi Muthamizhan <tmuthami at redhat.com>
 Tamil Muthamizhan <tamil at magna002.ceph.redhat.com>
 Tamil Muthamizhan <tamil.muthamizhan at inktank.com>
+Tamil Muthamizhan <tmuthami at redhat.com>
 Tamil Muthamizhan <tmuthamizhan at MacBook-Air.local>
 Tao Chang <changtao at hihuron.com>
 Thomas Bechtold <t.bechtold at telekom.de>
@@ -520,6 +550,7 @@ Thorsten Glaser <tg at mirbsd.de>
 Tianshan Qu <tianshan at xsky.com>
 Tim Freund <tim at freunds.net>
 Tim Serong <tserong at suse.com>
+tmuthamizhan <tmuthami at redhat.com>
 Tobias Florek <tobias.florek at bytesandbutter.de>
 Tobias Suckow <tobias at suckow.biz>
 Tomasz Paskowski <ss7pro at gmail.com>
@@ -547,7 +578,10 @@ VRan Liu <gliuwr at gmail.com>
 Vu Pham <vu at mellanox.com>
 Walter Huf <hufman at gmail.com>
 Wang, Yaguang <yaguang.wang at intel.com>
+Wanlong Gao <wanlong.gao at easystack.cn>
 Warren Usui <warren.usui at inktank.com>
+Warren Usui <wusui at ubuntu.(none)>
+Weibing Zhang <atheism.zhang at gmail.com>
 Wei Feng <feng.wei at h3c.com>
 Wei Jin <wjin.cn at gmail.com>
 Weijun Duan <duanweijun at h3c.com>
@@ -589,6 +623,7 @@ Yazen Ghannam <yazen.ghannam at linaro.org>
 Yehua Chen <chen.yehua at h3c.com>
 Yehuda Sadeh <yehuda at inktank.com>
 Yehuda Sadeh <ysadehwe at redhat.com>
+Yibo Cai <yibo.cai at linaro.org>
 YiQiang Chen <cyqsign at 163.com>
 Yongqiang He <he.yongqiang at h3c.com>
 YongQiang <he.yongqiang at h3c.com>
@@ -597,13 +632,19 @@ You Ji <jiyou09 at gmail.com>
 You Ji <youji at ebay.com>
 Yuan Zhou <yuan.zhou at intel.com>
 Yunchuan Wen <yunchuan.wen at kylin-cloud.com>
+Yuri Weinstein <yuri.weinstein at gmail.com>
 Yuri Weinstein <yuri.weinstein at inktank.com>
+Yuri Weinstein <yuriw at magna002.ceph.redhat.com>
+Yuri Weinstein <yweinste at redhat.com>
+yuriw <yuri.weinstein at gmail.com>
+Zack Cerza <zack.cerza at inktank.com>
 Zack Cerza <zack at cerza.org>
 Zack Cerza <zack at redhat.com>
 Zengran Zhang <zhangzengran at h3c.com>
 Zeqiang Zhuang <zhuang.zeqiang at h3c.com>
 Zhang Huan <zhanghuan at ict.ac.cn>
 zhangweibing <zhangweibing at unitedstack.com>
+Zhao Chao <zhaochao1984 at gmail.com>
 Zhao Junwang <zhjwpku at gmail.com>
 Zhe Zhang <zzxuanyuan at gmail.com>
 Zhicheng Wei <zhicheng at opensourceforge.net>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d6b3a26..9cb3093 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 2.8.11)
 
 project(Ceph)
-set(VERSION 10.2.5)
+set(VERSION 10.2.6)
 
 if (NOT (CMAKE_MAJOR_VERSION LESS 3))
   # Tweak policies (this one disables "missing" dependency warning)
diff --git a/ChangeLog b/ChangeLog
index 4ef423a..f1a39ee 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,21 +1,442 @@
-c461ee1 (HEAD, tag: v10.2.5, origin/jewel) 10.2.5
+656b5b6 (HEAD, tag: v10.2.6, origin/jewel) 10.2.6
+88f4895 qa/suites/upgrade/hammer-x: Add some volumes
+0c242d1 qa/suites/ceph-deploy: Drop OpenStack volume count
+ccd0265 qa: replace centos 7.2 with centos 7.3
+2cbec5b Removed dumpling test 13234.yaml as not needed anymore
+771e1d9 qa/suites/rest: Openstack volumes
+a18640f qa/suites/ceph-ansible: Openstack volumes
+841688b qa/suites/fs: Add openstack volume configuration
+9778743 qa/suites/samba: Openstack volume configuration
+cd1e8ef qa/suites/hadoop: Openstack volume configuration
+ac7add1 qa/suites/knfs: Add openstack volume configuration
+ba35859 qa/suites/kcephfs: Openstack volume configuration
+aced718 qa/suites/krbd: Add openstack volume configuration
+94d5888 qa/suites/rgw: Add openstack volume configuration
+cb7bb0c tests: reduce stress-split-erasure-code-x86_64 dependency on stress-split
+8ae1886 tests: reduce stress-split-erasure-code dependency on stress-split
+4d4b38e qa: drop ubuntu trusty support
+ebb2f73 mds: fix incorrect assertion in Server::_dir_is_nonempty()
+256b850 tests: remove extra indentation in exec block
+898702d tests: add require_jewel_osds to upgrade/hammer-x/tiering
+214ce1b librbd: async method to check journal tag owner
+5723b93 rbd-mirror: check image mirroring state when bootstrapping
+8361a60 rbd-mirror: async request to test if image is primary
+21ded74 test/cli/crushtool: fix the test of compile-decompile-recompile.t
+d92738c crush/CrushCompiler: error out as long as parse fails
+4824ad2 qa: ceph-ansible smoke suite modified for jewel
+5ed454d rgw: fix use of marker in List::list_objects()
+b06d6f5 Backport bucket reshard to jewel.
+25af6e5 rgw_admin: add a few admin commands to the usage
+b429331 rgw_admin: add bi purge command
+eb65394 rgw: bucket resharding, adjust logging
+e197ec0 cls/rgw: bi_list() fix is_truncated returned param
+81daefa rgw_admin: require --yes-i-really-mean-it for bucket reshard
+4adb247 rgw_admin: better bucket reshard logging
+139842d rgw: limit bucket reshard num shards to max possible
+7e4493a rgw_admin: fix bi list command
+1604b52 rgw_admin: use aio operations for bucket resharding
+cd2e35e rgw: bucket reshard updates stats
+678dac9 cls/rgw: add bucket_update_stats method
+d39eac0 rgw_admin: reshard also links to new bucket instance
+29c9e98 rgw: rgw_link_bucket, use correct bucket structure for entry point
+b40ce27 radosgw-admin: bucket reshard needs --num-shards to be specified
+d9df745 cls/rgw: fix bi_list objclass command
+4892def rgw_admin: bucket resharding, initial work
+c1cf61f rgw: utilities to support raw bucket index operations
+0ce2a12 rgw: use bucket_info.bucket_id instead of marker where needed
+5fe58c5 cls/rgw: utilities to support raw bucket index operations
+7515a77 tests: drop buildpackages.py
+36f96f4 tests: update SUSE yaml facets in qa/distros/all
+7cf37f6 rbd-nbd: support partition for rbd-nbd mapped raw block device.
+bb3c594 buildpackages: remove because it does not belong
+5c328f0 tests: fix regression in qa/tasks/ceph_master.py
+0cf7a61 Revert "Merge pull request #12978 from asheplyakov/jewel-18581"
+260801f rgw: be aware about tenants on cls_user_bucket -> rgw_bucket conversion.
+e9a6dec rgw: add check for update return value
+a27f6a7 rgw: we need to reinit the zonegroup after assignment to avoid invalid cct and store
+006140a rgw: fix init_zg_from_period when default zone is not set as default
+78d296b osd: do not send ENXIO on misdirected op by default
+30fb615 tests: ignore bogus ceph-objectstore-tool error in ceph_manager
+3eff1ac librbd: allow to open an image without opening parent image
+bee1d2c rbd-mirror: hold owner lock when testing if lock owner
+463e88e OSDMonitor: clear jewel+ feature bits when talking to Hammer OSD
+b1d6c2e rgw: RGWCloneMetaLogCoroutine uses RGWMetadataLogInfoCompletion
+7ca400b rgw: expose completion for RGWMetadataLog::get_info_async()
+335a732 rgw: RGWMetaSyncShardCR drops stack refs on destruction
+e5646a0 rgw: librados aio wait_for_safe, not wait_for_complete
+1e75e23 qa/suites/upgrade/hammer-x: wrap thrash and workloads
+75d0580 tests: add require_jewel_osds before upgrading last hammer node
+3a02868 Revert "use the create option during instantiation"
+ebdc0d2 use dev option instead of dev-commit
+3839727 librbd: ensure owner lock is held before purging cache
+d0c12ed librados: blacklist_add should wait for latest OSD map
+bf8b78c librbd: prevent assertion failure when journal IO is blacklisted
+2ca7030 librbd: ignore blacklist error when releasing exclusive lock
+7aa424a librbd: fail immediately if the exclusive lock cannot be acquired
+5d96332 librbd: add new lock_get_owners / lock_break_lock API methods
+245898a librbd: separate break lock logic into standalone state machine
+968a10b librbd: separate locker query into standalone state machine
+652e65a librbd/exclusive_lock/AcquireRequest.cc: init lock_type
+d4085d3 librbd: API methods to directly acquire and release the exclusive lock
+476e2b1 rbd-mirror: fix error messages formatting
+374d89f librbd: ignore partial refresh error when acquiring exclusive lock
+0bd843a librbd: potential seg fault when blacklisting an image client
+273fd99 librbd: potential double-unwatch of watch handle upon error
+33e037a librbd: deadlock when replaying journal during image open
+bca65c4 librbd: improve image state machine debug log messages
+a12f435 librbd: remove unused refresh request logic
+a475bfb librbd: interlock image refresh and lock operations
+3d61b69 librbd: image state machine now has hooks for lock requests
+4ea8d55 librbd: integrate asynchronous image rewatch state machine
+62f265b librbd: helper state machine for asynchronous watch recovery
+ead7201 librbd: exclusive lock now supports reacquiring a lost lock
+38ca4ff librbd: store exclusive lock cookie instead of recalculating
+73a4455 librbd: helper state machine to update lock cookie
+98a5e11 cls_lock: support updating the lock cookie without releasing the lock
+8a75f98 tests: make distros/all/centos.yaml be a symlink to centos_7.3
+18545a2 ceph-disk: convert none str to str before printing it
+077290b rbd-mirror: fix gmock warnings in bootstrap request unit tests
+ce32297 qa/workunits/rbd: test_status_in_pool_dir: explicitly check grep return value
+6d729d2 rbd-mirror: make 'rbd mirror image resync' work after split-brain
+e34a403 rbd-mirror: split-brain issues should be clearly visible in mirror status
+8f9a93c qa/workunits/rbd: use image id when probing for image presence
+1f2d30c qa/workunits/rbd: check status also in pool dir after asok commands
+5d0fba4 qa/workunits/rbd: wait for image deleted before checking health
+38e06fb qa/workunits/rbd: wait for image deleted before checking health
+735e32b qa/workunits/rbd: small fixup and improvements for rbd-mirror tests
+bf3400f systemd: Restart Mon after 10s in case of failure
+401271e build/ops: add libldap dependency for RGW
+73d2114 radosgw-admin: check for name mismatch in realm set
+e4b6cf6 radosgw-admin: realm set can use input redirection
+3cd42f4 radosgw-admin: realm set should create a new realm
+33c6ef5 rgw: fix off-by-one in RGWDataChangesLog::get_info
+8b124c8 rgw_file: add timed namespace invalidation
+2cb0307 rgw_admin: read master log shards from master's current period
+b2feddd rgw: allow getting master log shards info on specified period
+a0e0893 rgw_admin: get master's period from store's current period info
+0a47342 rgw: complete versioning enablement after sending it to meta master
+7e51bec rgw: clear master_zonegroup when reseting RGWPeriodMap
+509de4d PrimaryLogPG::try_lock_for_read: give up if missing
+cedaecf ReplicatedBackend: take read locks for clone sources during recovery
+016b059 rgw: fix interface compliance of RGWCivetWeb::write_data()
+1555638 librbd: metadata_set API operation should not change global config setting
+7b74238 tests: Remove ext4 option from rados:thrash tests
+4e60be5 Revert "jewel: rgw: multipart upload copy"
+99bafc1 rbd: enabling/disabling rbd feature should report missing dependency
+16a2fec librbd: don't remove an image w/ incompatible features
+dd1f425 rbd-mirror: avoid processing new events after stop requested
+de1ebc3 tests: explicitly use centos 7.3 in distros/supported
+0133d63 qa: fixed distros links
+1481c8f tests: upgrade: install firefly only on Ubuntu 14.04
+514e2ba rgw: minor optimization
+7f76bb1 rgw: rgw_obj_key use adjustment in multipart copy part
+f99ead1 rgw: multipart copy-part handle versionId
+6793489 rgw: multipart copy part minor parsing cleanup
+2ca1bcd rgw: multipart copy, check for empty bucket, improve logging
+e5ac120 rgw: multipart copy part, chunked read
+a54a7ad rgw: doc: add multipart uploads copy part feature as supported
+987b425 rgw: multipart uploads copy part support
+21622c1 src/mds: fix MDSMap upgrade decoding
+be8bc11 mds: use FSMap::insert to add to standby_daemons
+649b1d4 rbd: bench-write should return error if io-size >= 4G
+c2f86a4 journal: don't hold future lock during assignment
+5eda4aa mds: fix dropping events in standby replay
+df4558c cephfs: fix missing ll_get for ll_walk
+fef3de8 mon/MDSMonitor: fix iterating over mutated map
+0591627 mon: use clearer code structure
+bc9b779 client: fix Client::handle_cap_flushsnap_ack() crash
+b147022 qa/tasks: add test_open_ino_errors
+3385419 mds: propagate error encountered during opening inode by number
+5c4fffa librados: Memory leaks in object_list_begin and object_list_end
+7bbb5a8 mon/OSDMonitor: only show interesting flags in health warning
+336c351 mon/OSDMonitor: set last_force_op_resend on overlay pool too
+8742203 mds: finish clientreplay requests before requesting active state
+b359935 tests: subst repo and branch in qemu test urls
+f66bd81 tests: subst branch and repo in qa/tasks/qemu.py
+69a0efa tests: subst repo name in krbd/unmap/tasks/unmap.yaml
+2931aef tests: subst repo name in qa/tasks/cram.py
+205403b cram: support fetching from sha1 branch, tag, commit hash
+ea65450 qa/workunits/rbd: use more recent qemu-iotests that support Xenial
+f449e3d qa/workunits/rbd: removed qemu-iotest case 077
+61e1b0c tasks/rbd_fio: unmap rbd devices on cleanup
+5fcfa32 tasks/rbd_fio: don't use sudo unnecessarily
+85fbddd qa/tasks/cephfs/filesystem.py: backport _write_data_xattr() function
+89dcd8b client: populate metadata during mount
+ff91688 msg/simple: clear_pipe when wait() is mopping up pipes
+50a3fa1 mds: fix null pointer dereference in Locker::handle_client_caps
+20e7502 systemd: Restart Mon after 10s in case of failure
+aa0e450 qa/distros: centos_7.yaml -> centos.yaml
+b0c1e88 qa/suites: centos_7.2.yaml -> centos_7.yaml
+8a98f06 qa/distros: add centos 7.3
+a45ad83 qa/distros: add centos 7 yaml; use that instead
+23680e0 doc: document hostname constraints for rados bench
+67e7a90 selinux: Allow ceph to manage tmp files
+35e10a0 mon: do not send duplicated osdmap msg to not sync'ed osd
+bcd4698 Doc: Fixes Python Swift client commands
+c2bbf7f tests: run fs/thrash on xfs instead of btrfs
+7a341a8 client/Client.cc: prevent segfaulting
+173ea7f Ceph-disk to use correct user in check_journal_req
+bf873a7 qa: update remaining ceph.com to download.ceph.com
+e8f55f6 rgw: RGWAsyncRadosRequest drops notifier ref on cancel
+3f509aa rgw: remove circular reference in RGWAsyncRadosRequest
+0ef1bdf rgw: release RGWAioCompletionNotifier refs on destruction
+c21622d rgw_rados: add guard assert in add_io()
+585eb48 rgw_rados: sanitize dout print in GWRados::get_obj_iterate_cb(...)
+e2eaae4 Add ceph-create-keys to explicitly create admin/bootstrap keys
+2adc0ee Remove debug overrides
+8f36e23 use the create option during instantiation
+1ea9de2 qa/tasks/qemu: update default image url after ceph.com redesign
+36186d0 test_volume_client: remove superfluous arguments
+7549681 test_volume_client: check volume size
+3e3ffcf tasks/cephfs: test recovery of partial auth update
+3320ef1 ceph_volume_client: fix partial auth recovery
+5115c21 ceph_volume_client: check if volume metadata is empty
+bf33cd5 ceph_volume_client: fix _recover_auth_meta() method
+8404426 mds/server: skip unwanted dn in handle_client_readdir
+a0ee8b9 jewel: fix compile error for dencode test case when --with-radosgw=no
+5e6c729 jewel: fixed compile error when --with-radosgw=no
+cdd6cbf librbd: block concurrent in-flight object map updates for the same object
+04cee05 qa/tasks/workunit: clear clone dir before retrying checkout
+1a98850 qa/tasks/workunit: retry on ceph.git if checkout fails
+c101fba qa/tasks/workunit.py: add CEPH_BASE env var
+c7b74cd qa/tasks/workunit: leave workunits inside git checkout
+384e5c0 rgw: add 'rgw log http headers' gloss to config-ref.rst
+9fd29b4 use std::map
+8838bcb rgw: add rgw_log_http_headers option
+8755775 librbd: new block guard helper to prevent concurrent IO to blocks
+5d306fd librbd: convert ObjectMap to template for unit testing
+c53df37 librbd: clean up object map update interface
+6fe9be8 librbd: update in-memory object map after on-disk update committed
+f24c3ff osd/PG: publish PG stats when backfill-related states change
+fe753db install-deps.sh: unify indentation in case statement
+94ab8d8 install-deps.sh: allow building on SLES systems
+7159265 qa/tasks: add test_corrupt_backtrace
+893d4ab mds: check for errors decoding backtraces
+895ab24 PG: fix cached_removed_snaps bug in PGPool::update after map gap
+299478a qa/config/rados.yaml: enable osd_debug_verify_cached_snaps
+b492c00 PG::handle_advance_map: add debugging option to verify cached_removed_snaps
+2296c87 osd: improve error message when FileStore op fails due to EPERM
+7819adb client: don't use special faked-up inode for /..
+95edad2 client: don't use special faked-up inode for /..
+9a59ce9 rgw: fix decoding of creation_time and last_update.
+bbf4c27 qa/tasks/admin_socket: subst in repo name
+944ec03 rbd-nbd: invalid error code for "failed to read nbd request" messages
+fcdd5e7 rados: optionally support reading omap key from file
+d313e42 librbd: ignore error when object map is already locked by current client
+d0b0d41 tests: rbd/test_lock_fence.sh: fix rbdrw.py relative path
+06e40eb tests: use ceph-jewel branch for s3tests
+8877ee4 rbd: fix json formatting for image and journal status output
+9c84a65 journal: prevent repetitive error messages after being blacklisted
+d069464 journal: avoid logging an error when a watch is blacklisted
+9a1258d rgw: use explicit flag to cancel RGWCoroutinesManager::run()
+a67dca4 qa/tasks/workunit: clear clone dir before retrying checkout
+e5c81c3 qa/tasks/workunit: retry on ceph.git if checkout fails
+efaedb3 qa/workunits: include extension for nose tests
+de15912 qa/workunits: use relative path instead of wget from git
+74aac99 qa/tasks/workunit.py: add CEPH_BASE env var
+117d38e qa/tasks/workunit: leave workunits inside git checkout
+5aa9387 rgw: ldap: simple_bind() should set ldap option on tldap
+dcc9483 mon: OSDMonitor: trigger an immediate propose if any newly down osd is detected during tick()
+64c0cae librbd/diff_iterator: use proper snap to query parent overlap
+d584f9e rgw: log name instead of id for SystemMetaObj on failure
+1a0becf rgw: drop unnecessary spacing in rgw zg init log
+f15c8da ceph_disk: fix a jewel checkin test break
+8e0cffd automake: convert to tar-pax
+a0ae9a8 client: drop setuid/setgid bits on ownership change
+d49e628 mds: clear setuid/setgid bits on ownership changes
+f2dfc20 client: set metadata["root"] from mount method when it's called with a pathname
+87a2a95 rgw: Replacing '+' with "%20" in canonical uri for s3 v4 auth.
+aa8e57d rbd: utilize new API methods for image id and block name prefix
+05295ef librbd: new API methods to retrieve image id and block name prefix
+240431b build/ops: fix /etc/os-release parsing in install-deps.sh
+f651950 install-deps.sh: initial distro detection based on /etc/os-release
+1c28e7f move ceph-qa-suite dirs into qa/
+282451d Revert "tasks/workunit.py: depth 1 clone"
+e6f61ea tasks/workunit.py: depth 1 clone
+426f7cf tasks/workunit: remove kludge to use git.ceph.com
+1ba5995 tasks/ceph: restore context of osd mount path before mkfs
+26c87fd rbd: --max_part and --nbds_max options for nbd map
+ddb5403 radosgw-admin: 'zone placement modify' doesn't require pool names
+7cfc346 radosgw-admin: add 'zonegroup placement default' command
+dbc1b61 radosgw-admin: fix 'placment' typos
+4f7147c rgw_admin: commands to manage placement targets
+155641f rgw-admin: add commands to manage zonegroup placement fields
+bb9678b rgw: use set for zonegroup placement target tags
+c461ee1 (tag: v10.2.5) 10.2.5
+8b10d3b rgw: omap_get_all() fixes
+df9d929 rgw/rgw_rados: do not omap_getvals with (u64)-1 max
+ad869de msg: don't truncate message sequence to 32-bits
+4eb7c73 rgw: do not abort when accept a CORS request with short origin
+36ff758 os/ObjectStore: properly clone object map when replaying OP_COLL_MOVE_RENAME
+7f94c7b os/ObjectStore: properly clear object map when replaying OP_REMOVE
 f7abffe msg/simple/Pipe: avoid returning 0 on poll timeout
+a9da605 FileStore::_do_fiemap: do not reference fiemap after it is freed
 9411351 (tag: v10.2.4) 10.2.4
+e725605 ceph_volume_client: set an existing auth ID's default mon caps
+e1af490 mds: force client flush snap data before truncating objects
+900f2ac librbd: account m_processing when failing request after refresh
+4a157ea librbd: diffs to clone's first snapshot should include parent diffs
+820ab7d rgw: fix for versioned delete_multi_object
+514a31f rgw: fix for deleting objects whose names begin and end with underscores in a bucket, using the POST method of AWS's JS SDK. Fixes: http://tracker.ceph.com/issues/17888
+7db6d1d rgw: add recovery procedure for upgrade to older version of jewel
+478e40a rgw: fix missing master zone for a single zone zonegroup
+b502b96 rgw: RGWBucketSyncStatusManager uses existing async_rados
+ece622d rgw: only set CURLOPT_UPLOAD for PUT/POST requests
+bee7e3a ceph-disk: enable --runtime ceph-osd systemd units
+e3f6593 build/ops: restart ceph-osd at .service after 20s instead of 100ms
+e16d756 ceph-disk: trigger must ensure device ownership
+93e6719 ceph-disk: systemd unit must run after local-fs.target
+5400673 tests: check hostname --fqdn sanity before running cmake check
+bd96c31 tests: check hostname --fqdn sanity before running make check
+cec7441 thrashosds: try ceph-objectstore-tool for 10 minutes
+f47213c build/ops: fix undefined crypto references with --with-xio
+8a774cc msg/simple/Pipe: handle addr decode error
 d194db8 OSDMonitor: only reject MOSDBoot based on up_from if inst matches
+890692d suites/rados: s/trusty/"14.04"/
+89248e1 jewel: fixed compilation failure when --disable-server is used.
+78848fe upgrade/infernalis-client-x: ceph-test is needed for ceph-coverage
+791de76 upgrade: ceph-test is needed for ceph-coverage
 00de014 mon: MonmapMonitor: drop unnecessary 'goto' statements
 25f1b39 mon: MonmapMonitor: return success when monitor will be removed
+3f4a636 upgrade/client-upgrade: correct distros/ location
+27067c7 upgrade/client-upgrade: fix distro symlinks
+d9c1d86 rgw: add support for the prefix parameter in account listing of Swift API.
+6ead4cc rgw: optimize out ctor-copy in RGWListBuckets_ObjStore_SWIFT.
+9832332 upgrade/client-upgrade: specify centos or trusty (not xenial)
+f95ed3e rados: avoid ubuntu xenial on upgrade tests
+0ab5b7a systemd/ceph-disk: reduce ceph-disk flock contention
+2c1df81 upgrade/hammer-x: verify shec before the full upgrade
 79be070 librados: remove new setxattr overload to avoid breaking the C++ ABI
 4d6f848 crush: condition latest tunable encoding on features
 bf96b30 crush/CrushWrapper: encode with features
 c5f5b94 crush/CrushWrapper: drop unused 'lean' encode() argument
+2eba625 upgrade/hammer-x/stress-split-*: disable sighup injection
+5ff2680 upgrade/hammer-x/parallel: whitelist 'failed to encode'
+a97f1be upgrade/hammer-x/parallel: upgrade osds first
+ca21247 upgrade/hammer-x: do not whitelist 'failed to encode map'
+6587484 drop broken name length config args
+d4a28eb rgw: don't store empty chains in gc
+8f3548c upgrade/hammer-x: fix symlinks
 c66c556 osd/osd_types: encode pg_pool_t like hammer if features indicate hammer
 85caf34 osd/osd_types: conditional pg_pool_t encoding
+b84c1a2 upgrade/hammer-x: debug mds
+3f7ad75 upgrade/hammer-x: debug client
+d7a479c4 rgw: TempURL properly handles accounts created with the implicit tenant.
+a0b4e60 rgw: look for region_map in rgw_region_root_pool
+af10e57 rgw: region conversion respects pre-existing rgw_region_root_pool
+05e5a5a mds: require MAY_SET_POOL to set pool_ns
+6efad69 mds: use projected path construction for access
+edac06f mds: ignore 'session evict' when mds is replaying log
+4df1b9c tests: save 9 characters for asok paths
+1a63cb6 tasks.rgw: 'time' imported but unused
+ea9665b client: fix stale entries in command table
+8d3b0e7 rgw: settle /info implementation across other swift-at-root features.
+22eca66 swift /info implementation.
+4a0f87a Restart OSDs that belong to first node only
+aa6f7f7 This is triggering failures like
+5b89e7b rgw: add support for the healthcheck feature of Swift API.
+ca7e583 rgw: add support for the crossdomain.xml resource of Swift API.
+6d88b89 rgw: fix the handling of rgw_swift_url_prefix.
+3ec00fc rgw_http_errors: add http error code for 503
+b33a690 rgw: Allow to serve Swift off the URL root
+ca0bc00 whitelist CRC mismatch
+23ea258 rgw: remove unnecessary sleeps
+19c7c44 rgw: start_rgw() polls gateway until it accepts connections
+0e046c6 rgw: add retry/backoff to sync agent requests
+b044361 rgw_file: fix spurious mount entries w/Linux NFS client
+732405e rgw: fix for bucket delete racing with mdlog sync
+652f490 mon,ceph-disk: add lockbox permissions to bootstrap-osd
+d1df8f7 rgw: for the create_bucket api, if the input creation_time is zero, we should set it to 'now'
+8ecf8b8 Added /upgrade/jewel-x/point-to-point-x
+861488c DO NOT whitelist CRC mismatch
 3cc29c6 os/filestore/HashIndex: fix list_by_hash_* termination on reaching end
+33b8893 mon: MonmapMonitor: drop unnecessary 'goto' statements
+fbdb01c mon: MonmapMonitor: return success when monitor will be removed
+ecfe80f test: add test for fiemap xfs issue when #extents > 1364
+aa56ade FileStore:: fix fiemap issue in xfs when #extents > 1364
+ff2e194 ReplicatedPG::do_update_log_missing: take the pg lock in the callback
+fff2127 osd: limit omap data in push op
+35acd52 ceph-disk: fix flake8 errors
+182babf OSDMonitor: only reject MOSDBoot based on up_from if inst matches
+0f41bbf upgrade/hammer-x: osds first
+8efa360 upgrade/hammer-x/f-h-x-offline: osds first
+db4071a osd: Add config option to disable new scrubs during recovery
+9880093 ceph-create-keys: wait 10 minutes to get or create the bootstrap key, not forever
+beebbcf ceph-create-keys: wait 10 minutes to get or create a key, not forever
+9e44556 ceph-create-keys: wait for quorum for ten minutes, not forever
+ddfe087 qa/workunits: update test_cache_pool.sh
+c74a4fa tools/rados: add --with-clones option to include clones for cache-flush/cache-evict
+9c001e2 tools/rados: default to include clone objects when executing "cache-flush-evict-all"
+5b83fe5 upgrade/hammer-x/stress-split: set require_jewel_osds
+0a35b7e upgrade: disable ceph-objectstore-tool test in infernalis-x
+c1bcf40 test: temporarily disable fork()'ing tests
+ef2709a mon: update mon(peon)'s down_pending_out when osd up
+6b8361f librbd: restore journal access when force disabling mirroring
+e949bfb Added require_jewel_osds flag; added to point-to-point as well
+faaee33 suites/powercycle: no ext4
+e6bb826 rgw: add sleep to let the sync agent to init
+cea3de3 rgw: add debug info when comparing bucket metadata
+fe30fa5 rgw: add missing mutex header for std::once_flag
+19a836b rgw: Have a flavor of bucket deletion to bypass GC and to trigger object deletions async.
+2d0ccf3 rgw: remove suggestion to upgrade libcurl
+6f512d1 rgw/rgw_http_client: add compat.h include for TEMP_FAILURE_RETRY
+8b7d722 rgw: detect and work around a curl_multi_wait bug
+3881444 rgw: use non-blocking reads for clear_signal
+e956efe rgw: factored clear_signal out of do_curl_wait
+cf6e695 rgw: do_curl_wait uses ldout
+0330031 rgw: add pipe fd to set for select() in do_curl_wait()
+01f7868 rgw: clean up thread_pipe in RGWHTTPManager::stop
+b5a4610 rgw: create thread_pipe before RGWHTTPManager::ReqsThread
+6a3c10f rgw: fix the field 'total_time'  of log entry in log show opt
+66a19a8 common: Remove the runtime dependency on lsb_release
+937f057 common/util: add support for distro info from /etc/os-release file
+aa6cc10 mds: respawn using /proc/self/exe
+73cef9f rgw: delete entries_index in RGWFetchAllMetaCR
+73894c5 rbd-nbd: disallow mapping images >2TB in size
+3d27a50 rbd-mirror: snap protect of non-layered image results in split-brain
+3239ce8 rgw: store oldest mdlog period in rados
+269447a rgw: get_system_obj does not use result of get_system_obj_state
+0b7577e rgw: clean up RGWShardedOmapCRManager on early return
+c7c9ef7 rgw: RGWSimpleRadosReadCR tolerates empty reads
+ca43639 rgw: fix for passing temporary in InitBucketSyncStatus
+ec2f3d3 rgw multisite: fix the incremental bucket sync init
+3c56892 rgw: get_zonegroup() uses "default" zonegroup if empty
+804c46c mds: use parse_filesystem in parse_role
+8a028b0 mds: group filesystem access methods
+89653e9 mds: use reference to avoid copy
+9a2596d mds: fully encapsulate filesystems map
+84cfd39 mds: fix false "failing to respond to cache pressure" warning
+9e5694a Revert "osdc: After write try merge bh."
+4d32d04 osdc/ObjectCacher: wake up dirty stat waiters after removing buffers
+54d6c1d mds: check if down mds is known
+ea3bd24 mds: fix false "failing to respond to cache pressure" warning
+953dbc3 rpm: fix permissions for /etc/ceph/rbdmap
+b273582 rbd: return error if we specified a wrong image name for rbd du
+28e10ef test: skip TestLibRBD.DiscardAfterWrite if skip partial discard enabled
+20ae76b librbd: exclusive lock incorrectly initialized when switching to HEAD
+3f708df common: Improve linux dcache hash algorithm
+4f8287f utime.h: fix timezone issue in round_to_* funcs.
+6c1edcd rgw: fix for assertion in RGWMetaSyncCR
+d54b354 rgw: RGWCoroutinesManager::run returns status of last cr
+aa24a8f rgw: bucket check remove _multipart_ prefix
+0a5713c mds: handle blacklisting during journal recovery
+bcf2289 mds: use a random nonce in Messenger
+fa1fc35 librbd: always respond to "release lock" request if lock owner
+b1374aa upgrade/hammer-x: wait for osdmaps to propagate
 8b595f5 ceph-create-keys: add missing argument comma
+8f8000f upgrade/hammer-x: set require_jewel_osds
+cb7684a rados/singleton-nomsgr/all: set require_jewel_osds
 eea546f mon: expose require_jewel_osds flag to user
 f8ee076 mon/OSDMonitor: encode OSDMap::Incremental with same features as OSDMap
 1f629b2 mon/OSDMonitor: health warn if require_{jewel,kraken} flags aren't set
 34555f1 mon/OSDMonitor: encode canonical full osdmap based on osdmap flags
+cfc6ce6 rgw: add commas to hostname output
+8b1f036 rgw: filter out empty virtual bucket hostnames
+95bd463 doc: configuring virtual hosted buckets for radosgw
+c9445fa rgw: only enable virtual hosting if hostnames are configured
+b72fc1b rgw: Fix Host->bucket fallback logic inversion
+13fa5db rgw: json encode/decode index_type, allow modification
+8de67af rgw: fix osd crashes when executing "radosgw-admin bi list --max-entries=1" command
 eb30cc5 ceph-post-file: Ignore keys offered by ssh-agent
 43282b0 ceph-post-file: migrate to RSA SSH keys
 d48e603 msg: adjust byte_throttler from Message::encode
@@ -26,10 +447,21 @@ e068c92 messages/MForward: fix encoding features
 3e1edde test/ceph_test_msgr: do not use Message::middle for holding transient data
 8f75bd6 test/ceph_test_msgr: fix circular locking dependency
 f960db4 ceph_test_msgr: use ceph log infrastructure to output
+362e47c mon: send updated monmap to its subscribers
+0ec4051 osd/PGBackend: fix collection_list shadow return value
+3667c56 core: set dumpable flag after setuid
+d74fcec     doc: change the osd_max_backfills default to 1   Fixes: http://tracker.ceph.com/issues/17701   Signed-off-by: huangjun <hjwsm1989 at gmail.com>
+0274518 test: Eliminate racey scrubbing in scrub_test.py
+eebc9e0 scrub_test: Fixes need with wip-13507
+217ce4f Revert "check for inconsistent objects count instead of all keys"
+e94f390 check for inconsistent objects count instead of all keys
+424db8e rgw: fix put_acls for objects starting and ending with underscore
+09dc90e rgw_rest_s3:  apply missed base64 try-catch
 39b8e78 rgw: fix put_acls for objects starting and ending with underscore
 779af22 qa: remove EnumerateObjects from librados upgrade tests
 4cb83c1 librbd: discard after write can result in assertion failure
 dc2ffda rgw: handle empty POST condition
+7040d2b mon/PGMap: PGs can be stuck more than one thing
 cd99a64 ceph-disk: allow using a regular file as a journal
 a800402 ceph-disk: PEP8ify
 e200b17 ceph-disk: Set space_symlink to the path, not file object
@@ -43,13 +475,16 @@ f1c2de7 ceph-disk: Use context manager with FileLock
 1eedf18 mon: fix missing osd metadata (again)
 f5e37ab journal: do not prematurely flag object recorder as closed
 2f9a5be Don't loop forever when reading data from 0 sized segment.
+cbf5b7a osd: Remove extra call to reg_next_scrub() during splits
 4f9e02c test: TestJournalReplay test cases need to wait for committed journal event
 e8e1acb librbd: ignore cache busy errors when shrinking an image
 ba2e87e librbd: invalidate cache before trimming image
 d7c0873 rbd-nbd: mask out-of-bounds IO errors caused by image shrink
 0ce342d rbd-nbd: fix kernel deadlock during teuthology testing
+a061bb3 rgw multisite: obsolete 'radosgw-admin period prepare' command
 eb6c3cb rgw: set correct instance on the object
 084108e rgw: fix regression with handling double underscore
+4ca626c tasks/ceph: move generate_caps from teuthology
 f400ff2 tests: ceph-disk: force debug monc = 0
 25a35d4 doc: fill keyring with caps before passing it to ceph-monstore-tool
 73ea926 tools/ceph_monstore_tool: bail out if no caps found for a key
@@ -131,6 +566,7 @@ d3ad2ff librbd: optionally flag "laggy" journal clients disconnected
 4056e36 journal: allow to trim journal for "laggy" clients
 3aec576 cls/journal: add async client_update_state method
 d66bb7a build: include more files in "make dist" tarball
+b25d80f suites/rbd: increase timeout for rbd-mirror stress test
 3bb2a9e librbd: ignore notify errors on missing image header
 5173563 client: properly set inode number of created inode in replay request
 2c4e1c1 mds: log path with CDir damage messages
@@ -145,6 +581,7 @@ ca8fc6f ceph-create-keys: fix existing-but-different case
 10e603b client: fix segment fault in Client::_invalidate_kernel_dcache().
 3320da0 mds: catch duplicates in DamageTable
 5d0e2f8 common: only call crypto::init once per CephContext
+6d28d15 upgrade: ceph-test is needed for ceph-coverage
 483d8c4 cephx: Fix multiple segfaults due to attempts to encrypt or decrypt an empty secret and a null CryptoKeyHandler
 5ae4f31 os/filestore/FileJournal: fail out if FileJournal is not block device or regular file
 c2d4239 mds: remove max_mds config option
@@ -192,16 +629,21 @@ de0c4e1 rgw: RGWDataSyncCR fails on errors from RGWListBucketIndexesCR
 16f9d95 src/osd: relax the requirement that we scrub a whole hash value
 2176c84 hobject: clarify is_snap and has_snapset for max
 35660d1 mon: OSDMonitor: Missing nearfull flag set
+a199dcd ceph_manager: test offline split via ceph-objectstore-tool
 3cb0a5e ceph-objectstore-tool: add a way to split filestore directories offline
 de672a0 logrotate: Run as root/ceph
 9cb45e1 log: Log.cc: Assign LOG_INFO priority to syslog calls
+0677189 suites/rbd: remove helgrind test cases
+cfb1cab smoke: mask out unsupported image features for krbd iozone test
 165e5ab librados: modify Pipe::connect() to return the error code
+a8fd09e Add basic testing of ceph bits
 5ab5e82 doc: fix description for rsize and rasize
 ecc2377 (tag: v10.2.3) 10.2.3
 c94244d Add Install section to systemd rbdmap.service file
 e2ce857 Add two options to radosgw-admin.rst manpage
 4e66f9e radosgw-admin: add "--orphan-stale-secs" to --help
 bfa90a1 doc: add "--orphan-stale-secs" to radosgw-admin(8)
+c0ef05b suites/upgrade: override rbd_default_features config back to pre-Jewel
 cefd6f5 rgw: fix collection of object sync errors
 aa36981 rgw: fix marker tracker completion handling
 bce19a3 rgw: collect() stops if error encountered
@@ -211,8 +653,24 @@ f337a07 krbd: don't segfault if images are unmapped concurrently
 472cb29 qa: rbd/concurrent.sh: suppress rbd map output
 667d42a qa: rbd: don't modprobe, chown sysfs files or udevadm settle
 866c3e5 qa: rbd/map-snapshot-io.sh: don't chown sysfs files
+c97079f krbd/unmap: put client.0 on a separate remote
+f507bf4 krbd/unmap: override client.0 only
+45116a8 krbd/unmap: set tunables to bobtail
+e743654 krbd/unmap: assert that pre-single-major kernel is installed
+db2b0d8 buildpackages/common.sh: use install-deps.sh from jewel
+76fd462 buildpackages/make-rpm.sh: install lsb_release dependencies
+00f8378 buildpackages/make-rpm.sh: adjust Source0 spec file line on SUSE
+2074717 buildpackages: use unadulterated make-dist
+4eee9f5 buildpackages: make make-{deb,rpm}.sh aware of cmake
+4c19824 buildpackages/make-rpm.sh: use /etc/os-release
 cf211d7 client: fix shutdown with open inodes
+292d757 buildpackages: force ceph-test build on SUSE
+d1bb36c buildpackages: fix RPM generation script to support opensuse spec file
+3b72f0b rbd-mirror: remove ceph_test_rbd_mirror_image_replay test case
+7cd0103 rados/objectstore/objectstore.yaml: skip bluestore tests
 1bc047b client: add missing client_lock for get_root
+91d1ac2 openstack: add disks where necessary
+2502032 11429: wait_for_clean between restarting the osds and starting the bench
 086f6e0 rgw: fix upgrade from old multisite to new multisite configuration
 23d73dc rgw: delete region map after upgrade to zonegroup map
 27626ba rgw_file: restore local definition of RGWLibFS gc interval
@@ -227,6 +685,9 @@ c0db9fb ceph: don't fudge the ctime in stat() unless it's really older than the
 66cd43b client: only skip querying the MDS in _lookup when we have the necessary caps
 fb4a939 client: plumb a mask argument into _lookup
 b5cbd57 client: add mask parameter to _do_lookup
+16dd940 Update console checking, and DRY
+0a03f4e Drop unnecessary console handling code
+c1e4209 workunit: allow parallel git clone
 416ec6f rgw: fix radosgw daemon core when reopen logs
 f034fd0 rgw: fix period update --commit return error
 457d78f rgw: adjust manifest head object
@@ -242,6 +703,7 @@ bd63666 librbd: fix possible inconsistent state when disabling mirroring
 d93eda8 common: add int64_t template for strict_si_cast()
 f7cd284 common/config: cast OPT_U32 options using uint32_t
 518883d Revert "common: add int64_t template for strict_si_cast()"
+33b136f Fix for 'branch' overrides to work
 577336e mds: fix double-unlock on shutdown
 21da103 rgw: collect skips a specific coroutine stack
 98779c3 rgw: fix compilation
@@ -258,6 +720,7 @@ dda0ee0 rgw: convert bucket instance listings back to metadata key format
 cac6612 rgw: add get_key() methods to format rgw_buckets
 ac557e0 rgw: data sync debug logging
 6bb8c15 rgw: modifying multi-site log messages.
+4c98ee1 Revert "packages.yaml: reflect python-ceph package split"
 ecea6dc librbd: delay acquiring exclusive lock if watch has failed
 49a39eb librbd: convert ImageWatcher class to template
 f4fb598 build/ops: bump rocksdb submodule
@@ -337,6 +800,7 @@ ac27352 mon/osdmonitor: initialize local variable "kb_avail_i"
 0b30a1d mon/osdmonitor: decouple adjust_heartbeat_grace and min_down_reporters
 96ad2d1 rgw: can set negative max_buckets on RGWUserInfo
 dbf8cf0 rgw: improve support for Swift's object versioning.
+1aaa0eb rgw: data_extra_pool is unique per zone
 5ffdc34 doc: format 2 now is the default image format
 bd70d6d qa: remove tmap_migrate tests from upgrade testing
 49db733 qa: add rados test script for upgrades
@@ -431,7 +895,14 @@ fe57ace rgw ldap: fix ldap bindpw parsing
 276ec72 rgw: use std::unique_ptr for rgw_aws4_auth management.
 2c422e3 rgw: add handling of memory allocation failure in AWS4 auth.
 79e2acb crush: reset bucket->h.items[i] when removing tree item
+60ba652 tox.ini: exclude .tox dir from flake8
+ad42602 Fix rechecking of health in loop
+81e8ad9 osd_recovery: add delay to ensure maps propagate
+12f84d2 tasks/cephfs/test_volume_client: test authentication metadata
+97a7723 tasks/cephfs: test read-only authorization for volumes
+f753e40 tasks/cephfs: add function to configure guest credentials
 2cd3ed8 ceph_volume_client: allow read-only authorization for volumes
+ec4b0c3 packages.yaml: reflect python-ceph package split
 46246e3 osd: increment stats on recovery pull also
 3da251f pybind/ceph_argparse: handle non ascii unicode args
 b01af21 Fix tabs->whitespace in ceph_argparse
@@ -441,6 +912,7 @@ a28810c rpm: move gperftools-devel to non-distro-specific section
 e6b7a4b rpm: use new name of libatomic_ops-devel
 9bbf2e8 fix tcmalloc handling in spec file
 b26acc0 ceph-osd-prestart.sh: drop Upstart-specific code
+1558a48 fs: add snapshot tests to mds thrashing
 1e622a5 rpm: Fix creation of mount.ceph symbolic link for SUSE distros
 89cb116 build/ops: build mount.ceph and mount.fuse.ceph as client binaries
 84b45b7 rpm: move mount.ceph from ceph-base to ceph-common
@@ -500,6 +972,7 @@ a5f5513 test: fix CMake build of ceph_test_objectcacher_stress
 3446fa4 test: build a correctness test for the ObjectCacher
 b668491 test: split objectcacher test into 'stress' and 'correctness'
 74f5920 test: add a data-storing MemWriteback for testing ObjectCacher
+4aeeed8 rbd: pull formatted-output.t from the relevant branch
 757babb librbd: memory leak possible if journal op event failed
 e7ec20e librbd: ignore snap unprotect -EBUSY errors during journal replay
 cbc9636 librbd: delete ExclusiveLock instance when switching to snapshot
@@ -520,6 +993,7 @@ deb6ca8 librbd: force-remove journal when disabling feature and removing image
 cf65ed9 rbd: Skip rbd cache flush if journaling is enabled under aio_flush
 caad884 mon: Monitor: validate prefix on handle_command()
 3250c4d rgw_swift: newer versions of boost/utility no longer include in_place
+976ec94 packages.yaml: drop ceph-devel
 dd635e4 librbd: ignore missing object map during snap remove
 db7ce96 librbd: removal of partially deleted image needs id lookup
 c1a47c7 packaging: move parted requirement to -osd subpkg
@@ -543,6 +1017,7 @@ b751d48 librbd: flag image as updated after proxying maintenance op
 9b75275 install-deps.sh: use mk-build-deps instead of processing control
 a34b227 xio: add MNop.h to dist tarball
 393bf7e rgw: check for -ERR_NOT_MODIFIED in rgw_rest_s3.cc
+b2c60b7 rbd: added rbd-nbd fsx test case
 6b41d76 TaskFinisher: cancel all tasks wait until finisher done
 47605a2 msg/msg_types: update sockaddr, sockaddr_storage accessors
 762db30 rgw: support size suffixes for --max-size in radosgw-admin command
@@ -624,6 +1099,7 @@ bb279f1 librbd: refresh image if needed in mirror functions
 4a967eb Revert "osd/ReplicatedPG: for copy_get get omap, firstly check ob whether has omap."
 fd8f8af Revert "osd/ReplicatedPG: For omap read ops, it should check object wether has omap"
 d59ca31 Revert "osd/ReplicatedPG: When do omapclear, it should check object whether is omap."
+68f8114 Initial check in for a complete jewel-x suite
 d48a1ed rgw/s3website: Fix x-amz-website-redirect-location support.
 f4306de osdc/Objecter: upper bound watch_check result
 64f15b3 OSD: fix deadlock in OSD::_committed_osd_maps
@@ -639,6 +1115,8 @@ ec884a3 rgw/s3website: whitespace style fixes
 bf26b6e rgw/s3website: Fix ErrorDoc memory leak.
 36672c6 rgw/s3website: Fix x-amz-website-redirect-location support.
 3c0ac8e rgw/s3website: Implement ErrorDoc & fix Double-Fault handler
+8deed78 scrub_test: Shards are not marked in error when osd guesses
+f721e10 scrub_test: Handle list-inconsistent-obj changes in pull #8983
 cb9e9e1 msg/async: Implement smarter worker thread selection
 578ac8a Event: fix delete_time_event while in processing list
 8c7a13f test_msgr: add delay inject test
@@ -676,10 +1154,19 @@ e8b7dd4 rgw: remove unnecessary data copying in RGWPutMetadataBucket.
 63e0993 rgw: Fix updating CORS/ACLs during POST on Swift's container.
 4eded9a rgw: fix update of already existing account/bucket's custom attributes.
 30ee180 rgw: fix updating account/container metadata of Swift API.
+a80eb52 suites/rados: add test for 16113
 a32820d src/: remove all direct comparisons to get_max()
 f869594 PG::replica_scrub: don't adjust pool on max object
 1737ff3 hobject: compensate for non-canonical hobject_t::get_max() encodings
+414763d tasks: fix non-existent sleep function
+e9c3bea fs/volume_client: exercise the configurable prefix and ns_prefix.
+aad0d91 tasks/cephfs: add TestVolumeClient.test_purge
+c98f2d8 tasks/cephfs: test 'df' output in volumeclient
+cb1b663 tasks/cephfs: make mount point more configurable
+60fd1e1 tasks/cephfs: test volume client eviction
+fe74a2c suites: allow four remote clients for fs/recovery
 5d9ee88 pybind: configurable cephfs_vol_client prefix and ns_prefix.
+6fa3c49 rbd: provision volumes to format as XFS
 470605c ceph_volume_client: evict client also based on mount path
 726292e client: report root's quota in statfs
 46c2bd0 pybind: fix unicode handling in CephFSVolumeClient::purge
@@ -731,13 +1218,20 @@ f92c2a5 cls::journal: treat empty commit position as minimal
 e9f9916 rgw: add missing metadata_heap pool to old zones
 2266287 mds: wrongly treat symlink inode as normal file/dir when symlink inode is stale on kcephfs
 be9e85d tests: rm -fr /tmp/*virtualenv*
+68a1668 tasks/create_verify_lfn_objects: adjust to new ctx.manager location
 0bdc8fd rgw: clean up radosgw-admin temp command; it was deprecated and its implementation code was removed in commit 8d7c8828b02c46e119adc4b9e8f655551512fc2d
 5ffee35 mon : Display full flag in ceph status if full flag is set
 8fbb555 cls_journal: Select min commit position for new clients
 576ff0c cls_journal: remove duplicated key generation
 fae360f rgw: fix manager selection when APIs customized
+72618e9 suites/rbd: added replication stress test workunit
+8de26dd openstack: ovh renamed flavors: s/eg/hg/
 8bbb5ad osd/OpRequest: reset connection upon unregister
 e97cc2d osd: reset session->osdmap if session is not waiting for a map anymore
+d837f46 tasks/cephfs: find processes correctly
+e2191a8 tasks/cephfs: fix mount wait in test_mount_conn_close
+6c9a687 tasks/cephfs: fix ps usage to not truncate cmds
+ce2645b tasks/cephfs: update failover test for standby changes
 555cec9 ceph.in: fix exception when pool name has non-ascii characters
 305ebbc librbd: metadata retrieval added to open image state machine
 5c9ecea cls_rbd: async version of metadata_list helper method
@@ -778,10 +1272,57 @@ a40cfe4 rgw: remove -EEXIST error msg for ZoneCreate
 ff9c29a rgw: camelcase names of custom attributes in Swift's responses.
 4a3c9f3 rgw: fix realm pull and period pull for apache frontend
 a08caa6 rgw: handle errors properly during GET on Swift's DLO.
+9b2272b suites/teuthology/multi-cluster: make them pass again
 8163c4d rgw: don't unregister request if request is not connected to manager
 ffd545b rgw: keep track of written_objs correctly
 8356021 Pipe: take a ref to existing while we are waiting
 e0dfc55 qa/workunits/rbd: fixed rbd_mirror teuthology runtime errors
+0528544 tasks: fix {testdir}/data paths
+3cfc703 tasks/ceph.healthy: allow None as config
+82550b0 suites: add multi-cluster tests to the teuthology suite
+62c98fc tasks: update ctx.ceph.conf readers to use per-cluster conf
+ea9471c suites/rbd: add basic mirroring suite
+07ef48f tasks: add an rbd-mirror task
+452f6ca tasks/ceph: ignore EEXIST for the archive data dir creation
+a7a1a8f tasks/ceph: pull each mon dir only once
+395c697 tasks/ceph: only run ceph_log and valgrind_post once
+84bf818 tasks/ceph: make scrubbing cluster-aware
+b813a8e tasks/watch_notify_same_primary: adjust to new ctx.manager location
+ddd1c3f tasks/thrash_pool_snaps: adjust to new ctx.manager location
+0e1ff44 tasks/repair_test: clean up manager usage, adjust to new location
+d22b0a7 repair_test: Disable scheduled scrubbing to not mess up do_pg_scrub()
+527b80e tasks/reg11184: adjust to new ctx.manager location
+dbde48c tasks/radosbench: adjust to new ctx.manager location
+91ed7e8 tasks/rados: adjust to new ctx.manager location
+b9a06a5 tasks/populate_rbd_pool: adjust to new ctx.manager location
+bb2d9fe tasks/peering_speed_test: adjust for new ctx.manager location
+ada2a8e tasks/osd_failsafe_enospc: adjust for new ctx.manager location
+235583d tasks/divergent_priors*: adjust to new ctx.manager location
+c09e76b tasks/ceph_objectstore_tool: use existing manager
+5172920 tasks/ceph_manager: make utility_task cluster-aware
+5be9eaa tasks/ceph: make restart subtask cluster-aware
+5552bd5 tasks/ceph: make wait_for_mon_quorum cluster-aware
+965eb26 tasks/ceph: make wait_for_osds_up cluster-aware
+f90468b tasks/ceph: update ctx.manager usage to ctx.managers
+8e3ddfb tasks/workunit: work with roles that include cluster
+037ba88 tasks: move find_remote to util, rename and add helper
+08c97c5 tasks/blktrace: make cluster-aware
+d2309ac tasks/ceph_manager: make Thrasher cluster-aware
+af901c7 tasks/ceph_manager: make mount_osd_data() cluster-aware
+25e9471 tasks/ceph_manager: add cluster param to write_conf()
+42e7a8d tasks/ceph_manager: simplify remote lookup, and make it cluster aware
+8f0ebfa tasks/ceph_manager: parameterize CephManager with cluster
+6cf83d0 tasks/thrashosds: adjust to per-cluster managers
+0ee01fa tasks/thrashosds: add an option to specify the cluster to thrash
+eabf893 tasks/ceph: store cluster config in a per-cluster dict
+a2f0551 tasks/ceph: create a CephManager per cluster
+733d743 tasks/ceph: make healthy() cluster-aware
+149a78d tasks/ceph: make cephfs_setup() cluster-aware
+b368f56 tasks/ceph: make crush_setup() cluster-aware
+d17659d tasks/ceph: adapt to cluster-aware daemon.resolve_role_list
+f4fab8a tasks/ceph: make run_daemon() cluster-aware
+7c996e4 tasks/ceph: convert cluster creation to work with multiple clusters
+c264bcf tasks/ceph_client: add cluster param
 5f09b9a journal: reset watch step after pruning expired tag
 2ee1e0a rbd-mirror: additional debug messages during image replayer start/stop
 f9e6bd8 rbd-mirror: ensure proper handling of status updates during shutdown
@@ -807,6 +1348,8 @@ e885f1e radosgw-admin: fix 'period push' handling of --url
 818166e doc: fixup: "rbd-mirror daemon" instead of "rbd-daemon"
 7be281d debian/control: dh_systemd_start is in the dh-systemd package
 e463aa8 debian: install systemd target files
+4f154a7 make-rpm.sh: support openSUSE
+58e86bd add user-data for openSUSE 42.1
 3a66dd4 (tag: v10.2.1) 10.2.1
 70018bf os/FileStore::sync_entry check for stop in after wait
 72c9b6f osd: remove all stale osdmaps in handle_osd_map()
@@ -882,7 +1425,7 @@ b55514c rgw: RGWHTTPManager, avoid referring to req_data->client when completing
 e5312b1 rgw: RGWHTTPManager, can call stop() more than once
 cbea993 rgw: RGWReadRESTResourceCR, fix refcounting
 93a65f3 rgw: RGWReadRemoteDataLogShardCR fix destructor
-2de1669 rgw: RGWDataSyncStatusManager, cleanup if failing init
+2de16690 rgw: RGWDataSyncStatusManager, cleanup if failing init
 57266e6 rgw: rest crs, explicitly call cleanup
 6b86332 rgw: more leaks fixes
 0fb4854 rgw: drop a reference to http op
@@ -919,6 +1462,7 @@ c1279d8 mds: remove inc array from mdsmap
 9d5162f test/mds: add test for symbols in paths
 af3a4e4 mds: fix auth caps with hyphen in path
 3674341 osd/PG: update info.stats.* mappings on split
+fbf26d1 suites/hadoop: use xfs
 075ee03 rgw_op: pass delete bucket op to master zone first
 1527b56 rgw: add errno entry for -ENOTEMPTY
 791eba8 fix deb package /etc/default/ceph location
@@ -959,7 +1503,10 @@ c72f0bc rpm: implement scriptlets for the post-split daemon packages
 81f4073 systemd: enable all the ceph .target services by default
 f4d63af python_cephfs: rule out empty/None volume_id
 b609017 python-cephfs: use rados namespace for data isolation.
+996e21a Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
 96b3726 Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
+9d4b0b9 Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
+ecf6a57 Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
 6c1163c admin-socket: use chown instead of fchown
 3963de7 global-init: fixup inconsistent use of ceph ctx and conf
 2bc4194 global-init: chown pid files
@@ -969,12 +1516,45 @@ f7e6b3c global-init: check init flags and set accordingly
 d4afe94 global-init: add a path chown wrapper function
 770ae9e ceph-context: add function to set init flags
 77fdbf1 Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
+f216516 rados/monthrash: debug client on librados api tests
 1fa533e fix ceph init script
 7acbefa test: Fix ceph-objectstore-tool test to run manually from src non-cmake
+a2e0e00 Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
+dc429cb Signed-off-by: Tamil Muthamizhan <tmuthami at redhat.com>
 af4b31c Drop --setuser/--setgroup from osd prestart
+b095cc6 rados/objectstore: add ceph_test_keyvaluedb test
 8a86d08 osd: fix backwards min/max osd utilization
+5962bfa rgw: increase sleep to 15 seconds
 3a9fba2 (tag: v10.2.0) 10.2.0
+a534c1c rgw: move xfs to a separate directory
 f86f73f rgw: drop rest op reference in error path
+a66db15 rgw: move xfs to a separate directory
+844b0c3 rgw: force frontend to be civetweb
+7b5ab7a rgw: fix debug messages
+3e8f550 rgw: fix zone get with multisite
+9472ee4 rgw: configure users on all clients
+724e7e0 rgw: do not overwrite user_info with zone user_info
+f092008 rgw: allow already exist realm
+fd6a6f8 rgw: increase timeout for waiting for master to 20 seconds
+c5ee9de rgw: we can have multicluster without multizone
+d03594e rgw: no need to do period pull
+564a883 rgw: non master zones need to be created on the remote gateway
+20dab79 rgw: mtime is now a time string with microseconds
+b75011f rgw suite: add --rgw-zone
+82e0338 rgw suite: create default zone after zonegroup creation
+895f9d3 rgw suite: debug zone info
+1673e9f rgw suite: set default zonegroup on all clients
+5f9b600 rgw: default zone should be always master zone
+e88ee58 rgw suite: different clients have different default zones
+157600b rgw suite: In multisite configuration start the master gw first
+c203974 rgw suite: use master_zonegroup not master_region
+b8120f7 rgw suite: use zonegroups instead of regions
+1c71383 rgw: move realm pull and period pull to pull_configuration
+d911214 rgw: update and commit period
+09891a2 add realm configuration to all multisite test
+63192cb rgw: add realm creation
+6d066a7 rgw suite: always add --rgw-frontends fastcgi
+2116012 use ceph-master branch for s3tests
 88369e2 doc: rgw multisite, add pools section & minor cosmetic improvements
 7ca01d4 rgw_admin: improve period update errors
 558863a test: set a default $CEPH_ROOT env variable
@@ -982,6 +1562,13 @@ ea2c703 rgw: clean async rest ops in calling cr
 2a15418 rgw: drop async cr reference on simple cr completion
 0a361d5 rgw: RGWRealmWatcher::watch_restart() also unwatches watch
 fe0f4bb rgw: stop cr manager in destructor
+f0f035e suites/krbd/unmap: specify xfs
+226a506 tasks/cephfs: check data written to NS.
+52b13e8 tasks/ceph: allow set allow_multiple to fail
+1cc24d5 suites/rest: specify xfs
+b4de41e suites/fs: specify xfs as needed
+0521da4 suites/rgw: specify xfs as needed
+a2b80c3 suites/rbd: specify xfs as needed
 f0e3b61 osd: fix noisy debug line
 61acd9d doc/release-notes.rst: clarify ceph UID/GID assignment on SUSE
 b74a9dd doc/release-notes.rst: minor grammar and style corrections
@@ -1006,6 +1593,7 @@ a8efd40 mds/FSMap: use _ in key name
 02f5885 osd: add option to disable startup check
 8c1a8a6 osd: refuse to start if configured object limits don't work
 a13ae6f vstart.sh: --short to set ext4-friendly object name and namespace limits
+e265bee rados: specify short names for misc jobs
 413b3e7 osd/ReplicatedPG: fix implementation of register_on_success()
 f1cfcff mailmap: Jenkins name normalization
 b3991d0 mailmap: Abhishek Lekshmanan name normalization
@@ -1076,6 +1664,8 @@ a7bd5e4 cmake: Reorganized test dir, added cmake functions
 e57d7c1 rbd: add support for relaxed image spec validation
 6579c7d common: new rbd image spec validation option
 18ea756 Removed parentheses for if statement.
+64f383b suites/upgrade/hammer-x: specify xfs
+32719a6 rados: specify xfs
 d1ed6eb update release-notes manually
 df4e134 msg/simple/DispatchQueue: inject internal delays in special event dispatch
 0b0d584 Fixes for python omap method return values.
@@ -1088,11 +1678,15 @@ ae604de rgw: handle no current_preiod in is_syncing_bucket_meta
 a4819a6 release-notes: v10.1.2 release notes (draft)
 a353eac rgw_file: fixup attrs across renames
 bf299dc rgw: try to use current period id in a few more cases
+7f78ea7 fs/ext4: longer names
 c04fd42 osd: fix watch reconnect race
 8166042 osd/Watch: slightly more informative debug output
 0f8585c doc: fix dependencies
 6208437 systemd: Use the same restart limits as upstart
+562d4f6 fs/ext4: max of 256 works better
 a201365 rgw: call rgw_log_usage_finalize() on reconfiguration
+f4d39b0 fs/ext4: set shorter object name limits so that osds can start
+2b7dd17 rados/basic: test over xfs (not just btrfs)
 4b1d169 test: image replayer needs dummy remote mirror peer uuid
 5230967 rgw: RGWPeriod::reflect() sets master zonegroup as default
 877d44b journal: race possible when re-watching a journal object
@@ -1131,6 +1725,8 @@ f87af25 rgw_file:  declare an attribute for all Unix/rgw_file attrs
 489324f doc/configuration/filesystem-recommendations: recommend against ext4
 a8e2869 doc/release-notes: mention ext4 in the release notes
 112649f doc/start/os-recommendations: drop ancient 'syncfs' note
+649a5e0 Replaced ext4 with xfs
+9a21435 suites/rgw/multifs: do not test on ext4
 638fd2e doc/rados/configuration: minor cleanup of inline xattr description
 2601e2f rgw/rgw_rados: use to_str() instead of c_str()
 d56e439 buffer: add list::to_str()
@@ -1144,10 +1740,15 @@ d5bc886 xio: refactor release_xio_req -> release_xio_msg
 bde87d3 xio: fix invalid access to member pointer before it is being initialized
 5a746e6 xio: use const for input argument in 2 functions' prototype
 8b98556 PG: set epoch_created and parent_split_bits for child pg
+7f0ad8a rgw: use civetweb if no frontend was configured
 2be6017 pybind/Makefile.am: fix build with ccache
+a739533 FIXUP 'cephfs: update tests to enable multimds when needed'
 142610a os/filestore: fix return type mismatch for lfn parse
 bd1c548 test: fix ut test failure caused by lfn change
 8087cfa librbd: do not return a failure if a peer cannot be notified of update
+1c4e019 cephfs: enable dirfrags in test_data_scan fragmentation test
+f23466a Added civetweb test case
+9d7801d Fixed distro symlinks
 45219e0 Fixed ceph-common install.
 fd2f455 mds: take standby_for_fscid into account in FSMap::find_unused()
 b6d8c32 librbd: Fixed bug in disabling non-primary image mirroring
@@ -1173,6 +1774,10 @@ a33ee34 qa/workunits/rbd: switch qemu test script shell to bash
 0e4a92e crush: fix typo
 0a622e6 doc: rgw admin uses "region list" not "regions list"
 3c77292 journal: fix final result for JournalTrimmer::C_RemoveSet
+a33517a buildpackages: add build timeouts.
+df5d147 buildpackages: support ubuntu 12.04.
+cd4b7ad buildpackages: fix configure on Ubuntu 12.04.
+785801c buildpackages: catch VM instances in ERROR state.
 3b54d5d test/rados/misc.cc: add long locator key and namespace tests
 cb03d4d LFNIndex: use chain_getxattr_buf
 c7db303 chain_xattr: add chain_getxattr_buf
@@ -1180,15 +1785,18 @@ c7db303 chain_xattr: add chain_getxattr_buf
 e4916f6 LFNIndex::list_objects: lfn_translate does not set errno
 25f937e FileStore::set_xattr_limits_via_conf: add warning if max xattr size smaller than max name
 18b9f95 test/objectstore/chain_xattr.cc: add test for ensure_single_attr
+6927f64 rgw: add default zone name
 73778f1 LFNIndex: ensure that lfn and directory attrs are written atomically
 ac750ce chain_[f]getxattr: always use size, no reason to consider CHAIN_XATTR_MAX_BLOCK_LEN
 8dc0330 chain_xattr: s/onechunk/skip_chain_cleanup, add ensure_single_attr
 21487fd os/,osd/: restructure the rados name length check
+4534e8e rgw: policy acl format should be xml
 8770043 ceph-disk: fix set_data_partition() when data is partition.
 a330078 rgw-admin: fix period delete error message
 3320f8f rgw-admin: remove unused iterator
 4b0e39e osd/ReplicatedPG: make handle_watch_timeout no-op if !active
 21f0216 ceph-disk: Accept bcache devices as data disks
+465ff97 buildpackages: add ubuntu 16.04 xenial support
 64a8a6a rbd-mirror: fixed bug that caused infinite loop when disabling image mirroring
 a651598 mailmap: Luo Kexue name normalization
 c36c5d4 mailmap: Ning Yao affiliation
@@ -1216,9 +1824,18 @@ cda1c1a FSMap: add output for enabled_multiple flags
 2429463 ceph-dencoder: add FSMap
 15cabdc rgw_ldap: make ldap.h inclusion conditional
 d58e5fe rgw: fix problem deleting objects beginning with double underscore
+7e53203 rados/singleton-nomsgr: add lfn upgrade tests
+4629585 tasks: add create_verify_lfn_objects
+93892eb ceph_manager: return exit status on do_get, do_put, do_rm
+269d600 ceph_manager: add do_rm
+670ca43 ceph_manager: extend do_put and do_get to allow a namespace
+c8f7694 ceph_manager: fix do_get to actually do a get
+da9c65b rados_api_tests: reduce the osd_max_object_name_len value
+f2eb0a5 short_pg_log: make the log less short
 492a572 test: fix memory leaks in rbd-mirror test cases
 acfc2b1 test: avoid leaking librados connections when creating pools
 88e244c rbd: journal reset should disable/re-enable journaling feature
+9b4e317 distros: add ubuntu 16.04 / xenial
 2fa4147 osd/ReplicatedPG: clean up temp object if copy-from fails
 a0bb575 ceph_test_rados_api_misc: make CopyFrom omap test be a big object
 134416a Revert "rados: Add new field flags for ceph_osd_op.copy_get."
@@ -1226,13 +1843,21 @@ a0bb575 ceph_test_rados_api_misc: make CopyFrom omap test be a big object
 98744fd logrotate.conf: poke ceph-fuse after log rotation
 91e0be0 ceph-fuse: reopen log file on SIGHUP
 60679fc librbd: restore out-of-band future callbacks to avoid lock cycles
+c655de7 buildpackages: discard google-perftools on trusty aarch64
 e98d046 MDS: unregister command add in clean_up_admin_socket
 2f4bc84 rgw: aws4 subdomain calling bugfix
+2113fcf samba: run kclient mount tests on testing kernel
 dab0b55 rgw: the map 'headers' is assigned a wrong value
+5e7e017 cephfs: update tests to enable multimds when needed
 f01261f authtool: fix test output validation & minor whitespace.
 408964e journal: fix context memory leak when shutting down live replay
 f931066 journal: Future does not require metadata shared pointer
 d3dbd85 mon: warn if 'sortbitwise' flag is not set and no legacy OSDs are present
+5ab482e arch: teuthology must use aarch64, not arm64
+42836aa buildpackages: runabove has 30GB root disk max
+3e7f311 buildpackages: install rsync
+4f10291 buildpackages: add missing arm64 to make-deb.sh
+ac38373 tasks/cephfs: test health messages from readonly stat
 d544e44 mds: validate file layouts during replay
 9414bef debian/rules: include ceph-mds-*.conf upstart files in ceph-mds
 45a0bc1 mds: add operator<< for file_layout_t
@@ -1318,6 +1943,7 @@ aedc529 test: Fix test to run with btrfs which has snap_### dirs
 3dd5249 librbd: avoid throwing error if mirroring is unsupported
 280b8a1 rgw: add exclusive flag to set_as_default()
 7567b45 rgw: add exclusive flag to RGWRealm::create_control
+49c8b57 Update start.yaml
 27e4c46 rgw_admin: improve the orphans find help
 c4efef5 rgw: add a few missing cmdline switches in help
 09b5356 cls_journal: fix -EEXIST checking
@@ -1326,11 +1952,13 @@ c4efef5 rgw: add a few missing cmdline switches in help
 a29b96a debian/rules: put init-ceph in /etc/init.d/ceph, not ceph-base
 09c4195 MDSMonitor: introduce command 'fs set_default <fs_name>'
 602425a configure: Add -D_LARGEFILE64_SOURCE to Linux build.
+8e11f0d suites: radosbench 180 -> 150s
 639f158 mon: remove unnecessary comment for update_from_paxos
 f5ef4d4 cmake: add missing librbd/MirrorWatcher.cc and librd/ObjectWatcher.cc
 919ff4f releases: what is merged where and when ?
 9f60e07 releases: update understanding the release cycle
 c1c71ec release-notes: v10.0.5 release notes
+8661940 buildpackages: add debian 8.0 support
 535eb0e rgw: fix compiling error
 4e1e81a qa/workunits/rbd: use cluster1/2 instead local/remote as cluster names
 2226019 qa/workunits/rbd: add helper to determine rbd-mirror local cluster
@@ -1347,6 +1975,7 @@ c97ce4f test/objectstore: fix a -Wsign-compare warning in compiling.
 231edd7 cls_rbd: pass WILLNEED fadvise flag for object map update ops
 0510301 objclass: add cls_cxx_read2/cls_cxx_write2
 7e58045 OSD::handle_pg_create: check same_primary_since
+c467a0f Added blogbench.yaml to parallel before and after -x upgrade steps
 57db617 librbd: integrate listener for new mirroring notification payloads
 e95a383 librbd: send notifications on mirroring updates
 3748b88 librbd: helper methods for mirroring notifications
@@ -1355,6 +1984,7 @@ e95a383 librbd: send notifications on mirroring updates
 184ec19 pybind: remove language="c++"
 68ce9a9 cmake: remove unneeded C++ from cython build
 bb07a1b rbd: rbd-mirroring: Automatically disable image mirroring when image is removed
+a7194e5 openstack: thrash_cache_writeback_proxy_none.yaml need bigger disks
 f254486 os/bluestore: _do_write: fix _do_zero_tail_extent to handle shared extents
 2ed445e os/bluestore: _do_zero: simply truncate up if past eof
 dcc5cea os/bluestore: prevent rename src from getting trimmed from lru
@@ -1386,6 +2016,8 @@ a58ffab os/bluestore: fix fsck vs enodes
 95a5f56 os/bluestore: fix off-by-one on caching tail block
 c1b42e9 os/bluestore/KernelDevice: print buffered flag in debug line
 8dc2c21 ceph-detect-init/run-tox.sh: FreeBSD: No init detect
+935a7eb rados/singleton-nomsgr/all/full-tiering: s/readforwad/readproxy/
+d823ff6 rados/singleton: thrash proxy (not forward) cache mode
 3995caf cls::rbd: read_peers: update last_read on next cls_cxx_map_get_vals
 557955a qa: update rest test cephfs calls
 c298959 osd: skip heartbeat_check for osd which we haven't sent ping to
@@ -1394,6 +2026,8 @@ c298959 osd: skip heartbeat_check for osd which we haven't sent ping to
 aacadad cmake: add StandardPolicy.cc to librbd
 383d48b rbd-mirror: fix missing increment of iterators
 c7a0223 test/pybind/test_rados: force setting readonly cache mode
+d9a8f49 openstack: define the resources required to run a rados suite
+1007011 Added blogbench.yaml to parallel before and after -x upgrade steps
 23336a3 vstart: fix up cmake paths when VSTART_DEST is given
 e3dc7c7 os/ObjectStore: fix _update_op for split dest_cid
 ded7a77 qa/workunits/rbd: add basic failover/fallback test case
@@ -1451,6 +2085,9 @@ ad2e6f4 ceph.in: update for cmake path changes
 f02f1d1 cmake: Cython modules building with cmake
 e29bac6 osd: make rs_RepRecovering_latency fully lowercased
 ab0844c osd: drop l_osd_hb_from perf counter
+9263bef tasks/cephfs: update vstart_runner for cmake build changes
+240c0dd tasks/cephfs: reproducer for #15303 client bug
+4f33f26 tasks/cephfs: tidy Mount.ls to return [] on empty dir
 9570f54 osd: fix wrong dump format for bench command
 44bbdb6 mailmap: Adam C. Emerson name normalization
 4a6f8b3 mailmap: Jenkins name normalization
@@ -1473,11 +2110,19 @@ c61ae3e rbd: rbd-mirroring: Added unit tests to test enable/disable image mirror
 e5a4d21 doc/release-notes: fix indents
 92d1857 doc: fix typo, duplicated content etc. for Jewel release notes
 1c9332b pybind/Makefile.am: Prevent race creating CYTHON_BUILD_DIR
+6122002 Added blogbench.yaml to parallel before and after -x upgrade steps
 359d832 qa/workunits/cephtool/test.sh: fix cache mode tests
 90fe8e3 mon/OSDMonitor: require force flag to use obscure cache modes
 d7da688 osd: add 'proxy' cache mode
+b976563 buildpackages: do not try tcmalloc on arm64
+b9b00a5 buildpackages: disambiguate nic on server create
 f22676b mon: remove unused variable
 8e78ed3 pybinding: python3 fixes
+d4e3cec dummy: reduce run time, run user.yaml playbook
+a8f4321 buildpackages: build will work on a range of flavors
+c5182f2 buildpackages: do not hardcode x86_64 in build scripts
+f9c4858 buildpackages: use architecture suffixed images
+40277ca archs: add files for each known architecture
 13bd851 rgw_file tests: allow override of owner_uid and owner_gid
 5b44a34 mon/MonClient: fix shutdown race
 a5b4460 common: fix race during optracker switches between enabled/disabled mode
@@ -1487,8 +2132,12 @@ b3930c5 Fixes headline different font size and type
 81792b3 rgw_file: set owner uid, gid, and Unix mode on new objects
 fa45089 Makefile-env.am: set a default for CEPH_BUILD_VIRTUALENV (part 2)
 239f164 test/system/*: use dynamically generated pool name
+89e456a suites: run rados bench for max 180 seconds
 0ab4813 script: subscription-manager support (part 2)
 1cbe2bd ceph_test_rados_api_misc: debug LibRadosMiscConnectFailure.ConnectFailure
+843619d suites/upgrade: remove stable upgrade suites
+1686a20 suites/upgrade: remove obsolete *-x suites
+2b3ec8d suites: run rados bench for max 180 seconds
 bdca28d messages/MOSDOp: clear reqid inc for v6 encoding
 038d1b6 osd: duplicated clear for peer_missing peering_missing is also cleared in clear_primary_state
 6577005 doc: amend Fixes instructions in SubmittingPatches
@@ -1506,9 +2155,12 @@ a4e63b5 doc: Updated CloudStack RBD documentation
 5bf340e Update SSL support a bit.
 8e87ce7 cmake: add FindOpenSSL.cmake
 3e8bb4b doc/rados/operations/crush: fix the formatting
+2e10639 fs: remove extra ceph-fuse invocation from TestMisc yaml fragment
+b977e86 tasks/cephfs: test for request caps during lookup/open
 c3fcd83 rgw: fetch_remote_obj() fix handling of ERR_NOT_MODIFIED
 103f4b8 rgw: parse mtime only when header exists and not error
 7fe2657 rgw: fix lockdep false positive
+dc0b181 upgrade/infernalis-x: allocate OpenStack volumes for each OSD
 206cf5a cls_rbd: mirror_image_list should return global image id
 ccdb0d1 rgw: Use count fn in RGWUserBuckets for quota check
 96ae8bd (tag: v10.1.0) 10.1.0
@@ -1523,6 +2175,7 @@ e29257e osd/pg: set dirty_info if we succeeding in updating log from master
 f45094f osd/pg: set dirty_info after we dirty history
 5b3da26 Makefile-env.am: set a default for CEPH_BUILD_VIRTUALENV
 fa05d80 debian/control: try installing virtualenv first, if it exists
+fcde1f6 suites/upgrade/hammer-x: do upgrade before starting workload
 4f74856 rbd: rbd-mirroring: Replayer registers in remote journal with mirror_uuid
 9872dcc rbd: allow librados to prune the command-line for config overrides
 eb583cb build/ops: in jessie virtualenv is in package virtualenv
@@ -1575,6 +2228,7 @@ c6623c0 mon: config setting to skip FSMap::sanity
 d4f2807 tools: update naming s/handle_mds_map/handle_fs_map/
 980f684 mon: name cleanup s/mdsmap_bl/fsmap_bl/
 122e0d3 messages: support features in MFSMap
+41b2797 tasks/cephfs: extend TestMultiFilesystems
 55f4ade release-notes: draft v10.1.0 release notes (manual edits)
 fb4e5cc rgw: Do not send a Content-Length header on a 304 response
 7051335 mailmap: Eric Lee affiliation
@@ -1584,6 +2238,7 @@ a8428a9 release-notes: draft v10.1.0 release notes
 14dc847 cmake: fix mrun to handle cmake build structure
 d51f564 doc: fix wrong type of hyphen
 ccc3955 doc/release-notes: known issues with 10.1.0
+fb8c720 upgrade: use infernalis instead of hammer for rbd tests
 c9245e7 rgw: fix error message for zone delete
 a67f0cf rbd-mirror: asok commands to get status and flush on Mirror and Replayer level
 d66e8f6 rbd-mirror: async flush for ImageReplayer
@@ -1594,19 +2249,35 @@ acea6ef rgw: fix zone delete message
 b50caa4 global/signale_handler: print thread name in signal handle.
 ba1dd35 mon: fix mixed-version MDSMonitor
 a6adb88 mon/PGMonitor: do not clobber pg_stat update with map_pg_creates
+1b6795a upgrade/infernalis-x/point-to-point: do upgrade before test workload
+83f1f46 upgrade/hammer-x/f-h-x-offline: fix mon startup
+bea9b5b tasks: fix ceph_deploy call to Filesystem()
+8e53f38 suites/rbd: add ceph_test_rbd_mirror workloads
 0effb9e qa: test_rbdmap_RBDMAPFILE.sh workunit
+c4237de rbd: add singleton to assert no rbdmap regression
+eb8c02a upgrade/infernalis-x/point-to-point: run rgw + s3tests in sequential
+6111939 upgrade/infernalis-x/parallel: do upgrade before workload
+534e39a upgrade/hammer-x/f-h-x-offline: stop mons during upgrade
 57c5754 doc/dev: add "Deploy a cluster for manual testing" section
+917bc9a upgrade/infernalis-x/point-to-point: upgrade before workload
+e58ef3c upgrade/infernalis-x/point-to-point: run steps in series
+4ffaf2b upgrade/infernalis-x/point-to-point: remove stray upgrade steps
+a7e9d16 upgrade/infernalis-x/parallel: do workload in series
 098fea2 rgw: add zone delete to rgw-admin help
 3a9a60b xio: xio_init needs to be called before any other xio function
+322b79f test_msgr: improve debug ms level
 c4364b1 packaging: align radosgw package description
 7da141d rbdmap: manpage
 bbac766 rbd-mirror: fix long termination due to 30sec wait in Mirror::run loop
+1cc6577 buildpackages: provision a small build machine for tests
 c3adc90 rbdmap: drop init- prefix from logger tag
 27bbf3e rbdmap: default RBDMAPFILE to reasonable value
 a7a3658 systemd: set up environment in rbdmap unit file
+f3aeb7b suites/rbd/maintenance: use symlinks for cluster config
 9f6ec70 qa: add workunit to run ceph_test_rbd_mirror
 9722dee journal: prevent race injecting new records into overflowed object
 f6408ec rgw_admin: new command to get bilog status
+eff7ada morepggrow: enable filestore and journal soft backoffs
 9a6bf6c config_opts: disable filestore throttle soft backoff by default
 f5375dc packaging: added rbd-mirror startup scripts
 58d4734 upstart: new rbd-mirror daemon scripts
@@ -1614,14 +2285,17 @@ f5375dc packaging: added rbd-mirror startup scripts
 f0143bb OSD: bail out of _committed_osd_maps if we are shutting down
 ebbfdc7 test_pool_create.sh: put test files in the test dir so they are cleaned up
 2b3f01e rgw: don't record fetch remote obj in new multisite
+b483793 upgrade/hammer-x/parallel: run workloads in sequence, not parallel
 55c90fc rgw/rgw_admin: fix bug where bucket-list and bucket-stats results are incorrect when the first character of the bucket name is an underscore
 d9017fb qa/workunits/rbd: new online maintenance op tests
 3e61f96 doc/release-notes: v10.1.0 draft jewel notes
+e463ac2 suite/rbd: test dynamic features and rebuild object map
 0fd674b osd/OSD: fix build_past_intervals_parallel
 2da5054 Revert "osd: build_past_intervals_parallel() add diagnostics before assert"
 fab2144 cls/rgw: fix use of timespan
 65858fe osd/PG: indicate in pg query output whether ignore_history_les would help
 5a429e0 doc/release-notes: 9.2.1 notes
+5caa4ed suites/rbd: add qemu tests with journaling
 959ae39 ceph_test_rados_misc: shorten mount timeout
 8b9ed00 os/filestore: fix warning
 811b8f5 qa/workunits/rest/test.py: don't use newfs
@@ -1643,8 +2317,12 @@ f2fd396 rgw: log message cleanup
 b839a06 osd: commit osdmaps before exposing them to PGs
 30e0f92 os/bluestore/BlueFS: wait for IO to complete before reaping the IO context
 f8cca62 mon: ignore msg without session attached
+6aad8c0 Changed run time for radosbench to 200
+0ad1e40 Changed run time for radosbench to 200
 b0d9b0d mon: remove 'mds setmap' command
 cd14bcc tools/cephfs/DataScan.cc: fake non-empty dirstat for injected directory
+eb4b43b upgrade/hammer-x: fix ec portion of workload
+e0a1c38 suites/rados/thrash: vary min_{read,write}_recency_for_promote
 13c7ba4 test: rbd-mirror: different log and asok location for local/remote contexts
 9bdf337 mds: avoid scrubbing (CDir*)NULL
 21e127b rbd-mirror: make remote context respect env and argv config params
@@ -1672,6 +2350,7 @@ ac443a8 features: deprecate CEPH_FEATURE_INDEP_PG_MAP
 c21b796 features: deprecate CEPH_FEATURE_QUERY_T
 cb325b2 features: free CEPH_FEATURE_OMAP bit, hasn't been used since before hammer
 f381390 features: deprecate CEPH_FEATURE_MONCLOCKCHECK
+228f71e tasks/ceph.py: Remove *.pid at end of run
 60429c3 rgw: take a reference to the cr in async cr workers
 1f74171 rgw: finalize reqs through a temporary set
 4083271 rgw: update data_log only when completing the op
@@ -1679,6 +2358,8 @@ f381390 features: deprecate CEPH_FEATURE_MONCLOCKCHECK
 b15ff2f test: rbd-mirror: compare positions using all fields
 e348a7e rbd-mirror: use pool/image names in asok commands
 1539d90 rgw-ldap: conditional build
+0304c95 upgrade/infernalis-x: do not run tier *Whiteout tests against post-infernalis cluster
+c619feb rados/singleton-nomsgr: run singleton upgrade tests on ubuntu
 9789c29 osd: populate the trim_thru epoch using MOSDMap.oldest_map
 81b07be mstart: start rgw on different ports as well
 e81c81b rbd: rbd-mirror: PoolWatcher watches for mirroring enabled images
@@ -1688,6 +2369,13 @@ e81c81b rbd: rbd-mirror: PoolWatcher watches for mirroring enabled images
 50b53ea qa/workunits/rbd: rbd_mirror was extracting the incorrect image id
 f2e3988 qa/workunits/rbd: use unique logs for each rbd-mirror daemon
 3cf8952 journal: refetch active object before defaulting to new tag
+63c1a6f buildpackages: ensure clone to a ceph directory
+df3eba0 buildpackages: for source inclusion in debian
+33b4397 buildpackages: allow for more parallel builds
+c60ace3 buildpackages: enable autoindex in the packages repository
+b30655e buildpackages: do not require the long tag version
+5e145c1 buildpackages: also pull tags from the user ceph fork
+ddd5f7c buildpackages: create release packages
 b6101e9 vstart: support creating multiple cephfs filesystems
 e9b70e4 mon: s/mdsmap/fsmap/ in "ceph status"
 dd58719 mds: plain text prints for FSMap/Filesystem
@@ -1697,6 +2385,7 @@ b296629 mon: config setting to skip FSMap::sanity
 ff3f0f8 ceph.spec.in: Make ceph-common require libcephfs1
 cd4751a test: rbd-mirror: add "switch to the next tag" test
 7a50b3d xio: add prefix to xio msgr logs
+d1f4568 workunit: git checkout sha1/branch/tag
 f03f99d cls/rgw: fix FTBFS
 70bf219 mds: fix mds_info_t::dump
 57fa912 mds: fix whitespace in is_cluster_available
@@ -1726,8 +2415,11 @@ e82a801 rgw: extend date/time parsing
 fe6dc35 packaging: remove sub-package dependencies on "ceph"
 a7fcab1 osd: handle_osd_map whitespace
 5f3f37f packaging: move cephfs repair tools to ceph-common
+a3d78d0 suites: fix assumed filesystem pool name
+9e202b4 tasks/cephfs: support old mdsmap command during setup
 921409f journal: reschedule watch if no entries available during live replay
 4c42b95 librbd: correct valgrind memcheck errors
+96ef6ab workunit: git-reset --hard does not accept a branch
 36af39c qa/workunits/rbd: disable deep-flatten during permissions testing
 c287d79 tools/cephfs: fix tmap_upgrade
 8a72491 rbd: snap list should open image as read-only
@@ -1747,9 +2439,11 @@ bbde2f0 os/filestore: exit if we fail to remove any xattr
 82419db osd: pg: drop get_nrep() method, which is never used by anyone
 ed02ce5 osd: fix misnamed macro OSD_SUPERBLOCK_POBJECT
 917e06b doc/dev: add section on interrupting a running suite
+77e35c0 suites: remove unused mdss from RADOS tests
 b5315d2 tools/cephfs/DataScan.cc: don't set directory inode's size to non-zero
 13ae262 client: add debug hook for requesting caps on open/lookup/getattr
 22fe493 qa/workunits/rados/test.sh: test tmap_migrate
+4a65da5 cephfs/test_client_limits.py: trim cache when mounting into subdir
 9ea6569 osd: pg: skip over update_heartbeat_peers() on non-primary pg
 3ce61eb osd: pg: drop stray_purged field, which is never used.
 b20cd76 osd: make needs_backfill() fast
@@ -1834,6 +2528,7 @@ b4157fa doc: add doc for osd_crush_initial_weight
 6c7dd80 rgw: increase sync lock lease period
 89bf43c rgw: data sync shard controller re-read sync status
 5f5adfe rgw: fix metadata sync backoff
+96a4567 upgrade/client-upgrade: only use supported features for RBD tests
 7c62ddf ceph-detect-init: return None at seeing unknown debian distro
 52ccf75 Revert "rgw ldap"
 e2bbd17 test: verify proper update of object map during rbd-mirror image sync
@@ -1879,6 +2574,7 @@ e52f7b4 mds: fix FSMap upgrade with daemons in the map
 e10c6e4 common: buffer: put a guard for stat() syscall during read_file
 1ea1735 osd: fix wrong counter for batch objects removal during remove_dir()
 12d151f osd: initialize last_recalibrate field at construction
+4e14565 Fix review comments
 f1a4490 ceph.spec.in: disable lttng and babeltrace explicitly
 996be8e qa/workunits/rbd: use POSIX function definition
 d9af48a ReplicatedPG: be more careful about calling publish_stats_to_osd() correctly
@@ -1890,6 +2586,7 @@ da9120a rgw: dump X-Versions-Location HTTP header of Swift API.
 a230a38 debian: make infernalis -> jewel upgrade work
 ba4badf FileStore: fix initialization order for m_disable_wbthrottle
 81b9928 ceph-detect-init: return systemd on Debian Jessie
+4466d2f rbd: add some missing workunits
 4c97aac Revert "test/time: no need to abs(uint64_t) for comparing"
 2b07695 unittest_compression_zlib: do not assume buffer will be null terminated
 2cc736c FSMap: actually include the legacy MDSMap in Filesystems when upgrading
@@ -1904,6 +2601,7 @@ a0c816b rgw: cleanup of debug messages
 d963eb7 os/ObjectStore: drop collection attr deprecated warnings
 f9dbf54 osd/PGLog: fix warning
 4bd31fa ghobject_t: use # instead of ! as a separator
+e805ebc Update fio version to 2.7
 5acb265 v10.0.5
 10ca7f3 debian/changelog: Remove stray 'v' in version
 48d929c msg: async: improve _send_keepalive_or_ack() a little
@@ -1932,6 +2630,22 @@ b61692b rbd-mirror: ImageReplayer async start/stop
 a795d34 librbd: protect against journal replay shutdown race condition
 aa6a8d3 librbd: replaying a journal op post-refresh requires locking
 fdeb125 test: periodic failure in TestJournalReplay.SnapRename
+a54bf91 tasks/cephfs: cover fancy layout in test_strays
+9915752 tasks/cephfs: enable multi fs flag
+e1634ed tasks/cephfs: debug in wait_for_daemons
+e666fdd tasks/cephfs: switch off extra daemons in TestStrays
+53f4430 tasks/cephfs: further thrasher fixes
+9ca3721 tasks/cephfs: simplify recreate()
+09f239f tasks/cephfs: update test_journal_repair for multi fs
+c94d502 tasks/cephfs: add TestStandbyReplay
+79357c2 suites: add fs/multifs
+212a507 tasks/cephfs: fix datascan for multiple filesystems
+e217666 tasks/cephfs: move the journaltool smoke test into python land
+951102d tasks/cephfs: rename Filesystem.reset to recreate
+b02afb2 tasks: fix mds_thrash for multi filesystems
+3c3804e tasks/cephfs: update test_runner for mds_cluster
+22b350d tasks: generalise cephfs classes for multi-fs
+8528ccc tasks: add cephfs TestMultiFilesystems
 ccc5e71 config-key: introduce command of 'config-key rm'
 9537762 osd: introduce command of 'osd tier rm-overlay'
 e2b297a osd: introduce command of 'osd tier rm'
@@ -1944,6 +2658,7 @@ b0b1dfb mon: Introduce command of 'auth rm'
 cecdc49 osd: cleanup: Specify both template types for create_request()
 99ec183 mds: fix stray purging in 'stripe_count > 1' case
 7663b9f crushtool: Don't crash when called on a file that isn't a crushmap
+3e96a92 packages: update for all the new debug packages we have in jewel
 2408f8d rgw: store system object meta in cache when creating it
 4d59b1d rgw: bucket link now sets the bucket.instance acl
 b988f79 rgw_admin: policy dump --xml backward compatibility
@@ -1954,15 +2669,21 @@ fd4d8ce include/encoding: do not try to be clever with list encoding
 3a8b0e3 rgw: remove unused header file
 dd24f51 rgw: support json format output for rados-admin policy command
 a1333f5 rgw: support json format for admin policy API
+9a631ce Removed branch: firefly from 4-after.yaml
+872aed9 Removed branch: firefly from 4-after.yaml
 c85573a rgw: also dump realm in radosgw-admin sync status command
 d8f4d9e FileStore: Added O_DSYNC write scheme
+3625b8c rados/singleton-nomsgr/all/11429: no mds
 d0f8713 test/pybind/test_ceph_argparse: fix reweight-by-utilization tests
 797f1d4 man/8/ceph.rst: remove invalid option for reweight-by-*
+88e86d7 rados: Enable odsync write for pggrow thrasher
 d9ac047 log: do not repeat errors to stderr
 d1627f5 xio: remove duplicate assignment of peer addr
+5a78390 suites: invoke libcephfs python tests
 9cf46b8 test: correct journal test failure caused by two merged commits
 8affb39 xio: remove unused variable
 e68e37f rgw: require openldap library client headers
+a693be2 tasks/cephfs: vstart & fuse improvements
 9b36285 mds: set FSMap encoding version to 6
 eb88101 mds: make FSMap.get_filesystem return const
 3c2f28a test: update argparse test for rmfailed
@@ -2004,6 +2725,8 @@ ce33a41 osd: drop unused from arg from handle_pg_peering_evt
 e929b0b osd: only pass history to handle_pg_peering_evt
 363e431 doc: Add French mirror
 868b794 mirrors: Change contact e-mail address for se.ceph.com
+100b0db Reflect that ceph-dbg is dropped
+705927f Add packages.yaml
 4c3c2ae common: WeightedPriorityQueue Boost 1.60.0 requires some more comparisons for advanced lookup and insertion functions.
 ca16037 pybind: flag an RBD Image as closed regardless of result code
 a8e82a3 librbd: permit watch flush to return error code
@@ -2040,6 +2763,7 @@ a572226 common: include malloc.h only on linux
 9e34417 ceph_test_libcephfs: shutdown without closing file/dir
 8fc1b1b client: close opened dirs when umounting
 f4b210e rgw: remove unused vector
+1c24f58 Fix recent failures due to HEALTH_WARN check issues
 79e2f18 client: fix root inode number for fuse
 d7e4221 rgw: handle error when fetching data log changes
 aab9166 xxHash: add .gitignore for build artifacts
@@ -2056,6 +2780,7 @@ ea29b71 rgw: don't equeue async cr rados operations if going down
 1c99dc2 doc/release-notes: v10.0.4
 7139a23 osd: handle boot racing with NOUP set + clear
 7eba5ba osd: inline advance_map()
+7784ffe rados/thrash/workloads/radosbench: shorten 300s->200s
 4ded44a journal: possible race condition during fetch playback
 3982895 test: fix errors introduced by rebase to master
 b37f135 journal: clean up playback notification handling
@@ -2088,6 +2813,9 @@ e48c708 doc/rados/operations/crush: rewrite crush tunables section
 754d210 packaging: Moving Cython into distro specific area
 019d9e1 packaging: Adding redhat-rpm-config
 7d48d21 packaging: Adding btrfs build require
+4859946 krbd: do not test unsupported striping feature
+eb29adf tasks/rbd_fio: create sane image name based on feature set
+6deba7c tasks/ceph_manager: dump pgs if other peering timeouts expire
 60b71ec stop: Add missing stop_rgw variable
 4a5875a doc/dev: start Testing in the cloud chapter
 f7eb860 rbd-mirror: avoid recursive lock in timer
@@ -2133,6 +2861,7 @@ f29091b mds: check dirfrag rstat when dirfrag is fetched
 d6a48d9 cls_hello: Fix grammatical error in description comment
 f4bd1fc mds: allow client to request caps when opening file
 5b57065 doc/dev: integrate testing into the narrative
+59585b6 suites/smoke/1node: move + to clusters/ subdirectory
 c823018 msg: remove duplicated code - local_delivery will now call 'enqueue'
 e904670 Event: fix clock skew problem
 a0572bc doc: detailed description of bugfixing workflow
@@ -2210,8 +2939,17 @@ bd4bd5c ceph-disk: simplify trigger
 4727d42 rgw: avoid showing payer when payer == owner
 b7d022f rgw: indexless buckets
 f1ac0de rgw: configurable index type
+ec0db68 tasks: add TestDamage.test_damaged_dentry
+ffee590 suites: update log whitelist for TestDamage
+356579b tasks/cephfs: update TestDamage
+aa193b3 tasks: move wait_for_health up into CephFSTestCase
+67b711d tasks/cephfs: optionally stat() in background
+7a97fcb tasks/cephfs: avoid sleep in test_client_cache_size
+41747d6 tasks/cephfs: remove extraneous ;s in test_client_limits
+21348ab tasks/cephfs: add test case for scrub repair
 905b1d9 rgw: don't override error when initializing zonegroup
 0b48c86 rgw: adjust error code when bucket does not exist in copy operation
+3458adb distros: add yaml fragments for openSUSE 13.2 and 42.1
 18a75f1 test: use StrEq for C-style string matching
 d0e3da2 common/buffer: correct list_iterator::operator!= behavior
 0357d87 test: librbd template specializations leaking between translation units
@@ -2288,6 +3026,7 @@ acfc06d ceph.spec.in: use %{_prefix} for ocf instead of hardcoding /usr
 e13a4b1 test: common/test_weighted_priority_queue Add Some More Corner Cases
 33f68b8 Revert "test/common/test_weighted_priority_queue Fix the unit tests since the"
 de001bd osd: common/WeightedPriorityQueue.h Re-add Round Robin between classes
+b3bcf9d ceph-ansible: Add openstack hints for volumes
 a2d58fc rgw: TempURL of Swift URL does support Content-Disposition override.
 c857fcf rgw: ONLY refactor dump_object_metadata() of rgw_rest_swift.cc.
 26f9d69 rgw: add support for overriding Content-Disposition in GET of Swift API.
@@ -2303,9 +3042,12 @@ eb5f00a msg: async: reset result code to errno for better tracing
 effcd77 BlueStore.h: Remove unneeded includes
 61026c8 cmake: Turned LTTng OFF
 597aaba configure.ac: cython is used for more than librbd now
+8387114 Install ceph-test
 13a5aac RPM: move scriptlets from ceph to ceph-base
 c6f317b AUTHORS: update email
 55eeee9 rgw: calculate payload hash in RGWPutObj_ObjStore only when necessary.
+4a450ca Disable rbd_cli_tests for now
+6df45fd Add ceph-deploy_hello_world.sh
 da84cba os/kstore: Latency breakdown in each stage of transaction for Kstore
 677d290 librados: race condition on aio_notify completion handling
 3aa0cce osd: add mon_reweight_max_osds to limit reweight-by-* commands
@@ -2348,6 +3090,8 @@ e08dd70 mds,mon: include features in beacon messages, MDSMap
 b0f692d os/bluestore: print Enode::hash w/ hex format.
 7c4f37d os/bluestore: make _clone work when object has overlay data.
 533b617 rgw: send proper ETag value during GET on DLO/SLO through S3 API.
+14211ba suites/fs/verify: debug mon too
+bb2f575 tasks/cephfs/filesystem: ceph_file_layout -> file_layout_t
 b87f4ef packaging: lsb_release build and runtime dependency
 aa2b891 buffer: use alignof for raw_combined allocation arithmetic
 ef80690 buffer: clean up raw_combined construction
@@ -2381,10 +3125,16 @@ ed9de5f librbd: support replay of maintenance ops
 ea9cb4f doc: rgw explain keystone's verify ssl switch
 e564111 doc: add notes about upgrading cephfs
 8ffc4e8 msg: async: start over after failing to bind a port in specified range
+70fb749 cram: pin to version 0.6 because 0.7 handles \r differently
 acc6405 test/cli-integration/rbd: disable progress output
+3a6bf7d tasks/scrub_test: match the NOSNAP with "head"
+b82fe3d rbd/cli: add older default features
 2039873 test/TestPGLog: fix the FTBFS
 0bd82d3 [rgw] RGWLib::env is not used so remove it
 6cd5e20 fixed a bug for write bench with rados
+6305415 Make the keyring world-readable
+86e67cd Rename mon.0 to mon.a
+7dd350e Run 'ceph health' after deployment
 cf812ea rgw: aggregate usage by payer
 ff8305a rgw: fix permission check for request payer
 554b643 test_filejournal: reserve throttle as needed
@@ -2402,6 +3152,8 @@ ece5b9e Add new mirrors to overview file
 bd592c0 AsyncConnection: dispatch write handler when accept done
 bcff5be osd: fix typo
 90d4aff osd: fix overload of '==' operator for pg_stat_t
+d1c211d rbd: updated tests to use new rbd default feature set
+f9a6902 krbd: override rbd defaults features to 1
 bcc2866 doc: fix typo, indention etc.
 0952f35 tools/cephfs: add tmap_upgrade
 f22a097 Bugfix: setting thread name will fail when running as a different user.
@@ -2432,9 +3184,11 @@ cba211d packaging: pkg_resources.py runtime dependency
 a4527c3 journal: async methods to (un)register and update client
 378f4c7 cls::journal: async client_unregister
 8671859 cmake fix: build civetweb with the right include path.
+130b12b Fix old and new pep8 issues
 ea2e24f rgw: fixes and adjustments following rebase
 23785d3 rgw: remove usage from signed resources
 155f079 rgw: change naming of utility functions in rgw_keystone.cc.
+4a8448a Add ceph-ansible suite
 aaa0f6f Use fixed version of civetweb to avoid warning.
 f3925ec Fix ssl link error.
 b451cbb Use ld.so to link in ssl crypto (cmake changes).
@@ -2459,6 +3213,7 @@ dff62e4 rgw: use pimpl pattern for RGWPeriodHistory
 2a80042 rbd/run_cli_tests.sh: Reflect test failures
 614597b AsyncMessenger: remove experiment feature
 ca50f42 doc: batch small fixes, including typo, syntax etc.
+f274737 suites: debuginfo: true for valgrind
 dc7e027 doc: fix typo
 0cee333 mds: remove stray dentry from delayed eval list after calling eval_stray()
 b33984b os/bluestore/NVMEDevice: remove unused variables ref
@@ -2537,6 +3292,8 @@ cb4efbd librados: add get_inconsistent_pgs() to librados
 50bbf7f tools/rados: support more --format options
 0c27417 librbd: Truncate of non-existent object results in object map flagged as exists. Fixes: #14789
 0cbe3de debian/rpm: split mon/osd/mds server packages
+52704d3 Reduced runtime for radosbench to 600
+a72bba2 Reduced runtime for radosbench to 600
 28e2d1b cls_rbd: add methods for keeping track of mirrored images
 da9b36a librbd: rename rbd_pool_settings object to rbd_mirroring
 2104df8 test/objectstore: add test for GetNumBytes
@@ -2578,6 +3335,7 @@ e92b452 rbd-mirror: ImageReplayer: pass registered client ID as external param
 dc9aba1 BlueStore: fix type mismatch for openat syscall
 204a96c BlueStore: add sanity check for attr removal
 468b64a BlueStore: remove unused local variables
+4f88f5f hadoop: disable client_permissions for tasks
 1e53e5b BlueStore: remove redundant continue
 a843569 BlueStore: fix typo
 f99bf34 BlueStore: fix obsolete comment
@@ -2599,9 +3357,12 @@ ef6aafa ceph-disk: s/dmcrpyt/dmcrypt/
 a7a5cf2 rgw: return proper error codes in S3/Keystone auth.
 19843ce rgw: enable users of RGWHTTPClient to get HTTP status code.
 7186d1c rgw: improve debugs around S3/Keystone auth mechanism.
+74b9216 tasks/cephfs: add test case for scrub repair
 edf8152 rgw: ONLY move PKI-related things from rgw_swift to rgw_keystone.
+9a3252c tasks/scrub_test: add test for get-inconsistent-* commands
 0914246 OSD: fix race condition for heartbeat_need_update
 804c615 osd/ReplicatedPG: remove unused bufferlist
+bdf998c tasks/scrub_test: restore the changes to omap after the test
 83da093 test: create pools for rbd tests with different prefix
 4d26000 OSD: put a guard for updating heartbeat peers
 67b2744 OSD: fix typo
@@ -2654,6 +3415,11 @@ ed54420 bufferlist: Add new func is_provided_buffer(const char *dst)
 b2bfd6c common/TrackedOp: fix inaccurate counting for total slow requests
 700c3dc librbd: update mirror peer data structure
 0ec038a librbd: support image and pool-level mirroring modes
+af3af30 divergent_priors: add sleep after reviving divergent
+ef75c89 rados: add lost-unfound upgrade tests
+b2a08ef tasks/*unfound*.py: run rados bench in parallel
+8cf2561 ceph_manager: use time before mon command for timeout
+7e0fb5d rados/singleton: use xfs instead
 55cbc60 rgw: Keystone token parsing doesn't need to know API version.
 abbd912 rgw: S3 always must use token format of Keystone v2.
 2f190fd rgw: Keystone token parsing should fail on misformed JSONs.
@@ -2787,6 +3553,7 @@ ebfc6bf rgw: move signal.h dependency from rgw_front.h
 58fe65f os/kstore: add transaction dump msg in _txc_add_transaction
 bce0bc0 os/kstore: wr lock collection in _txc_add_transaction
 67f95c8 Update the documentation
+362abf7 tasks/scrub_test: refactor this test
 c4162b5 librbdpy: Use new rados lib
 7216b06 ceph.in: Use new python rados module
 82869f0 Fix rpm/deb packaging
@@ -2813,6 +3580,7 @@ fe14a26 ceph.spec.in: declare /usr/share/ceph properly
 edefcaf vstart.sh: clarify usage on single osd/mds/mon.
 912fe8e vstart.sh: silence a msg where btrfs is not found.
 a6cc8ea rpm: drop systemd_libexec_dir template variable
+3c76c57 cephfs/test_full: add some gratuitous debugging
 5c09a3e rpm: drop user_rgw and group_rgw template variables
 84e9f3f librados_test_stub: watch_flush should block until notifies complete
 d898995 librbd: lock notifications should be executed outside librados thread
@@ -3705,6 +4473,7 @@ d07306c rgw: add period activate admin command and fix get current command
 d7f0304 common: fix build error with Graylog
 d8675cf Log: Adding UT to catch an issue with huge line logging
 dd533a9 Log: Fixing stack overflow when flushing large log lines. See http://tracker.ceph.com/issues/14707
+44c0307 rbd: add fsx journal replay test case
 5b3a4d2 test: new librbd flatten test case
 0da0eda unittest_[ceph_]crypto: fix warnings
 fd85a9b unittest_bufferlist: fix warning
 bfafc3b xio: avoid trying to bind to a port we know will fail
 36a82b3 xio: fix error from explicit ctor of entity_name_t
 b448f3c xio: thread name must be less than 16 chars
 40936fb os/bluestore: fix a typo in SPDK path parsing
+e03cee4 rados/upgrade: add '%' to combine rados.yaml with other yamls
+9c8d2fb rados/: remove unnecessary rados.yaml entries
 b92eb66 librbd: protect journal replay against overlapping writes
 bb66231 librbd: track in-flight AIO flush requests during journal replay
 1cc409b test: possible librbd journal replay flush race
 3969b83 os/bluestore/KernelDevice: force block size
 fce9b27 rgw: make function parameter 'errordoc_key' passed by reference
 7d10a44 doc/release-notes: v10.0.3
+a13d9e8 rados/upgrade: add '%' to combine rados.yaml with other yamls
+ebea8ad rados/: remove unnecessary rados.yaml entries
 676c489 global/global_init: chown log, asok if drop privs is deferred
 ccd5531 common/admin_socket: add chown
 32da962 log: add option fchown on log file
+9fb5778 Whitelisted 'soft lockup' and 'detected stalls on CPUs'
+367bd5b Whitelisted 'soft lockup' and 'detected stalls on CPUs'
 8ca144e OSD/ReplicatedPG: Adding test case to catch issue #14511
 4e3ce25 Add better documentation of --setuser and --setgroup options for ceph-disk.
 17fe9d2 Add --setuser and --setgroup options to ceph-disk(8).
@@ -3734,6 +4509,10 @@ ccd5531 common/admin_socket: add chown
 efab230 tests: sync ceph-erasure-code-corpus for mktemp -d
 30b257c ghobject_t: use ! instead of @ as a separator
 7ba6889 tests: ceph-disk.sh: should use "readlink -f" instead
+17e310f tasks/cephfs: add TestConfigCommands
+86b4231 tasks/cephfs: run ceph-fuse in foreground
+08c2876 tasks/cephfs: optionally check result of fuse proc on umount
+9fd891f tasks/cephfs: make FuseMount.admin_socket public
 ca5ec8a release-notes: draft v10.0.3 release notes
 5591cdf scripts: ceph-release-notes fix merge messages handling
 075e595 cmake: remove Boost libraries from EXTRALIBS
@@ -3889,6 +4668,8 @@ e8e0521 mailmap: Shun Song affiliation
 f59db3c mailmap: Luo Kexue affiliation
 588eeea mailmap: Aron Gunn affiliation
 efc8134 v10.0.3
+e96a3fe Changed centos version to 7.2
+aa207c2 Changed centos version to 7.2
 8556b9d mailmap: further cleanup.
 17d2622 mailmap: Abhishek Lekshmanan affiliation
 467990a mailmap: Darrell Enns affiliation
@@ -3945,6 +4726,8 @@ c1b3138 concurrency: Add shunique_lock
 5eda285 cls_journal: new get_client / client_update methods
 3f29e7b cls_journal: client registration should hold opaque data structure
 fa6d0ba cls_journal: new tag management methods and handling
+b725fdf Made disk size in openstack 40GB, so osd won't run out of space
+0cf150f Added + in the openstack.yaml dir. Fixes failures in #14635
 243d91d journal: switched entry tags to use id instead of string
 48c542a concurrency: make C++11 style debugging mutexes
 672a694 time: Remove constexpr from non-literal type in test
@@ -4029,15 +4812,18 @@ dd6fdc7 os/bluestore/BlueStore: dump txn in _txc_add_transaction at high debug
 f1bf983 os/bluestore/BlueStore: fix wal tail block padding
 8eac9c6 os/bluestore/BlueStore: wr lock collection in _txc_add_transaction
 82637be cmake: add KernelDevice.cc to libos_srcs
+183c782 Whitelisted "wrongly marked me down" for the tiering tests
 457f023 CMake: For CMake version <= 2.8.11, use LINK_{PRIVATE,PUBLIC} instead of PRIVATE,PUBLIC for backward compatibility
 669b932 rados: Add units to rados bench output
 06af9f0 os/kstore: insert new onode to the front position of onode LRU
 e85ffac scripts/run-coverity: fix upload process
+c4e064f add test for norstats functionality
 41c3dc2 test_bufferlist: add move tests for bufferlist
 573a2d9 bluestore/NVMEDevice: remove unused local variables
 83ca830 bluestore/NVMEDevice: fix fd leak
 a5714fb bluestore/NVMEDevice: fix wrong remove_device logic
 06f3837 bluestore/NVMEDevice: fix compiling error
+2e6e65d suites/fs: enable all snapshot test scripts
 0746470 Mon: show the pool quota info on ceph df detail command
 caed882 os/bluestore: insert new onode to the front position of onode LRU
 7c86775 bluestore/bluefs_types: fix incomplete output message
@@ -4058,7 +4844,9 @@ b593e1c BlueFS: remove redundant intermediate buffer pad
 65e1a21 BlueFS: stop alloc if unable to replay
 a3789e8 BlueFS: fix imprecise calculation of sync interval
 09d2644 BlueFS: add log after updating prefer_bdev field.
+e1faa31 Whitelisted "soft lockup" to avoid failures on slow nodes like in #14618
 93413b5 qa/workunits/rados/test.sh: bash
+97b0b2b Made rados runs in ovh call vps.yaml
 ec36dcd rgw: approximate AmazonS3 HostId error field.
 669f143 buffer: add operator= for ptr/bufferlist rvalue
 0305cee misc: using move construct to avoid extra atomic inc/dec
@@ -4099,7 +4887,7 @@ ada41f4 lockdep: dump lock names if we run out of IDs
 300b15d os/bluestore: fix typo (again)
 9858626 os/bluestore: fix typo
 62906d8 osd/PG: fix scrub start object
-0819509 qa/workunits/rados/test.sh: run tests in parallel by default
+0819509c qa/workunits/rados/test.sh: run tests in parallel by default
 4d2c0f5 osd/ECTransaction: Removing unused local
 ccebf7a osd/PGBackend: PGBackend interface cleanup - make some interface methods pure virtual instead of assertion usage. Signed-off-by: Igor Fedotov <ifedotov at mirantis.com>
 7fcfc28 Compressor: add zlib unittests Signed-off-by: Alyona Kiseleva <akiselyova at mirantis.com>
@@ -4189,6 +4977,7 @@ cdf3c90 os/memstore: return empty list if offset overflows
 79401d5 rgw: remove duplicated code in RGWRados::get_bucket_info()
 579fbc8 add some cppcheck-suppress noExplicitConstructor comments
 bbf0582 make ctors with one argument explicit
+58fd981 Initial draft for infernalis stable upgrade tests; fixed indentation; replaced 'description' with 'meta'; incorporated Josh's comments; moved all overrides to the top
 65ed192 rgw/rgw_rest.cc: fix -Wsign-compare
 b811760 test/librados/list.cc: fix -Wsign-compare
 4b550e0 tools/rbd/action/Nbd.cc: prefer ++operator for non-primitive iterators
@@ -4230,6 +5019,8 @@ e892f9a rbd-replay-prep.cc: replace inefficient string::find() w/ compare()
 1f262a1 mds/Server.cc: use string::compare() instead of ::find()
 fd8f18a ceph_mon.cc: replace string::find() with compare()
 09ca147 tools/rados/rados.cc: fix race condition in load-gen complete callback
+552e3e4 suites/rados: test random queue options
+17a7032 admin-socket: add set_heap_property tests
 136433d rgw: support admin credentials in S3-related Keystone authentication.
 882672d cmake: Add Graylog logging backend build support
 3968852 graylog: Fix ptr to bool conversion
@@ -4250,6 +5041,7 @@ fb307c2 kstore: flush before we really start a truncate
 b0bed15 BlueStore: add fast check against empty list
 06ad784 kstore: add fast check against empty list
 ddca0dc tools/rados: fix wrong op/object sizes in rand/seq bench
+9cf3708 tasks/cephfs: test sharded cephfs-data-scan
 4ad0858 kstore: simplify open_collection logic a bit
 21b6fe6 kstore: fix unmatched type of decode
 b99b61e ceph_osd.cc/ceph_mon.cc: cleanup unreachable exit call
@@ -4265,6 +5057,7 @@ c8c5d0a mon:some cleanup in MonmapMonitor.h
 87b7e53 FuseStore: fix unhandled error cases for open
 97639f6 FuseStore: fix memory leak
 5e564ea tests: simulate writeback flush during snap create
+114d720 Revert "basic/msgr: remove async and random for now"
 d5ae7f8 rgw: don't use s->bucket for metadata api path entry
3ba54cc test/common/test_weighted_priority_queue.cc: Add unit tests for the new Weighted Priority Queue.
 2061496 test/common/test_prioritized_queue.cc: Fix random shuffle and remove unneeded include.
@@ -4282,6 +5075,8 @@ fd8ab0b cmake: remove the copyright of FindSnappy.cmake
 38e2f7a rbd: simplify snap rename arguments
 18fe93c osd: Pass coll_t by reference
 5e4eb3f OSD: Deleting transaction object right after applying transaction
+bef4ef9 Added point-to-point suite, parallel suite, stress-split suite, stress-split-erasure-code suite, and stress-split-erasure-code-x86_64 suite
+b07a2ed Added point-to-point suite, parallel suite, stress-split suite, stress-split-erasure-code suite, and stress-split-erasure-code-x86_64 suite
 464a0ad gitignore: ignore backup files from editors etc.
 e942051 remove unused source file
 bd2fdf8 OSD::consume_map: correctly remove pg shards which are no longer acting
@@ -4414,6 +5209,7 @@ a2c1b53 SubmittingPatches: sending kernel patches to mailing list
 dbabbd0 SubmittingPatches: add doc for "Fixes:" tag
 2b5588a SubmittingPatches: update with reST syntax
 03e01ae init-ceph: pass TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES through
+783f33f admin-socket.yaml: add get_heap_property tests
 3772006 osd/: make heap properties settable via admin socket
 99b054f osd/: make heap properties gettable from admin_socket
 e1da085 test: race condition in ReplayOnDiskPostFlushError test case
@@ -4478,10 +5274,13 @@ fcac028 os/bluestore/BlueStore: Simplifying default return value
 858f606 mon/PGMonitor: Removing dead code
 dece3be Add error check for object_map->sync()
 3995b1c mon/MonClient: avoid null pointer error when configured incorrectly
+d26f2ed suites/fs: fix ceph.restart syntax
 bd56e17 OSDService: Fix typo in osdmap comment
 326eb4c tests: fix pybind path for RBD unit tests
 1aa674c librbd: missing lock on state transition
 1215a1a helgrind: annotate false-positive race conditions
+30e0895 rbd: rbd-nbd can only run on Ubuntu
+05c678b Whitelisted 'wrongly marked me down'
 bb4d7a7 qa: disable rbd/qemu-iotests test case 055 on RHEL/CentOS
 4c9345b cmake: add missing check for HAVE_EXECINFO_H
 887fbe1 Fixes #14400 + some refactoring to avoid input buffer modification in future.
@@ -4497,6 +5296,7 @@ aed8577 osd: disable filestore_xfs_extsize by default
 b4bd72f BlueStore: drain wal_wq first if we exit from mounting due to wal_replay error
 aa28b58 vstart: update help with a few more switches
 5183ff3 BlueStore: verify fsid in a more proper way during mkfs
+67c0033 suites/fs: cephfs_journal_tool: wait for health after restart
 29a7d68 tests: update pool alignment API
 f021f5a tools: fix pool alignment API overflow issue
 8aa068d librados: fix rados_ioctx_pool_requires_alignment2()
@@ -4560,6 +5360,7 @@ eb057e1 tests: compile RBD and RGW types into ceph-dencoder when enabled
 838b16a ceph_test_libcephfs: tolerate duplicated entries in readdir
 70cb0f1 doc: remove redundant space in monmaptool doc
 24d10e6 doc: remove redundant space in ceph-authtool doc
+9b0e2a3 tasks/cephfs: test for race between cap flush and clientreplay
 cbaee84 osd:osdmap some clean up
 19dc272 mds: add config option to suspend logging
 5d8d666 mds: fix completed cap flush handling
@@ -4568,12 +5369,26 @@ dafb46b mds: delay handling client caps until corresponding inode is created
 0187a59 cls/cls_rbd: pass string by reference
 300c2f7 Compressor: compressor plugins unit tests
 2cadc41 Compressor: compressor code extention: plugin system added
+500aac5 tasks/cephfs: override max_pgs_per_osd in TestVolumeClient
+d1c3ef1 tasks/cephfs: pg_num check in volume_client test
+fb2e38f tasks/cephfs: fix raising from RemoteProcess.wait()
+55a0493 tasks/cephfs: fix vstart's LocalDaemon.proc
+5c52521 tasks/cephfs: fix wait in test_volume_client
+b06818c suites: add an extra client to fs/recovery
+5055f12 suites: add yaml for TestVolumeClient
+627c330 tasks/cephfs: add test_volume_client
+523d84b tasks/cephfs: fix fuse killing in vstart
+cb699cc tasks/cephfs: write_n_mb optionally background
+b94b35b tasks/cephfs: reset IDs of mounts between tests
+7a19b6d tasks/cephfs: clear extra auth ids on test start
+addf313 tasks/cephfs: add test case auth_list helper
 7d52372 fsx: checkout old version until it compiles properly on miras
 256dc84 doc/release-notes: v10.0.2
 6a309c8 pybind/rbd.pyx: drop stray semicolon
 e92cbde os/bluestore/FreelistManager: fix _dump locking
 386834c pybind: remove extraneous semicolon in method
 9e07482 osd: less chatter about tick
+e22a8c6 rados/objectstore/fusestore.yaml: run ceph-objectstore-tool fuse test
 f6eb3e5 ceph.in:avoid a broken pipe error when use ceph command
 2be865c kstore: remove unreachable code
 a987fc3 BlueStore: decrease max listed object number from 5 to 1
@@ -4596,8 +5411,11 @@ b085a56 BlueStore: differentiate error code for better tracing
 eae97f8 client: check if Fh is readable before read
 d8a3f6d ceph-fuse: double decreased the count to trim caps
 fa9c8e0 mon: paxos is_recovering calc error
+9afe229 Fixed symlink for os configs
 4c3b685 tests: snap rename and rebuild object map in client update test
+0915747 Fixed symlink for os configs
 c172ca3 pybind/rbd.pyx: add rebuild_object_map method
+5a79aa6 Change supported distros to use latest CentOS 7.2
 fb3246b add -f flag for btrfs mkfs
 3a8c674 os/bluestore: use auto for freelist map type
 e0a17b9 os/bluestore/BlockDevice: disable readahead of fd_buffered
@@ -4641,6 +5459,7 @@ cf56490 tests: improve rbd-nbd tests
 3ff60a6 rbd-nbd: returned length equal requested only for read and write
 19cc1e8 ReplicatedPG : clean up unused function
 c628d06 mailmap: Lenz Grimmer affiliation
+b010b6f tasks: add TestSessionMap.test_[mount|tell]_conn_close
 ab3fc3a kstore: fix wrong verification logic of object key
 b858e86 osd/PGLog: delay populating PG log hashmaps
 e39ccb4 mailmap: Igor Fedotov affiliation
@@ -4653,15 +5472,18 @@ af1a3b2 client: avoid re-sending cap flushes twice during MDS recovers
 d64d98c kstore: fix unclosed formatter session
 41aa50b client: add scattered write callback to ObjecterWriteback
 60d205f objectcacher: coalesce dirty buffers in one object into one IO
+608fb1f suites/rbd: avoid redundant helgrind tests
 eb2bbe1 mailmap: Hector Martin affiliation
 7b4f111 mailmap: Tao Chang affiliation
 b430484 mailmap: Florent Manens affiliation
 5265302 mailmap: Evgeniy Firsov affiliation
+be35a7b suites/teuthology: Add CentOS 7.1 and 7.2
 ebdb0a9 packaging: add build dependency on python devel package
 e3d45f0 os/bluestore: don't include when building without libaio
 0b369e1 rados: allow both object size and op size to be specified
 e59293d librbd: add additional granularity to lock states
 94b7d23 librbd: initialize object map before replaying journal
+83f6e81 Added distros to f-h-x-offline, tiering, v0-94-4-stop tests
 4fa53ea librbd: do not accept RPC requests while transitioning lock state
 5216146 librbd: avoid error message when peer owns exclusive lock
 58a1a45 java/test: update for libcephfs permission check
@@ -4669,6 +5491,8 @@ e59293d librbd: add additional granularity to lock states
 93d9d05 cmake: include access tests in test_libcephfs
 68ed47d client: allow user with WRITE perm to set atime/mtime to current time
 c9d42d2 BlueFS: use wrapper for error closing fd
+c97c10c suites: turn up debug for TestFull
+7939fca suites: debug on client 0/1 instead of all
 329106b common: remove unused buf_is_zero
 6537eda osd/osd_types.h: use optimized is_zero in object_stat_sum_t.is_zero()
 181608b BlueFS: return if we are unable to decode a transaction
@@ -4692,13 +5516,20 @@ b84cf3e test_libcephfs: add POSIX ACL tests
 e063edd CrushTool: fix typo and improve indention
 db2e21c CrushTool: exit if fail to add a bucket
 c69a2aa os/FileStore: print error to log on eio
+558fbe2 Removed point-to-point-x test; will need to add a jewel-specific one later http://tracker.ceph.com/issues/14339
+f566cf7 tasks/cephfs: check status asok output while in reconnect
 d94cf5b os/memstore: fix fiemap
 fa1c64a os/kstore: fix split_collection to persist cnode
 00ec724 os/bluestore: fix split_collection to persist cnode
+5c06511 tasks/cephfs: add TestStrays.test_dir_deletion
 092c5ea os/bluestore: simplify rebalance_freespace
 cec5069 os/bluestore/BlueFS: add reclaim_blocks interface
 d50ddba os/bluestore: base initial bluefs allocation on min_alloc_ratio
+028a140 tasks/cephfs: better message from test_full_fsync
 474c989 rados: fix bench output column separation
+7d9c960 suites/fs: update log whitelist for test_session_reject
+aafa06e tasks/cephfs: generate mount paths on the fly
+acff449 tasks/cephfs: support mount_path in Mount subclasses
 95e885e Reduce string use in coll_t::calc_str()
 8d97df0 mailmap: Dongmao Zhang affiliation
 2c914e2 mailmap: sort files
@@ -4757,13 +5588,16 @@ aa8a74a Add copyright for rbd-nbd
 4f53cde test/test_objectstore_memstore.sh: clear store_test_temp_dir
 9179ce8 osdc: Fix race condition with tick_event and shutdown
 5839a1b unittest_bufferlist: fix hexdump test
+7831ca1 Revert "pg: add test cases for pg list_missing on EC pool"
 d5c02f3 cls_rbd: enable object map checksums for object_map_save
 eb120e8 librbd: correct include guard in RenameRequest.h
 cf9cd80 crush/CrushTester: check for overlapped rules
 279e9fb doc: dev: document ceph-qa-suite
 3ded44d cmake: detect bzip2 and lz4 for rocksdb
 3626d86 cmake: add check for liblz4
+58c13c1 tasks/cephfs: fix wait_for_state timing
 ede0e3a cmake: made rocksdb an imported library
+00f1523 tasks/cephfs: update vstart_runner for teuthology change
 1329a09 librbd: disable write_full optimization if image has parent
 c425bf0 tests: new test case for write_full optimization
 3337eee mds: fix standby replay thread creation
@@ -4810,6 +5644,12 @@ a614c3b cmake: Updated allocator checking
 b94f910 cmake: kv dir and bluestore unittests
 fee536d osd: Avoid debug std::string initialization in PG::get/set
 c1a0194 client: add client_metadata option to override metadata
+26423e9 tasks/cephfs: fix wait in assert_cluster_log
+a697027 tasks/cephfs: fixes to FuseMount
+141de6e tasks/cephfs: add TestSessionMap.test_session_reject
+3b9b68b tasks/cephfs: add assert_cluster_log
+d8106fa tasks: add run_ceph_w to CephManager
+8b5edd4 tasks/cephfs: new hooks for auth keys and config
 089673a qa/workunits/rbd: import_export should use clean temp space
 5b065aa librbd: reduce verbosity of common error condition logging
 4a4b447 threads: add thread names
@@ -4864,6 +5704,7 @@ ef263b3 osd/ReplicatedPG: It should call clear_omap_digest.
 ad44bb5 test: Fix ceph-objectstore-tool test due to recent changes
 aa8583d ceph-objectstore-tool: Warn if --type specified and doesn't match fs
 e7afe62 ceph-objectstore-tool: Improve error message for --type option
+66d244f Removed tests for EOL dumpling clients on jewel clusters
 70a1392 qa/workunits/rbd: rbd-nbd test should use sudo for map/unmap ops
 b24378d tests: configure with rocksdb by default
 0fdf745 cmake: build and link rocksdb etc for bluestore
@@ -4880,6 +5721,7 @@ ca42be2 mailmap: Tobias Suckow affiliation
 b9e73bf mailmap: Rahul Aggarwal affiliation
 b2f7fc6 mailmap: Brian Felton affiliation
 b0f6af8 tools: ceph_monstore_tool: add inflate-pgmap command
+b2a304e tasks/cephfs: fix race in test_damage
 a9b7c0d os: remove duplicated assignment
 d30ef37 os: remove redundant omap_head check logic
 02f8e92 os: remove unused local variable
@@ -4899,6 +5741,7 @@ dfc5b94 PGLog: clean up read_log
 a17f4e2 rgw: radosgw-admin bucket check --fix not work
 4502446 qa/workunits/rados/test_rados_tool.sh: fix path
 1097bd2 qa/workunits/rados/test_rados_tool: adapt to new buffer hexdump
+926a8cb tasks/osd_recovery: fewer objects in test_incomplete_pgs
 bc1cf95 uniform the type format
 757152f os/bluestore/BlueStore: fix overlay keys
 d6043d9 fs: fix unclosed formatter session
@@ -5094,10 +5937,12 @@ f0f815f os/newstore: dump onode contents
 2993504 os/newstore: fixed fragment size
 be0528f os/newstore: recycle rocksdb log files
 feb2d3f rocksdb: latest master
+04e41a6 rados: 5m radosbench (not 10m)
 c25ff99 osd: clear pg_stat_queue after stopping pgs
 c01a314 mon/PGMap: show rd/wr iops separately in status reports
 7404f85 Remove unused SnapContext member snapc from MOSDSubOp message.
 6a59aae config: complains when a setting is not tracked
+6110421 rados: run radosbench for 10m (not 15m)
 2fd3f43 osd: remove repop_map in osd
 e29f55e osd: fix wip (l_osd_op_wip) perf counter
1e120c8 osd: Avoid osd_recovery_sleep causing recovery-thread suicide.
@@ -5111,6 +5956,7 @@ dd8221d osd/OSD.cc Check health state before pre_booting
 9af4d56 test: enable changing subscribe level testing
 7485668 mon: don't send Mlog msg to client when there is no entries
 9ea99fa mon: remove conflicting subscribes when handle_subscribe
+07232b1 rados/singleton/all/pg-removal-interruption: ensure osd.0 is marked down
 4efa214 kv: implement value_as_ptr() and use it in .get()
 41fcd87 doc: s/InvalidCap/InvalidCapability
 f302e05 osd: avoid FORCE updating digest been overwritten by MAYBE
@@ -5125,9 +5971,15 @@ bd86c30 objectcacher: introduce ObjectCacher::flush_all()
 a7501c9 client: fix 'wait unsafe requests' code in _fsync()
 8d2df89 client: wait unsafe requests that created/modified the file in fsync()
c615feb rgw: let radosgw-admin bucket stats return a standard json
+c7cd4f7 rbd: increase length of non-valgrind fsx workloads
+ef25ba3 rbd: add helgrind to valgrind subsuite
+888ff87 rbd: add a couple new workunits
+8bd0b9d rbd: add some workloads with journalling enabled
+5864d56 rbd: only test on xfs
 e85907f osd: Improve log message which isn't about a particular shard
 10b4a08 osd/OSD: clear_temp_objects() include removal of Hammer temp objects
 508deb9 common/address_help.cc: fix the leak in entity_addr_from_url()
+e2f9ced rados: run radosbench for 15m instead of 30m
 b4dbaa6 librbd: do not ignore self-managed snapshot release result
 bc309d9 librbd: properly handle replay of snap remove RPC message
 8e095de qa/workunits: merge_diff shouldn't attempt to use striping v2
@@ -5154,6 +6006,7 @@ d821aca doc: document "readforward" and "readproxy" cache mode
 fd6fca7 doc: `Amazon.AWSClientFactory' is obsolete
 c2e3913 man: document listwatchers cmd in "rados" manpage
 877f332 ceph doc fix slip of pen
+2b4e949 suites/rados/singleton/all/radostool: whitelist quota error
 eb8057c ceph-fuse: fix double free of args
 93497e5 rgw: warn on suspicious civetweb frontend parameters
 eace76d osd: Avoid osd_op_thread suicide because osd_scrub_sleep > tp_suicide_timeout.
@@ -5218,6 +6071,7 @@ bab16bb librbd: stop the copyup thread during shutdown
 d4cf79b mon: modify the level of a log about OSD's condition in OSDMonitor.cc
 69bcac5 qa/workunits/rbd: use --object-size instead of --order
94cd34b rbd: process crashed when creating rbd with striping parameters
+46dddfc suites/rbd: disable pool validation for pool snaps test
 127cd92 rbd: use default order value from g_conf
 8e93f3f log: Log.cc: Assign LOG_DEBUG priority to syslog calls
 fc39e6f os/MemStore: fix use-after-free on iterator
@@ -5229,6 +6083,7 @@ b88561a common/buffer: reset last_p on get_contiguous
 95afcad newstore: fix was hidden warning
 3167608 rgw: use unsigned long to print to log
 432d26c cmake: no need to run configure from run-cmake-check.sh
+834a38c buildpackages: support CentOS 7.1 and CentOS 7.2
 4269263 crush: fix the default type 0 name if it's not specified when decompiled
 895bf8b crushtool: set type 0 name "osd" for --build option
 d914448 OSD: fix null pointer access and race condition
@@ -5243,6 +6098,7 @@ fbd5959 doc/release-notes: v10.0.1
ec42514 osd: use unordered_map for repop_map, with a prediction of the expected per-pg maximum ops to initialize the number of hash buckets
d13f602 rbd: add destination image name validation for rbd-fuse mv operation
8284165 rbd: process crashed when creating rbd with striping parameters
+a204f6f repair_test: Add whitelist item for new stat error message
 e7b7e1a rgw: add a method to purge all associate keys when removing a subuser
 35542eb Revert "makefiles: remove bz2-dev from dependencies"
 79c7ccb osd: combine map.count() and map.find() into one searching operation
@@ -5320,6 +6176,9 @@ ef8fa8b pybind: Implementation of Ioctx.set_read to allow read from snapshots
14e9d29 mount/mtab.cc: memory leaks; free() should be called to free resources, in order to avoid memory leaks
 58bad43 client: modify a word in log
 c36d73e doc: Modified a note section in rbd-snapshot doc.
+64d8a58 buildpackages: no need to sudo in bootcmd
+0c1e7f1 buildpackages: log the output of make
+d4d3787 Suites/jewel in the smithi lab will be on CentOS 7.1
 f07e029 assert: abort() rather than throw
 4e28f9e osd/OSDMap: clear osd_info, osd_xinfo on osd deletion
 ad15b81 vstart.sh: mon osd reporter subtree level = osd
@@ -5344,8 +6203,12 @@ e2cd81d mailmap: You Ji affiliation
 ea8ad14 mailmap: Sangdi Xu name normalization
 e9aa132 mailmap: Robin H. Johnson name normalization
 fee41ad mailmap: Guang Yang name normalization
+ebf8508 buildpackages: pkgrepo host also needs user-data setup.
+89bde0b buildpackages: copy teuthology user-data.txt changes
 5890760 test: add test case for c++ osd/pg command
 5f7f806 librados: add osd/pg command interface for c++
+3c69546 buildpackages: catch instance creation failure.
+0240f57 buildpackages: Be robust about addresses
 c6cdc33 tests: verify it is possible to reuse an OSD id
 e2f163a tests: fix failure for osd-scrub-snap.sh
 5876829 tests: kill_daemons uses TERM instead of KILL
@@ -5405,6 +6268,7 @@ f9a230a test/osd: add test case for shutdown_when_flapping
 b47eeae tests: centos7 needs the Continuous Release (CR) Repository enabled for libunwind
7d48f62 LifeCycle feature: as with the Amazon S3 interface, "PUT Bucket lifecycle" and "DELETE Bucket lifecycle" have been implemented; "GET Bucket lifecycle" is not implemented yet, as s3cmd has not implemented it either. The feature's main point is to remove expired files each day. Transferring files from a hot layer to a cold layer is not supported. ToDo: maybe transferring from a replicated pool to an EC pool, or from an SSD pool to a SATA pool, will be valuable.
 0565055 rgw: fix a typo in init-radosgw
+d0280d7 Revert "dummy: only use the user playbook"
 cd0c13b qa/workunits/cephtool/test.sh: false positive fail on /tmp/obj1.
 a80ff1f common/ceph_context.cc:fix order of initialisers
0186cdc rgw: don't update entrypoint when removing bucket
@@ -5429,13 +6293,18 @@ c83d6db cmake: update for recent rbd changes
 c11ca42 Update Rongze Zhu affinity
 593c124 doc: rst style fix for pools document
 e62954e deb,rpm: package buffer_fwd.h
+b0a770a workunit: fetch from --ceph-git-url
+28dc5c0 workunit: keep the path to the list of tests in a variable
+7de33f0 workunit: allow tag to override sha1
 b23b92d AsyncConnection: Fix potential return code overflow
 3443b6d Pipe: Fix potential return code overflow
 1adf306 SubmittingPatches: there is no next; only jewel
 c5ff6f6 Pipe: Fix large message data content length causing overflow
 8160f9e Add common/PluginRegistry.cc to CMakeLists.txt
+6fb44db pg: add test cases for pg list_missing on EC pool
 c1daf4e doc/dev/index.rst: wholesale refactor
 a62b5ac pybind: support ioctx:exec
+371e897 Removed CentOS 7.0 and added CentOS 7.1.1503 instead
 7f81728 common/Makefile: ship common/event_socket.h
 c8f7d44 build/ops: systemd ceph-disk unit must not assume /bin/flock
 73aab5e test: use sequential journal_tid for object cacher test
@@ -5483,7 +6352,13 @@ cd2bc41 Objecter: clean up Objecter.h/ObjectOperation
 053ee91 PGLog::rewind_divergent_log: fix rollback_info_trimmed_to before index()
 66c7246 TestPGLog: add test for 13965
 7455897 osd: prioritize recovery based on pool's customized priority
+1280caf tasks/cephfs: avoid using bare remote.run in scrub_checks
+0cd6cb1 tasks/cephfs: update for run_shell vs. sudo
+80397a5 tasks/cephfs: always sudo in run_shell
+bb924d2 tasks/cephfs: cover snaps in test_strays
 bc21a23 doc/dev/index.rst: fix headings
+521a93a suites: add yaml for TestForwardScrub
+14e2cf1 tasks/cephfs: add TestForwardScrub
 b6f9a9b doc/dev/index.rst: begin writing Contributing to Ceph
 fb120d7 osd: call on_new_interval on newly split child PG
 34b3283 common: lockdep now tracks lock ordering when backtraces disabled
@@ -5516,6 +6391,7 @@ f8a9aef librbd: snap create doesn't properly handle race conditions
 835989c librbd: convert op state machines to templates
 5b63666 tests: add gmock unit tests for librbd object map state machines
 dfbcbc7 librbd: ensure object map is invalidated on disk if forced
+ea6360a teuthology-integration: run teuthology integration tests
 b89179a OSD: remove heartbeat_epoch which is redundant
 414252c PG: remove redundant statements
 b9c7e81 PG: perfect publish_stats tip message
@@ -5592,10 +6468,12 @@ ac35e84 journal: helper method to determine if journal exists
 644d600 librbd: not necessary to hold owner_lock while releasing snap id
 9d06041 rbd: bail if too many arguments provided
 d133f42 rbd: don't append an extra newline after some errors
+ab9a3d5 buildpackages: create the image if it does not exist
 1c84681 tests: update unmap.t CLI test
 5ce663f cmake: librbd needs libjournal and libcls_journal_client
 77aef0d rgw: remove unused variable in RGWPutMetadataBucket::execute.
 117e630 Correct typo 'restared' to 'restarted'
+4f29d92 suites: yaml for TestDataScan
 691199b CodingStyle: fix broken URLs
 3ee2f6d os/LFNIndex: check object whether exist for remove_object.
 6b402f5 ceph::buffer, Add cached_crc and cached_crc_adjust count in perf dump in order to track the hit rate and efficiency of crc_cache
@@ -5610,19 +6488,25 @@ f79e289 tests: fix race condition testing auto scrub
 26752f9 cmake: update for recent librbd changes
 a7f520c auth: fix a crash issue due to CryptoHandler::create() failed
 e9e0533 auth: fix double PK11_DestroyContext() if PK11_DigestFinal() failed
+b3da9d7 tasks/cephfs: add TestJournalRepair.test_table_tool_take_inos
 574e319 Test:bencher ops counter doesn't increase Signed-off-by: Tao Chang <changtao at hihuron.com>
 0b474c5 mon: don't require OSD W for MRemoveSnaps
+a6de4a0 openstack: rados/.../morepggrow.yaml may need more disk
+d14f2da task/rbd_fio: allow for unsigned packages
 e6dcf14 osd: store per pool scrub intervals in pool options
 2b252d2 tests: workunits should not have ./ (assume it in $PATH)
 73077dd osd: pg_pool_t: add dictionary for pool options
 9a14ff7 osd/OSD.cc: shutdown after flapping certain times
 09c0d8d librbd: fix tracepoint parameter
+a25638c openstack: add 15GB for debugging purposes
 014e2f0 ceph.spec.in: use %tmpfiles_create macro
 da52b5e Workunits : fs/misc/: check if cache is used in direct IO
 8a9db37 test/encoding/readable.sh fix
 84db496 os/CollectionIndex: Change meaning of last parameter for lookup function.
 a0a424e os/FileStore: cleanup code for lfn_unlink.
1813ef3 os/LFNIndex: Remove the duplicated code.
+8088d60 buildpackages: wait for the build machine deletion
+375c53b cephfs/test_client_recovery: check fsync waits for unsafe requests
 f76d5d6 pybind: decode empty string in conf_parse_argv() correctly
 9331e03 test: use sequential journal_tid for object cacher test
 720ac2b EventSocket: Add EventSocket structure used for event notification
@@ -5631,6 +6515,7 @@ f5e0cce osd: don't update rollback_info for replicated pool rollback_info is jus
2b390fc osd: don't update unnecessary epoch for pg; the epoch always remains the same unless the state of the cluster changes. Therefore, avoid updating the epoch for every Op in order to save cpu cost and disk bandwidth.
 75f1412 DiskUsage: close formatter session on error exit
 573151f doc: Fixes a spelling error
+30b64fd rados/thrash/workloads/admin-socket: 60s -> 180s
 2f330f6 src/*/Makefile.am: test fixup for as-needed compiling.
 b96c7e6 aix shared library build
 3680dc3 mon/OSDMonitor: block 'ceph osd pg-temp ...' if update is pending
@@ -5648,6 +6533,7 @@ fdb3f66 crush: add chooseleaf_stable tunable
 f7ca00a rgw: make APIs to work with tenants
 1f19b60 rgw: buckets within tenant namespace
 788477a rgw: user has a tenant property
+abc07e2 openstack: add 30GB for debugging purposes
 61bf1bd rgw/Makefile.am: declare rgw_rest_s3website.h as well for bugfixing.
 13a12a5 rgw: add an inspection to the field of type when assigning user caps
 3369a83 librbd: simplify IO method signatures for 32bit environments
@@ -5675,6 +6561,7 @@ d92f611 mailmap: Burkhard Linke affiliation
 39032ba qa: erasure-code-benchmark technique and plugin selection
 a6433cc qa: erasure-code has --erasure-code-dir
 8789eb9 add aix compile warning
+69560a8 tasks/cephfs: tidy test_scrub_checks
 5b1f962 initialized backtrace variables
 f86eb3f mds: fix scrub_path
 4025f75 doc/release-notes: fix typo
@@ -5743,6 +6630,7 @@ b7faf67 tools:support printing the crushmap in readable fashion.
 a3a0e1c aix gcc librados port
 f74e310 osd: Only add random deep scrubs when NOT user initiated scrub
 4c19abd Revert "test: osd-scrub-snaps.sh: Randomized deep-scrubs can now happen during a scrub"
+2997382 openstack: rbd/{thrash,qemu}: allocate three disks, always
 0fe26c2 test: osd-scrub-snaps.sh: Randomized deep-scrubs can now happen during a scrub
 328c663 SubProcess: update to use new constructor
 000306e SubProcess: include iostream
@@ -5752,20 +6640,26 @@ fda3f7e add rbd-nbd test case
 7bbd54a add rbd-nbd package
 5ac1cbf add rbd-nbd doc
 37f1e84 add rbd-nbd tool
+8f2af59 openstack: convert to array hints
 8e2831b rgw: Remove unused code in PutMetadataAccount:execute
 07f68b5 Typo in the apt-get command. Signed-off-by: Chris Holcombe <xfactor973 at gmail.com>
 3193ee1 scripts: ceph-release-notes for development versions
 c44ab62 release-notes: draft v10.0.0 release notes
 9359847 librbd: commit journal op events immediately
+f398b71 buildpackages: do not -jX on dumpling
 18713e6 mon/PGMonitor: MAX AVAIL is 0 if some OSDs' weight is 0
 5e6fcd4 rgw: compile fixup for JSONFormatter move
+89dcc0d ceph_manager: do_pg_scrub: keep scrubbing until it's done
 1420a1f doc: add v0.80.11 to the release timeline
+5f90c35 rados: add test for ec fast_read
 9e9b03e doc/releases: add v0.80.11 to release table
 4b5afe5 doc/release-notes: final v0.80.11 notes
 6316ff8 10.0.0
 99ba661 13207: Rados Gateway: Anonymous user is able to read bucket with authenticated read ACL
 1536cb0 osd: note down the number of missing clones
+f8fcb67 Added infernalis-client-x - fixes #13774
 9109f14 common/hobject.h: don't reverse bits in zero
+87e50d3 buildpackages: avoid duplicate packages-repository
 3b146f5 RadosClient: reimplement the pool alignment methods using the new ones
 1633d3e doc: Update ceph-disk manual page to remove some option description.
 ac84faa librbd: improve debug output for object map state machines
@@ -5797,6 +6691,8 @@ d1541d6 librbd: migrate object map snapshot ops to async state machines
 7b1170a librbd: move object map async ops to standalone classes
 4944f20 librbd: initial conversion of snapshot ops to async versions
 d3d139b doc: Update ceph-disk manual page with new feature deactivate/destroy.
+13fb23e buildpackages: implement get_pkg_type
+69e9b22 buildpackages: refactor to not require remote
 9cbe132 pep8 changes
 cb18a10 Add test cases to validate symlinks pointing to devs
 b3c7cb0 Compare parted output with the dereferenced path
@@ -5870,6 +6766,7 @@ b3a1290 osd: change mutex to spinlock to optimize thread context switch.
 f2432e0 rbd: recognize cephx_sign_messages option
 34b2b8f rbd: unbreak rbd map CLI
 c6a2ec2 tests: fix test case using new api
+4825f64 buildpackages: protect packages-repository
 2d2e6b2 osd: inline do_mon_report
c131c81 osd: limit number of pg stat updates in flight
 093478a osd: fix pg_stats_queue lock protection
@@ -5899,6 +6796,7 @@ c73e96a radosgw-admin: fix cli tests
 a5b0465 crushtool: fix cli tests
 b7bb216 crushtool: fix cli test help
 2829e9d doc: flesh out MDS auth docs
+95538f2 tasks/cephfs: test for auth caps pool-setting restriction
 a536d11 mds: apply MAY_SET_POOL in request handling
 eee4b8f mds: add MAY_SET_POOL in MDSAuthCaps
 0533cf9 osd: fix wrong use of right parenthesis
@@ -5971,9 +6869,16 @@ eb020b6 os: write file journal optimezation
 7318384 stringify: Enable optimization for GCC only
 102539e librbd: API: options on image create: update tests
 c3be44e librbd: API: options on image create
+8d798c4 buildpackages: do not fail for unpackaged files
+cebabdf Update schedule_rados.sh
+4a78df0 Require -e arg for email address
 4052282 cmake: add nss as a suffix for pk11pub.h
 0e5a794 librbd: provide an out-of-class definition for MAX_DESCRIPTION_OFFSET
 6c5d601 cmake: fix librbd and add src/journal
+12ba21d Added %; fixed install.upgrade syntax; whitelisted 'reached quota'
+bdf49e0 Added %; fixed install.upgrade syntax; whitelisted 'reached quota'
+9287493 buildpackages: giant has some unpackaged files
+129c5db buildpackages: 10GB is too small most of the time
 460c74a mds: properly set STATE_STRAY/STATE_ORPHAN for stray dentry/inode
 1ce364b mailmap: Ubuntu Kylin name changed to Kylin Cloud
 0d684ad mailmap: sort files
@@ -5981,10 +6886,16 @@ d911641 journal: update allocated tid when skipping committed entry in player
 0669cba use new api and fix some wrong flag caller
 628f69f save init flags to CephContext
 925596a osd: check do_shutdown before do_restart
+978d83d Added %; fixed install.upgrade syntax; whitelisted 'reached quota'
+ec1dbd2 Added %; fixed install.upgrade syntax
 44b1488 stringify: Reduce CPU usage by reusing stringstream in stringify function
 016ed34 rados: Minor output changes for consistency across operations
+5c09af2 Added %
+ef9e43b Update schedule_rados.sh
+6c1a8a9 Reducing the amount of resulting jobs scheduled
 3ea903e cmake: fix files list
5ed8cdc tools: print the map information in human readable format.
+92980c3 Reducing the amount of resulting jobs scheduled
 a1b690d cls::journal: fixup: constify dump functions
 0b261e2 journal: call metadata shutdown on journal remove
 0dd6e0f journal: don't use object_number when comparing positions
@@ -6007,6 +6918,8 @@ e629094 Osd: add two fields to pg_pool_t
 b30d1b8 doc: Adding --cluster option to rbd man page.
 9ebea48 rbd: dynamically generated bash completion
 01c720b rbd: hidden 'bash-completion' command dumps all available commands
+4cbf177 rados: 'failed to encode ...' warnings are normal on upgrades
+5ba70c8 Update fio version from 2.2.9 to 2.2.11
 6daa1eb tests: updated rbd CLI --image-feature optional
 c12f7d2 ceph_test_rados_api_tier: fix PromoteOn2ndRead for EC case
 1be02b1 rbd: corrected handling of '--image-feature' optional
@@ -6017,6 +6930,7 @@ afef58b mds: avoid potenial double get CDentry::PIN_SCRUBQUEUE
 6e370bb mds: call CDentry::scrub_finished() when skipping special dentry
 b65345e mds: remove dir dentry from scrubstack after finishing scrube dirfrags
 50c088b mds: properly call CDir::scrub_finished()
+accb287 rbd_fio: removed deprecated command-line option to rbd CLI
 dd5a263 librbd: perf counters might not be initialized on error
 08b4c29 PendingReleaseNotes: document updated rbd CLI options
 f4f1f57 mds: skip scrubbing remote linkage
@@ -6050,11 +6964,15 @@ e184ca2 os/LevelDBStore: faster LevelDBTransactionImpl::set
 48ceaaf kv/RocksDBStore: do not Delete before Put
 1e3c2fa kv/LevelDBStore: do not Delete before Put
 338b4ed osd/ReplicatedPG: use bl-based setkeys/rmkeys
+f05d977 tasks/ceph: fix up whitespace
 402d181 tests: fix test_rados_tools.sh rados lookup
+e43ce71 suites/fs: enable all snapshot test scripts
 fad3772 client: use null snapc to check pool permission
 4580cf5 qa/workunits/snaps: move snap tests into fs sub-directory
 f33dd76 librbd: start perf counters after id is initialized
 db85bdd FileStore: support multiple ondisk finish and apply finisher
+bda2897 client-upgrade: hammer-based test should be used on new client
+d918ff0 client-upgrade: hammer-based test should be used on new client
 26befe1 cls_rbd: change object_map_update to return 0 on success, add logging
 f33282e doc/releases-notes: fix build error
 9224ac2 rbdmap: systemd support
@@ -6127,6 +7045,7 @@ c0980af rbdmap: Move do_map and do_unmap shell functions to rbdmap script
102f0b1 auth/cephx: large amounts of log are produced by the osd; if the auth of the osd is deleted while the osd is running, the osd will produce large amounts of log.
619d804 FileStore::_check_replay_guard avoids double check on replaying and can_checkpoint(); already checked in _check_replay_guard, so avoid the double check in the inner function _check_global_replay_guard
 c228bd2 [mailmap] add member info. Signed-off-by: Xiaowei Chen <chen.xiaowei at h3c.com>
+c713423 tasks/cephfs: fix race in test_sessionmap
 b0536eb librbd : fix enable objectmap feature issue
 2ac35be doc/release-notes: edits from Nathan Cutler
 6e87d23 doc/release-notes: final infernalis notes
@@ -6138,6 +7057,7 @@ da6825d test/test_rados_tool.sh: Add tests for the new bench's write options
 7524e16 tools/rados/rados.cc: Write to different destinations
 00c6fa9 Objecter: pool_op callback may hang forever.
 400b0f4 Build internal plugins and classes as modules
+7b8f205 buildpackages: silently ignore IPv6 addresses
 d457fc2 mds: apply validate_disk_state to dirs too
 6ba5bef mds: tidy up cdir scrub_initialize in scrubstack
 1930083 mds: write scrub tag during validation
@@ -6163,13 +7083,17 @@ a77bfd0 mds: refactor availability check
 8f0d796 mds: don't use g_conf from MDSMap
 21f5af0 client: a better check for MDS availability
 7db7eff OSD / ShardData: Pass ctx to mutex constructors in sdata and sdata_ordering lock to allow gain perfcounter values.
+54e53a0 tasks/cephfs: simplify test_backtrace_repair
 da48dbb rbd: fix clone issue when we specify image feature
 a603429 tests: test/librados/test.cc must create profile
 d5be20b librbd: resize should only update image size within header
+0324772 basic/msgr: remove async and random for now
 47abab9 tests: destroy testprofile before creating one
 ab46d79 tests: add destroy_ec_profile{,_pp} helpers
+e6b9b8f Added $4 to filter out "random.yaml\,async.yaml" tests for now
 629c41f ceph.spec.in: We no longer need redhat-lsb-core
 e382c67 init-rbdmap: Rewrite to use logger + clean-up
+ef04eac Added $4 to filter out "random.yaml\,async.yaml" tests for now
 5a6117e Objecter: remove redundant result-check of _calc_target in _map_session.
 8655416 Objecter: potential null pointer access when do pool_snap_list.
5def4b7 osd: reorder fields in ObjectContext and ObjectContext::RWState structs
@@ -6177,6 +7101,7 @@ e382c67 init-rbdmap: Rewrite to use logger + clean-up
7401124 osd: reorder fields in ScrubMap::object struct
48d424c osd: reorder fields in pg_stat_t struct
 b9ac90d osd/PG: tolerate missing epoch key
+a016f29 dummy: only use the user playbook
 c9681fd osd: merge local_t and op_t tnx to single one
 43ba820 mon:honour last seen election epoch in win_standalone_election()
 a341d97 ceph.in: Notify user that 'tell' can't be used in interactive mode
@@ -6206,6 +7131,8 @@ a23036c osd: Make the _scrub routine produce good output and detect errors prope
b3f8d56 osd: reorder fields in ObjectRecoveryProgress struct
bf3c30c osd: reorder and trim fields in SnapSetContext
e0fd540 rgw: swift using Civetweb ssl cannot get the right url
+e381af3 nop: suite that does not even lock targets
+2e47e03 filesystem: fix get_daemon_names
 b698a76 rgw: Fix typo in RGWHTTPClient::process error message
 173bfd0 rgw: link against system openssl (instead of dlopen at runtime)
 8160af6 tools: ceph-monstore-update-crush: add "--test" to crushtool
@@ -6215,8 +7142,10 @@ b698a76 rgw: Fix typo in RGWHTTPClient::process error message
 3047b56 rgw: Add default quota config
 570285b ceph-disk: get Nonetype when ceph-disk list with --format plain on single device.
 f22f4ac mailmap: Xie Xingguo affiliation
+b51a929 tasks/cephfs: quick test for `tell`...
 93ec538 crush/mapper: ensure take bucket value is valid
 976a24a crush/mapper: ensure bucket id is valid before indexing buckets array
+970a372 buildpackages: fetch tags from the official Ceph repository
 4300f2a krbd: remove deprecated --quiet param from udevadm
 f46f7dc run_cmd: close parent process console file descriptors
 cb2d454 rgw/rgw_resolve: musl libc does not implement res_nquery. Added fallback to res_query.
@@ -6239,14 +7168,17 @@ a704c5d vstart.sh: grant full access to Swift testing account
 cfa2d0a fine-grained control systemd to start/stop/restart ceph services at once
 56d6929 KeyValueStore: fix the name's typo of keyvaluestore_default_strip_size
 545e4b2 osd: Fix log message name of ceph-objectstore-tool
+98e683a fs: fix two frag_enable fragments
 631469c Revert "Speed optimizations. Merged 3 writes into 1."
 45ab728 osd: only calculate op crush mapping if we don't have the PG
 56ba90f osd: move misdirected op check from OSD thread to PG thread
 6528563 osd: ensure op rwm flags are checked before they are initialized
 5c49192 osd: fix OSDService vs Objecter init order
+b4a4136 openstack: rados/thrash: allocate three disks, always
 1560057 ceph.spec.in: We no longer need redhat-lsb-core
 c567341 init-rbdmap: Rewrite to use logger + clean-up
 ebfd750 ReplicatedPG: remove unused local variables
+5eaf118 openstack: resource hint must contain an array
 f4906a1 tests: ceph-disk workunit uses configobj
 163de5b tests: ceph-disk workunit uses the ceph task
 c4fdbdd cmake: Use uname instead of arch. arch is deprecated in linux-utils and coreutils does not install it by default.
@@ -6255,6 +7187,7 @@ c4fdbdd cmake: Use uname instead of arch. arch is deprecated in linux-utils and
 011e9e5 tests: reproduce crash during read-induced CoW
 2a6b90f doc/release-notes.rst: recovery isn't in the unified queue yet
 9bf21ee doc: Updated the OS recommendations for newer Ceph releases
+59e768b suites/rbd/qemu: exercise CoW support during QEMU testing
 ea52014 rgw: support core file limit for radosgw daemon
 ee4db81 mailmap: Jason Dillaman name normalization
 5449b3d mailmap: Joao Eduardo Luis name normalization
@@ -6291,8 +7224,10 @@ b66f53f mailmap: Xie Rui name normalization
 cb64c9f mailmap: Radoslaw Zarzynski name normalization
 220fcef mailmap: Haomai Wang name normalization
 96106b5 os/chain_xattr: On linux use linux/limits.h for XATTR_NAME_MAX.
+739b1d6 ceph-disk: switch to using install / ceph
 57af631 LFNIndex: remove redundant local variable 'obj'.
 dc21d8e rgw: add explicit success/error paths in RGWGetObj::execute()
+8f9de17 ceph: log which ceph.conf file is written
 3d2ed6f mailmap: Dennis Schafroth affiliation
 ffd4f2a mailmap: Daniel Gryniewicz affiliation
 16e90c5 mailmap: Bo Cai name normalization
@@ -6300,6 +7235,7 @@ ffd4f2a mailmap: Daniel Gryniewicz affiliation
 7b2e9fc ceph.in: Remove unused variable
 113d727 ceph.in: Don't drop out of command mode on certain kinds of errors
 bb5bcab makefile: For ceph command generation don't append another copy of ceph.in
+12f7b03 Added openstack config openstack.yaml; added +
 d4869a6 test: add test for pg list_missing on EC pool
 531dd77 osd: list_missing should query missing_loc.needs_recovery_map
 597c43e tracing: add tracepoints for cache pin/unpin
@@ -6322,6 +7258,7 @@ ab6b923 pybind: Add Python 3 support for rados and rbd modules
 0278f5f doc/release-notes: drop 0.94.4 plaintext
 5f8ba74 doc/releases: fix 0.94.4 link
 49d3367 doc/release-notes: final v0.94.4 notes
+b2ad940 upgrade/hammer-x/f-h-x-offline: test firefly -> x upgrade
 cdcdd78 osd/ReplicatedBackend: add bl-based setkeys/rmkeys
 126ba59 os/MemStore: avoid STL map/set for omap_{setkeys,rmkeys}
 332481e os/newstore: avoid STL map/set for omap_{setkeys,rmkeys}
@@ -6332,6 +7269,7 @@ ca72d50 kv/LevelDBStore: make set() avoid bufferlist copy most of the time
 c9c9618 kv/RocksDBStore: make get() avoid bufferlist copy most of the time
 1b25ef8 buffer: make is_contiguous() const
 1f3c01b kv/RocksDBStore: implement single-item get()
+4a83c56 tasks/cephfs: include an empty dir in journal repair
 8874249 test/ObjectMap: add test for raw_key_is_prefixed
 709b111 os/KeyValueDB: reduce malloc/free/string copy count
 e1783d2 kv: move KeyValueDB from os/ to kv/, libos.a to libkv.a
@@ -6360,12 +7298,16 @@ c1e4429 Makefile: link libos.a statically (no .la)
 e8614f8 Makefile: link mon statically (not .la)
 f86fbdb Makefile: make libosd.a static (not .la)
 e10301b librados_test_stub: add missing headers
+1f076d9 Added one more "write_append_excl: false"
 a099270 osd/tools: new and delete ObjectStore::Transaction in a function is not necessary
 212157a doc: Renamed the "Create a Ceph User" section and added verbage about the "ceph" user
 e826213 osd: off-by-one when check deep scrubbing
 fde458a test: add integration test for the auto repair feature
 8c8e1b7 pg: add auto-repair for EC pool
 1079636 pg: only queue for recovery if there is any objects to repair after scrubbing
+3655ca6 Added "write_append_excl: false"
+3a13ec0 Added "write_append_excl: false"
+ed30c54 Removed osd.6 from roles to fix #13515
 7673845 osd/PG: make upgrade() use sequencer
 52b79f7 Revert "os/FileStore: require upgrade to hammer before moving beyond"
 41c9466 Revert "osd: require an upgrade to hammer first"
@@ -6375,9 +7317,14 @@ dff5783 Revert "ceph-objectstore-tool: drop support for pre-pgmeta PGs"
 9446770 Revert "os: drop deprecated collection_* attr methods"
 0f1b1f0 Revert "os/FileStore: fix version check"
 c560020 rgw/rgw_main: Added compat header for TEMP_FAILURE_RETRY
+8ae31f1 Removed osd.6 from roles to fix #13515
+b696051 Added write_append_excl: false to fix #13485
+9d5d0c6 Removed /parallel/7-final-workload/rgw_swift.yaml as it was creating the same user as the s3tests run right before
+64dcc28 Removed /parallel/7-final-workload/rgw_swift.yaml as it was creating the same user as the s3tests run right before
 b15c541 libcephfs: only check file offset on glibc platforms
 07e7496 osd: Add config option osd_read_ec_check_for_errors for testing
 661e2a0 qa: remove legacy OS support from rbd/qemu-iotests
+a3442cf rados/singleton-nomsgr/all: add export-after-evict.yaml
 6a91101 doc/release-notes: v9.1.0
 7f337a7 osd/PGLog.h: reorder bool fields in PGLog struct
 e4b8600 rgw: Handle x-amz-request-payer in pre-signed urls
@@ -6386,7 +7333,11 @@ f9c44ef osd: drop the interim set from load_pgs()
 fb62c78 ceph_context: remove unsafe cast for singletons
 24740a7 client: drop prefix from int types
 65d0fc4 doc: fix outdated content in cache tier
+bdf7988 Use cephtest as base dir for testing and let nuke cleanup in case of abrupt failures
+f49579fe rgw: sleep before agent startup
+a5f60ae rgw/verify: allow rgw's to start up before starting agent
 477bb06 ceph.spec.in: only run systemd-tmpfiles on ceph run directory
+a2f01d1 rgw/multifs: run multipart (perl) test on ubuntu
 40336fa CMake: fix rbd_replay error
 0009f34 osd: conditionally initialize the tracepoint provider
 6368c28 librados: conditionally initialize the tracepoint provider
@@ -6397,6 +7348,7 @@ b3d02cc tracing: dynamic tracepoint provider helper
 a7ed8e1 packaging: add new tracepoint probe shared libraries
 f4feee2 ceph.spec.in: add new tracepoint probe shared libraries
 4a5305e lttng: move tracepoint probes to dynamic libraries
+b995805 rgw: do quota tests on ubuntu
 7cd12cd CMake: add vstart convenience targets
 e6c7eb7 CMake: make mon and osd depend on EC plugins
 b61f3e4 osd: fix the snapshot reads of evicted tiering pool
@@ -6404,6 +7356,9 @@ e26469e mailmap: Alexander Chuzhoy affiliation
 fee7144 rgw: fix response of delete expired objects
 2cf8d20 update radosgw-admin command
 4a3f375 vstart: set cephfs root uid/gid to caller
+176e9ef tasks/cephfs: add test_failover
+043049f tasks/cephfs: add a wait_for_daemons
+b4caa05 tasks/cephfs: fix TestClientLimits.test_client_oldest_tid
 7060a3b doc/infernalis: hate hate
 e6a9e62 doc/release-notes: i hate rst
 e98408d doc/release-notes: final infernalis notes
@@ -6413,13 +7368,19 @@ b105449 doc/release-notes: fix some attributions
 e9f200c doc/release-notes: infernalis notable changes
 638738f Revert "common, global: use lttng ust functions for handling fork-like calls"
 fca97db rgw, doc: remove remark for lack of custom account metadata of Swift.
+2747164 upgrade/stress-split: needs only six OSDs
 b4c5620 doc: remove toctree items under Create CephFS
 3be81ae (tag: v9.1.0) 9.1.0
+baa8ea3 buildpackages: implement notcmalloc flavor
 74180ad doc: Fixed links in RGW start.
 036d36f debian/control: python-setuptools is a build dependency
 24a8e71 doc: wrapped the lines to 80 characters in start-RGW.
 8e59595 doc/release-notes: 9.1.0
 91d51b9 doc: More edits in RGW quick start.
+b03ac07 tasks/cephfs: fix TestClientLimits.test_client_oldest_tid
+eac791c rados/singleton-nomsgr/all/msgr: needs 15GB RAM
+1a63fd7 rados/objectstore/alloc-hint: need three attached disks
+c0828ca tasks/admin_socket.py: wait 120 seconds instead of 60
 1deb31d Init crush_location in Objecter from config file.
 303263d os: add a field indicate xattr only one chunk for set xattr.
 65064ca OSD:shall reset primary and up_primary fields when beginning a new past_interval.
@@ -6483,17 +7444,24 @@ c1d48ff osd: use pg id (without shard) when referring the PG
 2b21e3c common/*Formatters: Split Formatters
 2b7ddde osd: Correct the object_info_t::decode() version
 03078ba rgw: location constraints should return api name
+81f5c3a buildpackages: get ceph submodules
 0f488ac doc: Edited RGW quick start.
+d1ada2e radosgw_admin: skip log objects that start with obj_delete_at_hint
 a077301 mon/OSDMonitor: put crushtool error in log
 0bf2a79 messages/MOSDOp: fix reqid encoding/decoding
 6f6fe39 messages/MOSDOp: decode complete message for v6, too.
 e0cf25f Fix debug message in osd::is_healthy
+56a6e0c buildpackages: use _get_version instead of os_version
 f276308 ceph-fuse.cc: While starting ceph-fuse, start the log thread first
 d36d7f2 ReplicatedPG: allow maybe_handle_cache to return status detailing what happened
 68c722c pybind/rados, get_omap_vals: Fix pydoc type.
 5a6e762 test: pybind/test_rados: add binary data.
 db03d30 pybind/rados: Fix binary omap values.
+174a741 radosbench: use pool config even if create_pool is false
+c0b0ec2 divergent_priors2: give divergent time to come up
+9fdea19 Specified 'hammer' branch
 5b9c326 rgw: fix wrong etag calculation during POST on S3 bucket.
+490dbb2 Specified 'hammer' branch
 cbf36ad LibRBD: Adjust correct op latency scope
 4fdc703 librados_test_stub: prevent interleaving of operations
 d689db8 cls: new force-promotion flag for class methods
@@ -6504,14 +7472,22 @@ cac1d6f buffer: restored pre-infernalis API compatibility
 d09cdae rgw: Check request-payer configuration
 520c4bd rgw: Allow to set the requestPayment configuration
 f2a31ab rgw: Add requestPayment retrieval
+0045dfc buildpackages: enforce ceph-object-corpus canonical URL
 8f28913 rgw, doc: mention that Swift objexp is supported now.
+4d89c9e buildpackages: walk the whole config tree to find sha1 to build
+c103579 buildpackages: honour install priorities tags, branch, sha1
+810caaf buildpackages: branches are found in refs/remotes/origin
+e54c916 buildpackages: do not override the sha1
 7250db6 CephxServiceHandler.cc: fix get_auth conditional
 1a2689f ReplicatedPG::maybe_handle_cache: do not promote before checking full
 e0d8cb1 tests: removed obsolete rbd_replay test cases
 c2a83d0 ceph-dencoder: new rbd_replay trace file types
 3ecdae8 rbd-replay: added version control to trace output file
 e692773 rgw: add support for skipping manifest parsing during GET on Swift object.
+50a2a80 suites/fs: test ceph-fuse with libcephfs permission check
 a52383d client: don't mark_down on command reply
+a630da7 buildpackages: allow for concurrent build if != sha1
+9db757b buildpackages: prefer GitbuilderProject.os_{type,version} over config.
 1e57e6d mds/Session: use projected parent for auth path check
 a1f19678 tests: add test for history alloc counter in bufferlist
 4014e31 common: perf counter for bufferlist history total alloc
@@ -6525,10 +7501,12 @@ a1f19678 tests: add test for history alloc counter in bufferlist
 b5b4a9d rbd-replay-prep: support new read/write APIs
 a1e99f0 rbd-replay-prep: avoid using assert statements on user inputs
 858059e qa: avoid using sudo in fsstress
+5716f5f smoke/basic/tasks/libcephfs: fix client debug override
 e049de3 os/FileStore: kludge sloppy hammer temp objects into temp collection
 5a11d76 ceph.spec.in: move python-sphinx BuildRequires to the right section
 96aabe7 ceph.spec.in: move BuildRequires out of subpackages
 d258bf5 ceph.spec.in: drop MY_CONF_OPTS
+21cefaf upgrade/hammer-x/point-to-point: whitelist wrongly marked me down
 468c2dd doc: remove mention of --lazy-remove from radosgw-admin manpage
 98cbf03 osd/PG: fix generate_past_intervals
 5e9cf8e doc/release-notes: fix math error in firefly notes
@@ -6568,21 +7546,67 @@ a875826 mon: always set up session; move waitlist logic
 e2e1bd9 mds: avoid emitting cap warnings before evicting session
 392ca8b filestore: add objectstore finisher name
 2c04989 common: add latency perf counter for finisher
+00a8393 Whitelisted WAR 'failed to encode'; fixes #13349
 8e930e3 messages/MOSDOp: avoid uninit/undecoded fields in print()
 362b18a mon: fix msg leak in resend_routed_requests
 c9dad52 Mon: Fix decoded message leak when this monitor is leader
+37a881a Whitelisted WAR 'failed to encode'; fixes #13349
 4698e24 tests: allow docker-test.sh to run under root
 a1f76ee tests: remove fedora docker files
 3ed25c1 librados: document new flag
 929e5d0 ceph.spec.in: correctly declare systemd dependency for SLE/openSUSE
 8d8fcee osd/ReplicatedPG: exempt MDS from the failsafe check, too
+ab76f76 upgrade/hammer-x/point-to-point: use different client for second rgw
+15e890e krbd fio tests using sync engine for format 2 and features 1 and 2.
+b5cf466 krbd test using fio for format 2 and features 1 and 2.
+8e115c9 rbd fio tests using rbd engine, tests on clones as well using features 1 and 13.
+971c5fc rbd io tests using fio, various fio options can be specified in yaml
+a0f0194 upgrade/hammer-x/point-to-point: use different client for second rgw
+62247f2 tasks/cephfs: fix FuseMount bin path in vstart
+0584b9c tasks/cephfs: fix test_journal_migration
+9eb65dd tasks/cephfs: move mds_scrub_checks
+03ea097 tasks/cephfs: move journal migration test
+176123c tasks/cephfs: extend vstart_runner's ctx&run
+99e3a40 tasks/cephfs: add --interactive for vstart runner
+904fd76 tasks/cephfs: fix FuseMount._asok_path
+20e7d2a tasks/cephfs: remove a redundant sudo
+4f8adf1 tasks/cephfs: raise error on non-string stdins
+80a9efb tasks/cephfs: warn if vstart_runner can't import mods
+0689141 tasks/cephfs: stop if needed binaries are absent
+23ae981 tasks/cephfs: add instructions to vstart_runner
+cbb8572 tasks/cephfs: updates for cmake environ
+8b846b9 tasks/cephfs: add vstart runner script
+efb62ae tasks/cephfs: mark some tests as @needs_trimming
+f8a4b08 tasks/cephfs: add needs_trimming decorator
+c0e90aa tasks/cephfs: make FuseMount.teardown safer
+1ed6272 tasks/cephfs: mark some tests as @long_running
+d4ecee8 tasks/cephfs: add @long_running decorator
+9a1ca94 tasks/cephfs: cluster_down before fs rm
+684e0de tasks/cephfs: split up TestClientRecovery
+6f8a35b tasks/cephfs: make memstore dependency declarative
+98da1e0 tasks/cephfs: refine TestClientLimits.test_client_oldest_tid
+37d5566 tasks/cephfs: fix race in TestStrays
+be749f8 tasks/cephfs: work around fuse weirdness
+e51cd6b tasks/cephfs: prefix override in FuseMount
+7e6f8d4 tasks/cephfs: use stdin/stdout in Filesystem._read_data_xattr
+48cf533 tasks/cephfs: handle endpoint not connected
+120f9d8 tasks/cephfs: use Filesystem.rados instead of direct CLI
+bab4c7a tasks/cephfs: add Filesystem.exists
+e2f8b48 tasks/cephfs: add Filesystem._prefix
+f58c119 tasks/cephfs: use raw_cluster_cmd instead of manual "ceph"
+99a8a12 tasks/cephfs: add Filesystem.delete_all
+ad562f4 tasks/cephfs: make Filesystem rados command overridable
+7884d5b tasks/cephfs: use raw_cluster_cmd in Filesystem
 81c2374 rgw: improve handling of already removed buckets in object expirer.
 662ad52 release-notes: draft v0.94.4 release notes
 2228c22 tools: ceph-release-notes handle multiple issues
+3e30d5d tasks/radosbench: wait just a bit longer
+7bb1ceb log-whitelist 'reached quota' for librados test.sh
 b915952 ceph.spec.in: Do not always restart the daemons on removal
 c95c14b ceph.spec.in: Do not always restart the daemons on upgrades
 b20a1ba ReplicatedPG: consider IGNORE_CACHE for all maybe_handle_cache calls
 1df2cc2 install-deps.sh: use %bcond_with selinux on SLE/openSUSE
+de3c9d5 rados/singleton-nomsgr/all/full-tiering: time out rados -p
 3296274 ceph_test_rados_api_aio: test pool full gets EDQUOT when FULL_TRY flag is set
 8b1f234 librados: expose OPERATION_FULL_TRY flag
 ea93ead osd: return -EDQUOT instead of -ENOSPC if it is a pool quota
@@ -6692,16 +7716,23 @@ d0e4fae mds/MDSAuthCaps: use bitmask for is_capable()
 63c29ad mds/MDSAuthCaps: move allows() into MDSCapSpec
 1d82ec4 mds/MDSAuthCaps: parse optional gid list
 57a1860 mds/MDSAuthCaps: whitespace
+f467a98 tasks/ceph_manager: %d -> %s
+01d48a2 ceph-deploy: always zap disk before creating an osd
+5a450f8 rados/singleton-nomsgr/all/11429: blackhole osd.0 to close race
+963f5e5 upgrade/hammer-x/tiering: fix upgrade sequence
+a53a80b tasks/ceph_manager: fix logging on failed pool property
 02113ac init-rbdmap: fix CMDPARAMS
 04c09ac mds: fix SnapServer crash on deleted pool
 2bb3d4b docs: Fix styling of newly added mirror docs
 0ce7491 bugfix: should call md_config_t::remove_observer on shutdown
+4e9f1df rados: add test for 13234.yaml
6b1e4a6 cleanup: make the pool setting GET description point to SET description
ef59f7c doc: update doc with new pool settings
 a965378 ReplicatedPG: clearing a whiteout should create the object
 47f4a03 ceph-objectstore-tool: delete ObjectStore::Sequencer after umount
 f20f67e pybind/cephfs: fix DirEntry helpers
 7b1882f ceph.spec.in: correctly declare systemd dependency for SLE/openSUSE
+5c7505d s3tests using ec data pool
 3f00042 rgw: set default value for env->get() call
 469d35f osd: init started to 0
 bba3ab3 mon: combine _ms_dispatch and dispatch
@@ -6709,14 +7740,23 @@ bba3ab3 mon: combine _ms_dispatch and dispatch
 0c8faf7 common/obj_bencher.cc: fix verification crashing when there's no objects
 e42c9aa ceph.spec.in: re-re-drop fdupes
 566c872 os/fs: fix aio submit method
+f133725 buildpackages: implicit for OpenStack
 d7b620f ECBackend::handle_recovery_read_complete: do not expose the hash_info when getting the obc
 892800b ECBackend::handle_sub_read: restructure hash check and fix part of 12983
 80b7237 qa/workunits/cephtool/test.sh: don't assume crash_replay_interval=45
+8cf3d15 rados/singleton-nomsgr/all/11429: grep osd.0 only
+a4f9bdb tasks/cephfs: mds allow
 c5a9275 osd/ReplicatedPG: preserve (some) flags when proxying reads
 994ec60 mds: respect max_entries/max_bytes of lssnap request
 818d790 MOSDOp::decode : Splitting message decoding, new version
 afcfb05 handle_op/do_op: Moving couple of checks from dispatcher to parallelized workers
+05228e1 buildpackages: user-data must be per os-type/os-version
+d58a0b0 add cache tiering test for hammer-x
+8941a8b rados/singleton-nomsgr/all/11439: fix greps
 2fea3a5 examples/librados/hello_world.cc:missing semicolon
+204e27f buildpackages: flock must --close
+78b2a9a buildpackages: get Ceph git url via get_ceph_git_url()
+fdcfb79 ceph-deploy: fix ceph-deploy-branch config options
 216eef5 Revert "osd: new pool settings: scrub intervals"
 04679c5 OSDMap: fill in known encode_features where possible
 c7e905e ceph-create-keys: set mds "allow *"
@@ -6725,7 +7765,12 @@ e52204c client: fix quota enforcement on subdir mounts
 15e19a4 client: refactor quota check functions
 e7f277b rgw/rgw_admin: check the legality of the params. There are no messages when some params are invalid, so the program should add a function that checks the params; if they are invalid, it will print a message.
 f1d8a8f Objecter: fix repeated free of op->ontimeout.
+9e95c74 upgrade/hammer-x/parallel: set infernalis settings after upgrade
+910770c releases/infernalis.yaml: final steps after infernalis upgrade
+ecc504f tasks/divergent_priors2: wait for osd to start before tell
 4a0e56f tools/ceph-kvstore-tool: handle wrong command line argv
+ea5cea8 rados/singleton-nomsgr/all/11429: more reliably create orphan pgs
+cff9bdd rados/singleton-nomsgr/all/full-tiering: test full cache tier
 0635b13 Objecter: maybe access wild pointer(op) in _op_submit_with_budget.
 482d4e5 AsyncConnection: Add new debug log
 a1eb380 osd/ReplicatedPG: fix ENOSPC checking
@@ -6738,6 +7783,7 @@ bf7e937 osdc/Objecter: set FULL_FORCE flag when honor_full is false
 95055e7 osd: add FULL_TRY and FULL_FORCE rados op flags
 7757342 qa: https://ceph.com/git -> https://git.ceph.com
 d4d65fb qa: http://ceph.com/qa -> http://download.ceph.com/qa
+0e2814d tasks/ceph_manager: ignore failure getting pg_num
 cdccf11 osd/PG: compensate for sloppy hobject scrub bounds from hammer
 acda626 osd: avoid duplicate MMonGetOSDMap requests
 f4bf14d Update Xinze affinity
@@ -6753,6 +7799,8 @@ efdaa93 mds: fix error reformatting subtreemap_test events
 5f7b3f5 filestore: fix peek_queue for OpSequencer
 6334d64 rgw:mdlog trim add usage prompt
 c053499 osd/: eliminate unnecessary pg_hit_set_history_t::current_info
+7e3a271 buildpackages: make rpm packages
+8087095 upgrade/hammer-x/stress-split-erasure-code-x86_64: specify arch
 f5359f2 osd: print min_last_epoch_clean along with pg dump
 ef909cc mon/Elector: do a trivial write on every election cycle
 2fb7b1f mon/MonitorDBStore: assert/crash if there is a write error
@@ -6773,6 +7821,7 @@ dcf647e CMake - fix check for NSS
 b02e0f9 CMake - fix libatomic_ops and gperftools checks
 3123b2c arch/arm: s/false/0/
 66d19c7 rgw: fix swift API returning incorrect account metadata
+7fbd70b suites/fs: test ceph-fuse with 32-bits ino_t
 cb2fc29 rgw: refuse to calculate digest when the s3 secret key is empty
 7e5980b rgw: improve convenience of key operations.
 36e4a80 ReplicatedPG::hit_set_setup: fix hit_set_remove_all call
@@ -6781,7 +7830,11 @@ ef97305 cls_rgw: fix bucket listing when dealing with invisible entries
 d422f28 OSDService::agent_entry: don't use PG::operator<< without pg lock
 e17c8e1 init-radosgw: specify pid file to start-stop-daemon
 d18cf51 osd: fix requeue of replay requests during activating
+1e5b352 upgrade/hammer-x: use supported distros, not hard-coded rhel7+trusty
 88cffd8 rgw: don't treat Content-Type as automatically dumpable metadata.
+91b1172 upgrade/hammer-x: use rgw civetweb
+bc9a66c upgrade/hammer-x/parallel: clients are hammer for final workload
+58453d3 upgrade/hammer-x/point-to-point: fix x client
 4264358 erasure-code: workaround i386 optimization bug with SHEC
 f4b55f4 journaler: detect unexpected holes in journal objects
 742906a rgw: fix wrong etag calculation during POST on S3 bucket.
@@ -6807,6 +7860,7 @@ d521a75 ceph-disk: upstart must not run ceph-disk activate concurrently
 f0a4757 ceph-disk: systemd must not kill a running ceph-disk
 cc13fa0 ceph-disk: fix typos in udev rules
 b86d9fd ceph-disk: ensure ceph owner on udev change
+eab7019 tasks/ceph: wait for MDS to be active when creating a cluster
 a3a8c85 use simplifed messenger constructor for clients
 e3785b0 msg: add simplified messenger constructor
 66a9bfd osd/: remove unused pg_hit_set_history_t::current_last_stamp
@@ -6819,6 +7873,8 @@ ea97761 systemd: increase nproc ulimit
 2a01bbc mon: make all paxos-related timeouts relative to mon_lease
 5e2c665 scripts: release_notes can track original issue
 fd9ce66 osd/ReplicatedPG: tolerate promotion completion with stopped agent
+1a0f68e upgrade/hammer-x/point-to-point: upgrade client for final test
+eebd63a upgrade/hammer-x/point-to-point: use proper hammer test.sh
 e65fb1b mds: adjust MDSRank::incarnation according to mdsmap
 d6b30de osd/ReplicatedPG: If the object exists and is not a whiteout, don't do touch for create.
 30810da osd: new pool settings: scrub intervals
@@ -6826,10 +7882,14 @@ d6b30de osd/ReplicatedPG: If object exist and not whiteout, don't do touch for c
 b97ae76 osd: make 'ceph osd pool get' work for all settable pool flags
 10235e3 osd: refactor setting write_fadvise_dontneed pool flag
 b41f574 Fix unnecessary at/near max target warning in ceph -s when using an EC pool: when calculating objects needing eviction we use object - hitset_achieve, so with max objects = 30000, ceph -s will warn at/near target whenever hitset_achieve objects exist
+daf4f52 upgrade/hammer-x/parallel: upgrade client before final workload test
+0931889 upgrade/hammer-x: fix full_sequential indentation
 21a1e75 tests: update to match crushmap validation message
 937e4f8 rhel 5.9 librados fix, removed blkid from compilation, Fixes #13177
 4da6793 install-deps: enable python3
 170f9ad doc: do not promise backports to Dumpling
+f6a96ad upgrade/hammer-x: use eastern tz for all upgrades
+a27cb8d timezone: add eastern, pacific, random timezone snippets
 a6f07e9 doc: remove mention of ceph-extra as a requirement
 c0ef84f doc: remove ceph-extras
 387d780 doc: correct links to download.ceph.com
@@ -6837,7 +7897,12 @@ c1172ca mon: fix auth get-or-create output
 7b2fa67 ReplicatedPG::get_snapset_context: set exists to false for new ssc with can_create
 4a8b08d rgw: don't read actual data on user manifest HEAD
 bf9c005 ceph-osd-prestart.sh: no ceph-disk chown
+eae4270 upgrade/hammer: do ubuntu for point to point
+3fd609f upgrade/hammer-x: only ubuntu
 e44d1e0 ceph.spec.in: Fix up (/var)/run/ceph creation
+99c6267 do not duplicate CentOS 7 runs
+65565de erasure-code: isolate isa plugin arch constraint
+a6fc7c6 upgrade/hammer-x: run rados.py to completion before moving on
 1b3090d mon/OSDMonitor: fix crush injection error message
 524b0bd mon/OSDMonitor: only test crush ruleset for the newly created pool
 aa238e5 crush/CrushTester: allow testing by ruleset
@@ -6864,6 +7929,7 @@ eb7c21f rgw: add support for printing generic attrs on Swift container.
 8c2b8b7 rgw: add support for printing generic attrs on Swift account.
 85bece7 new release key
 9d8fb1c rgw: camelcase also after dash in rgw_extended_http_attrs.
+4cccde6 Revert "Merge pull request #567 from ceph/ceph_fuse-timeout"
 6a24d31 libcephfs: fix calling init() then mount()
 e017aab CMake: fix libcephfs shared lib generation
 7182499 install-deps.sh: disable python3
@@ -6872,6 +7938,10 @@ a825f68 client/MetaRequest: optimize func can_forward/auth_is_best.
 a195928 unify order limit
 f51afa6 client/MetaRequest: open w/ O_CREAT|O_TRUNC is write.
 8f46bd9 client/MetaRequest: Remove the useless condition.
+f97fde6 tasks/cephfs: switch unmount timeout to 15 minutes
+e38a6d6 buildpackages: build the repository and the packages
+685d76a ceph: wait for CephFS to be healthy before proceeding
+08bae3b Use old branches as is without 'ceph-' for firefly, hammer etc
 d0ac68b mon/PGMap: calc min_last_epoch_clean when decode
 d1505b5 doc: delete wrong description of installing RPMs
 1f7a2dc doc:Replaces 'osd host' with 'host'
@@ -6890,13 +7960,19 @@ c842555 mon: debug refs on output replies
 0b309e9 mon: fix MonSession operator<<
 89cc479 mon: do not leak messages on shutdown
 9546252 mds: cast numbers for mds health to string when printing
+8c618e5 buildpackages: re-use config from install task
+d40d8ff buildpackages: pep8 compliance
+0eeb441 buildpackages: build ceph packages and upload them
 71f6529 doc: fix a broken hyperlink
 bbe27dc doc: Fixes a wrong directory name.
+2b25080 buildpackages: build ceph packages and upload them
 26bcb36 Examples: hello_world.cc, content displayed after read is not null terminated.
 4e8242a mds: reset MDSRank heartbeat timeout even when MDS is laggy
 807a34c common: osd_pg_epoch_persisted_max_stale < map_cache_size
 73d7bed logrotate: logs are now owned by ceph:ceph
 7250fb1 os/OSD.cc cast osd_max_write_size to int64_t
+39f9ce6 upgrade/hammer-x/split-*: ignore osdmap encode errors
+2460ce3 upgrade/hammer-x: hammer test.sh before and during upgrade
 139b5d6 os: require Sequencer arg for apply_transaction()
 df92112 ceph_objectstore_test: fix warnings
 1002201 os/KeyValueStore: better osr debug
@@ -6928,17 +8004,24 @@ fef7142 ceph: fix rename into sub-directory check
 68ecc55 mon: do not leak ref creating MonOpRequest
 3a7d91d msg/simple: debug refs on sent messages
 7d112c6 mon/MDSMonitor: drop incorrect m->put()
+5befd50 hammer-x: test CEPH_FEATURE_HAMMER_0_94_4
 af39f98 .gitignore: ignore src/ceph.tmpe
 98302ad gmock: ignore *.pyc
 c57e868 rocksdb: ignore m4
 51abff1 ceph.spec: respect CEPH_EXTRA_CONFIGURE_ARGS
 4a5a5b3 qa/workunits/cephtool/test.sh: make mds epoch check more tolerant
+f0c925e suites/rados/singleton-nomsgr/all/11429.yaml: double-hop and fix
 d33fea5 osd/PG: tolerate missing pgmeta object
 f15d958 osd: allow peek_map_epoch to return an error
 ff9600a osd/ReplicatedPG: remove stray debug line
 6e85433 AsyncMessenger: Keep file_lock held when accessing its event field
+5af28cf tasks/radosbench: allow cleanup: false
 f3f4141 doc: Updated the rados command man page to include the --run-name option under the bench sub-command
 4dea76e ceph.spec: include /etc/sysconfig/ceph
+5eb0e4d suites: sudo ceph
+dad981d tasks: sudo ceph for cli
+822fa66 rados/singleton-nomsgr/all/valgrind-leaks: verify leak checking works
+9b3f36f ceph: add option to expect valgrind errors and fail if there are none
 8657081 doc: remove references to default data/metadata pools
 c3d23ca ceph-common: explicitly trigger /run/ceph creation
 ea91c4e systemd: tmpfiles.d in /run, not /var/run
@@ -6949,6 +8032,7 @@ c8bfc35 ceph.spec: install /etc/sysconfig/ceph
 3aa38bc make /var/run/ceph 770 ceph:ceph
 e7837d1 ceph.spec: make /var/{lib,log,run} owned by ceph
 f167e8d .gitignore: radosgw-object-expirer
+45c9c0a distros: alias a-supported-distro.yaml to CentOS 7
 3826203 compat: define O_DSYNC for FreeBSD
 2dbf201 compat: define XATTR_CREATE/XATTR_REPLACE for FreeBSD
 c4fe266 compat: don't reinterpret_cast NULL
@@ -7001,7 +8085,9 @@ c503e97 rgw: include RequestId as part of the Error response
 94d84cc test: mon/mon-ping.sh: make sure 'ceph mon ping' works as expected
 6907778 ceph-objectstore-tool: add mark-complete operation
 567dd1e common: OpTracker age histogram calculation is not correct
+3084453 rados: 'failed to encode ...' warnings are normal on upgrades
 06147dd rgw: preserve all attrs if intra-zone copy
+d83a95e suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
 293d12a2 test/Makefile.am: run mon/mon-scrub.sh as part of checks
 6ceb37d test: mon/mon-scrub.sh: port clashed with other tests
 8fd40e1 librbd: remove duplicate read_only test in librbd::async_flatten
@@ -7009,6 +8095,7 @@ c503e97 rgw: include RequestId as part of the Error response
 9d9b305 os/KeyValueStore.cc: prefer ++operator for non-primitive iterators
 8810f8f SnappyCompressor.h: prefer ++operator for non-primitive iterators
 4f98dab client/Client.cc: fix realloc memory leak
+cb7eb2a suites/hammer/older: do not upgrade client while running RBD import/export
 6f9ee79 ReplicatedPG,Objecter: copy_get should include truncate_seq and size
 797caae test/librados: add test case for read object (off=0,len=0).
 19a210a osd: make read(off=0,len=0) ec-object work.
@@ -7020,6 +8107,7 @@ d741352 AsyncMessenger: add instance name in debug log when processing msg
 95685c1 rgw: add --reset-regions for regionmap update
 3ccc3bb librbd: diff_iterate needs to handle holes in parent images
 d5650c9 tests: new test case for librbd diff_iterate over discard extents
+a328e3e tasks/ceph_manager: dump pgs when recover times out
 d32a3be qa/workunits/rados/test_alloc_hint.sh: sudo to ls files
 ab4232b rgw: init_rados failed leads to repeated delete
 e48cec3 mon: disable gmt_hitset if not supported
@@ -7039,11 +8127,15 @@ e6fbe53 improve error handle of rbd metadata operation & format output
 3aefd91 erasure-code: fix gf-complete warnings
 82b0243 qa/workunits/post-file.sh: sudo
 bfe359a osd: dump full map bl at 20 when crc doesn't match
+31f643b distros/supported: remove jessie until paramiko issue is resolved
 351d957 doc: fix the typo in command example
 7080e0f Thread.h: disable copy constr and assignment op
+6064443 tasks/divergent_prior[2]: fix rados get syntax
+7b3ea31 tasks/reg11184.py: fix rados get syntax
 7d781f7 doc: 'ceph --admin-daemon ...' -> 'ceph daemon ...'
 404dd16 tests: base gmock class support for librbd
 e8749b2 librbd: support templating of ImageCtx for async state machines
+26eb35d tasks/s3tests: use ceph-$foo branches
 1c522be ceph.spec.in: put distro conditional around Group:
 0d18f9b librados_test_stub: add mock class for IoCtx operations
 e267128 ceph.spec.in: fix lttng/babeltrace conditionals
@@ -7095,7 +8187,9 @@ e54f896 ceph.spec.in: drop redundant centos from conditionals
 af8b3da Messenger: Make fast dispatch message set dispatch timestamp
 929ca5b ceph.spec.in: drop lsb-release dependency from ceph-common
 557e581 mon/MonClient: fix error in 'ceph ping mon.id' Fixes: #12442
+8bac721 kcephfs: include debug mds 20 everywhere
 f65267c rgw : setting max number of buckets for users via ceph.conf option
+d46c3a5 tasks/scrub_test: sudo ls ...
 64962aa qa/workunits/rados/test_alloc_hint.sh: sudo to list files
 75d9f58 osd/ReplicatedPG: use apply_ctx_stats() everywhere
 eb2993a osd/ReplicatedPG: create apply_ctx_stats() helper
@@ -7110,6 +8204,7 @@ a73ac73 add snap rename test in cls_rbd
 2865de4 add unit test for snap rename in imagewatcher
 d139f35 add snapshot rename CLI
 6ce8b2a handle snap rename notify
+0553e50 tasks/ceph_deploy: install ceph-test
 65dcc2d osd: When generating past intervals due to an import end at pg epoch
 cabfe13 osd: check the length of the map before accessing the first element
 ddca321 rbd: add verbose error reporting to merge-diff tool
@@ -7126,6 +8221,7 @@ b199c49 ceph-osd-prestart.sh: fix osd data dir ownership check
 023c517 vstart.sh: enable all experimental features for vstart
 3a41ef4 ms/async: log message tx/rx at level 1
 2ca2c1b osd, test: Minor clean-up from fast-read and error handling ec changes
+5df0ceb suites/rados/singleton-nomsgr/all/11429: upgrade to hammer first
 cbe85ec doc: fix the code-block in ruby.rst
 d015d23 osd: sparse reads returning invalid extent map
 7c00bf0 cmake: update FUSE_INCLUDE_DIRS to match autoconf
@@ -7240,6 +8336,7 @@ ee20404 osdc/Objecter: optimize Objecter::tick.
 08296dc rados: make 'rados bench' support json format output Fixes: #12864 rados bench add '[--format json]' and '[-o | --output outfile]' support. output option only take effect in json format. now we can use the bench result draw performance graph easily.
 f420fe4 mds: fix shutdown while in standby
 80f10e3 osdc/Objecter: remove the unuseful code.
+8e27504 tasks/cephfs: fix mount_timeout config loading
 bd80473 add snapshot rename methods in cls_rbd
 7cc963b osdc/Objecter: Don't forget to call _op_cancel_map_check when cancelling a linger op.
 36b6271 osdc/Objecter: In _cancel_linger_op, num_unacked/num_committed should be decreased.
@@ -7255,7 +8352,11 @@ e4ce619 osdc/Objecter: For func op_cancel_writes it can directly call op_cancel.
 89f0112 Objecter: Take RLocker when call is_active.
 6e0f0bb ceph-disk: use /sys/dev/block/maj:min/partition to see if partition
 403144f ceph.spec: package cls_numops
+f5865d3 tasks/ceph_deploy: work with systemd
+5cb9912 distros: add debian 8 jessie, remove wheezy from supported list
+c1e8674 ceph-deploy: install w/ ceph-deploy, not install.py
 d05e531 doc: update ruby doc with the aws-sdk gem usage
+7de534c tasks/qemu: avoid adding duplicated entries to /etc/exports
 13668e6 client: set osdmap epoch for setxattr.
 fe8b1c9 in filestore, OP_SETATTR is implemented in FileStore::_setattrs
 109e5b1 make: do not compile XFS.cc if --without-libxfs
@@ -7373,16 +8474,24 @@ aac8971 ceph-disk: replace partx with partprobe
 0e34742 ceph-disk: is_mpath predicate for multipath devices
 f9cbd79 tests: ceph-disk tests may use system ceph-{mon,osd}
 42ad86e udev: add devicemapper to partuuid-workaround
+cfce56f rados/basic/msgr: enable all experimental features if random/async
+d155519 Revert "rados/basic/msgr: enable all experimental features if random/async"
 cc21514 ceph-disk: {CentOS,RHEL} >= 7 && Fedora >= 22 are systemd
+40db837 rados/basic/msgr: enable all experimental features if random/async
 a895982 common: 'enable experimental data corrupting features' now understands '*'
 a3fc6e8 CMake: update for boost_random
 afa92e5 common/SubProcess: silence compiler warnings
+ffe79e9 tasks/cephfs: timeout on wait in test_network_death
 cfcacb8 mon: LogMonitor: handle boolean options consistently
 fbbe5b0 mailmap: make h3c mailmap more robust
 51e6b71 mailmap: sort {organization,mail}map
 c901e85 doc:radosgw: correct typos of the command removing a subuser
 e92d2f3 h3c mail organization map
 a9c1601 cls: Fix successful return found by compiler warning
+cfd6a6f implement the ceph-disk suite
+d6c72c3 ceph-deploy: implement only_mon
+b181783 ceph-deploy: implement keep_running
+e5db657 ceph-deploy: ceph report on failure
 70e000a test: Fix to expect no errors on 1 bad shard and errors with 2 bad shards
 d3b06ed test: Fix comment in test-erasure-eio.sh
 c09c119 osd: Send reads to other shards if erasure coded chunk reads fail
@@ -7400,6 +8509,8 @@ ae1df24 osd: Fix admin socket help output
 5eb2a77 mon: add a new pool setting to configure fast read for EC pool
 131214d ec: add support for fast read on PGBackend/ECBackend async read
 9db8122 rgw: lock obj expirer shards when processing
+62f4423 Update rados_api.yaml (cherry picked from commit 5b9d41e3ea0f07171ce59f0bb1f5636733452062)
+da5d3c2 Initial check in to add firefly-hammer-x suite #12641
 c4a9a4b rgw: objexp related fixes
 e734b0a radosgw-admin: a new command to run objects expirer
 3dbea3c rgw: rename obj expiration hint oids
@@ -7427,9 +8538,11 @@ db27ea9 rgw: add garbage collector daemon for expired objects.
 20c7652 ceph-object-corpus: remove hammer foo and bar coll_t's
 4a1cb82 test/encoding/readable: handle nondeterministic items in corpus too
 f078a67 ceph-object-corpus: add 0.94.2-207-g88e7ee7 hammer objects
+df8cecf tasks/cephfs: fix test_pool_perm teardown
 5a4f6a8 osd: do not let OSD_HITSET_GMT reuse the feature bit
 d43c10e memstore: use thread_local page vector
 4e6548c memstore: PageSetObject for MemStore integration
+671a9df rados: test both simple, async, and a mix
 3eb36fc doc/release-notes: v0.94.3
 f5df1e4 osd: separate filter init from construction
 2777438 test: add a test for filter in cls hello
@@ -7489,16 +8602,20 @@ d7bf8cb rgw: init some manifest fields when handling explicit objs
 b610588 ceph.spec.in: remove obsolete SUSE-specific code
 df21a6e osd: expose PGLSFilter in objclass interface
 c318129 ceph.spec.in: Restart services only if they are running
+2280ca5 fix rebug rados
 55cec07 Messenger: Fix rand() generate the same sequence numbers
 ef1434a Narrow journal aio_lock locking scope in write_aio_bl
 15e5ebe common: fix code format
 2d2f0eb test: add test case for insert empty ptr when buffer rebuild
 fb1b6dd common: fix insert empty ptr when bufferlist rebuild
+ccc47d3 suites/rados/thrash: randomize hobject sort order
 347ac0f ceph_test_rados_api_tier: make PromoteOn2ndRead tolerate thrashing
 8a08acc common/hobject_t: fix is_temp() off-by-one
 7cc8d86 ceph_test_msgr: parse CEPH_ARGS
 dfd142f include/inline_memcpy: use __builtin_memcpy instead of explicit ptr copies
 98c0606 include/inline_memcpy: make prototype resemble memcpy's
+5b9d41e Update rados_api.yaml
+998ba8b Initial check in to add firefly-hammer-x suite #12641
 fc02a8a added boost timegm impl for cross platform support
 da6d5cf osd: fix hit_set_map size bug for tier pool
 582f0f6 doc: Added "Hammer" in the list of major releases.
@@ -7570,6 +8687,7 @@ dbcaa54 uuid: use boost::random:random_device
 136242b rgw: be more flexible with iso8601 timestamps
 fd72577 tests: fixed rbd cli cram integration tests
 f20f7a2 Objecter: pg_interval_t::is_new_interval needs pgid from previous pool
+766da54 ceph_deploy: if a branch is given, use it when installing rgw
 cd6ac72 librbd: error closing image while set to invalid snapshot
 622d22e osd: wait for cleanup from bench
 c5895d3 bug fix: osd: do not cache unused buffer in attrs
@@ -7676,6 +8794,9 @@ ed8d3c0 common/Formatter: add dump_object helper
 e4cff03 ceph.spec.in: Require awk for selinux post script
 9038488 selinux: Relabel files if and only if the policy version changed
 aa50321 cmake: add DiffIterate.cc to librbd
+53a714e update the list of supported distributions
+e252c47 update the list of supported distributions
+3da8672 ceph-deploy: hints for OpenStack
 15a3e86 rgw: enable perf counter for unhealthy workers
 de0b66a test: add test for the perf counter of CephContext
 5d109e9 common: support perf counter (for unhealthy workers) on CephContext
@@ -7735,6 +8856,8 @@ d742e79 tests: Add unit tests for CLS numops class
 d17f158 cls_numops: Add cls_numops client
 87f6b73 Add new cls_numops class for numeric operations
 0ba2e14 Revert "osd/ReplicatedPG: snapset is not persisted"
+8f66ecb Added singleton hammer-x test to address #12625
+8bfb1c9 Revert "Added singleton hammer-x test to address #12625"
 b18558b osd/OSDMap: test_flag returns bool
 3540fb9 osdc/Objecter: restart listing this PG if sort order changes
 35c1970 osd/ReplicatedPG: fix missing set sort order on [N]PGLS
@@ -7757,9 +8880,11 @@ fabd635 ReplicatedPG: Don't cache recovery and scrub data
 23119ff Revert "osd/HitSet: make subclasses dump deterministically"
 c8bb8a2 Revert "mon/PGMap: dump osd_epochs in deterministic order"
 fb02024 rgw: don't preserve acls when copying object
+aa84941 tasks/kcephfs: enable MDS debug
 42a8f7c rgw: cleanup dead init_bucket
 4d4fe9d crypto: fix unbalanced ceph::crypto::init/ceph::crypto:shutdown
 6c803cd ceph.spec.in: test %preun argument is zero for removal-only operations
+c1ca95c tasks/cephfs: implement TestDataScan.test_stashed_layout
 773b431 tools/cephfs: use xattr layout if present+valid
 bde85b7 tools/cephfs: pass layouts around in DataScan
 5f41e8d tests: do not test timeout mon add
@@ -7776,8 +8901,10 @@ d33ad15 Adding statfs api to KeyValueDB
 9e58c62 mon: show number of PGs in flush/evict mode in command line
 d57d36d osd: add flush/evict mode in pg stats
 af2a38b mon: fix the output of cache_io_rate_summary
+0cf220a calamari_setup: install "cli" utils on Calamari node
 b78883b tests: be more generous with mon tests timeouts
 7e6f819 doc: update rgw configuration on multiple rgw rados handlers feature
+532a4e4 calamari_setup: change to use ceph-deploy repo command
 efc8969 Doc: Correcting the default number of copies.
 6f768a7 doc: Removed reference to RAID-4
 c6cf558 CMake: cut down unnecessary linkage on rados tests
@@ -7830,6 +8957,7 @@ d372718 osd/HitSet: mark subclasses that encode nondeterministically
 aaa5b75 ceph-dencoder: mark PGMap with nondeterministic encoding
 82533aa test/encoding: c++11 STL appears to make fewer copies, yay
 e06046c cmake: check for good yasm
+317559d Added singleton hammer-x test to address #12625
 016a5d5 mon/PGMap: make PGMap dump osd_epochs too
 841ae13 mon/OSDMonitor: prevent old OSDs from starting if 'sortbitwise' is enabled
 383185b mon/OSDMonitor: osd set/unset sortbitwise
@@ -7923,15 +9051,26 @@ d171537 os/Memstore:Refactor collection_list_range and collection_list_partial
 e3147b8 rgw: segmentation fault when rgw_gc_max_objs > HASH_PRIME
 9420d24 rgw: the argument 'domain' should not be assigned when returning false
 2938fdd radosgw-admin: Create --secret-key alias for --secret
+427f48b erasure-code: CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 integration tests
+18bf415 fix indentation error in docstring
+5112202 do not omit syntax errors in linter checks
+30f10ee tasks: fix syntax error in ceph.py
 cd4ac1c rbd: support size suffixes for size-based options
 d1735a4 rgw: rework X-Trans-Id header to be conform with Swift API.
 278a6ae qa: add fs layout case for stripe_size decrease
+63e6d87 calamari_setup: handle RHCS 1.2 (missing /mnt/MON)
+12f4dff erasure-code: add write_append_excl: false
+af97325 fix indentation error in docstring
+db69142 do not omit syntax errors in linter checks
 880ffe9 mds: fix setting whole layout in one vxattr
 b34363a cleanup: fix the eol dumping in JSONFormatter flush & close_section
+9f53009 tasks: fix syntax error in ceph.py
 1559d5e cmake: add global lib to rbd
 6b29233 mds: initialize InodeStoreBase::damage_flags
 5d7cb4c ceph-dencoder: add RGWRegion, RGWZoneParams, RGWOLHInfo support.
 e67539e Fix location of --access=full in SWIFT user creation
+3489e5c Set the SELinux context of the logrotate config
+e195f9f Set the SELinux context of the logrotate config
 8c53a58 ceph.spec.in: Make SELinux opt-out, not opt-in
 51aae7b client: ignore permission check when fuse_default_permissions is on
 4866d89 osd: make PGLSFilter xattr read optional
@@ -7951,6 +9090,7 @@ b04bafc tests: rados striper tests use 7116 instead of 7113
 0cdd77d rgw_user.h: modify interface comments.
 add3014 Revert "rbd: remove dependency on non-ABI controlled CephContext"
 aebb9e7 tools: ceph-release-notes unicode handling
+c93fe1f tasks/ceph_manager: be silent about sending SIGHUPs
 793fe52 doc: release notes for v0.94.3
 de40c40 client/Makefile: ship InodeRef
 0e69527 mds: open base inode's snaprealm after decoding snapblob
@@ -7969,6 +9109,7 @@ e79ee92 buffer.cc: is_zero() optimization
 10a336f ObjectStore: partially remove op_ptr.zero()
 f68553e osd/osd_types.cc: get rid of str concat when making hash key
 111ecf8 radosgw-admin: use cout not cerr to print help message.
+911c686 Fix experimental feature override that makes user config useless
 145364b logrotate: fix log rotation with systemd
 85cb86d doc: change "--keyfile" description in man page of rbd help
 1ca6bf6 common/hobject_t: correctly decode pre-infernalis hobject_t min
@@ -8043,7 +9184,7 @@ e4fd086 use throttle framework to throttle ops/bytes for filestore
 8a7a52d rbd: 'rbd purge image' will return failure without removing any snaps if the image has a protected snap
 9574555 rgw: skip prefetch first chunk if range get falls to shadow objects
 ad5507f rgw: url encode exposed bucket
-a634ab3 remove libs3 submodule
+a634ab34 remove libs3 submodule
 bbe8457 remove rest-bench
 a8d33c9 doc: fix command line for swift user creation
 77cdb50 No CRUSH involvement in deciding PG target
@@ -8062,12 +9203,26 @@ c4872dd Log::reopen_log_file: take m_flush_mutex
 ee25b42 tools/rados: change the default max_ops to 16
 b97988d tools/rados: update the help message
 08210d6 common/syncfs: fall back to sync(2) if syncfs(2) not available
+6726f4f calamari_setup: only stop ceph on osd/mon machines
+30dbe93 calamari_setup: hack for 1.3.0 on centos: install lttng from EPEL
+ac8b0e1 calamari_setup: pep8 fix
+17c3a94 calamari_setup: Add --no-adjust-repos to ceph-deploy install --<type>
+a3c9a76 ceph_manager: don't add an osd to live_osds until it's been revived
+b2187b7 thrashosds: adds a sighup_delay option, defaulted to 0.1
+84d2403 ceph_manager: adds a do_sighup method
 fa78739 rbd: remove dependency on non-ABI controlled CephContext
+ed73f67 ceph_manager: adds a signal_osd method
+c1116a0 Use branch from config for ceph-deploy testing instead of released branch
 9fa0112 crypto: use NSS_InitContext/NSS_ShutdownContex to avoid memory leak
 34cb85e tools/cephfs: respect mds_root_ino_[gid|uid]
 4e42414 mds: configurable uid/gid on new root inos
 d8395cf modified librados-intro.rst
 91ecba1 osd/ReplicatedPG: claim the read buffer instead of copying it
+e33065f tasks/cephfs: reset auth caps in setUp
+6864f88 tasks/cephfs: don't hardcode data pool name in test_pool_perm
+e3434b6 tasks/cephfs: test_pool_perm: reinstate proper caps on teardown
+a0ccd69 tasks/cephfs: don't hardcode client ID in test_pool_perm
+36ec6f9e tasks/cephfs: test_pool_perm whitespace
 61643c1 mds: reinstate conditional destruction
 37370c7 mds: fix standby handling in map
 4fd6c3b squashme: move peer failure handling up in handle_mds_map
@@ -8094,6 +9249,7 @@ e54135c mds: de-public-ify more members of MDS::
 557af37 mds: Make all subsystems take an MDSRank instead of an MDS
 0531d0b mds: big refactor of MDS class (stage 1)
 5d70e1c messages: include Message.h in MClientRequestForward
+4dbc052 Remove chef task
 f60da6b Anotate all the .s files
 17a3e4b rbd: rename --object-extents option to --whole-object
 afbf90d rbd: du command should take spec as an argument
@@ -8108,12 +9264,18 @@ bbc5c71 rbd: import doesn't require image-spec arg, ditto for export and path
 554c982 test/perf_local: disable tests on unsupported archs
 8778ab3 Log::reopen_log_file: take m_flush_mutex
 6f54c61 debian: Update maintainers and uploaders
+71fec93 Fix for wip-12225: the extra argument to mon create-initial was never required
+4aa47ba Fixes #12460
+bb71120 Fixes #12460
+409f275 Fixes #12068
 824c541 common: add nested-name-specifier ThreadPool before WorkQueueVal Fixes: #12459
 992d959 mds: fix val used in inode->last_journaled
 a140085 osd: Keep a reference count on Connection while calling send_message()
 de8a950 qa/workunits/cephtool/test.sh: escape osd.* and mon.*
 8447b08 WBThrottle::clear_object: signal if we cleared an object
 cd2d7d0 ceph-helpers.sh: don't test the osd_max_backfills value
+e8d4cf1 erasure-code: add thrash test for the fast read feature on EC pool
+2ab6029 Fixes #12068
 bc348f6 Fix "was hidden" compilation warnings
 c835422 rgw: fix radosgw start-up script.
 2513ba7 docs: Document md_config_obs_t.
@@ -8124,12 +9286,14 @@ af0d1ab docs: Document the ThreadPool and WorkQueue classes.
 c8bdf1b mds: fix crash while stopping rank
 cb38aa1 mds/MDLog: It makes no sense to set the offset for LogEvent in _start_entry.
 518c0b3 rgw: check for illegal subuser access parameter.
+2006b77 calamari_setup: disable epel again after ceph-deploy
 7a1aff8 use SEEK_HOLE/SEEK_DATA for sparse copy
 10c0b67 blkdev.cc::get_device_by_uuid: do not leak cache
 4d03030 mon: ceph osd map shows NONE when an osd is missing
 e479037 tests: robust test for the pool create crushmap test
 bef3938 os/FileStore: fix pipe file descriptor leak
 1baeb1c doc/rados/configuration: add more scrub related config
+d413124 Fixes #12425: moved the versions-steps-x suite from upgrade/firefly to upgrade/firefly-x, as the old location was wrong and never ran correctly to upgrade to -x
 1eb13a2 xio: reduce quantum numbers to grow pool
 09ab814 xio: reduce the default depth of accelio msg queue
 1d728c0 xio: configurable max send inline
@@ -8222,6 +9386,7 @@ af55a90 test/perf_local.cc: fix alloc/dealloc mismatch
 95344d0 tools/ceph-objectstore-tool: add set-inc-osdmap command
 0257c15 test: add test for {get,set}-osdmap commands
 4b28bcb doc: add v9.0.2 to the release timeline
+e09f761 Added config settings to resolve and test #12021 "mon debug unsafe allow tier with nonempty snaps: true" (only for firefly-x, giant-x and upgrade/hammer/point-to-point running against hammer branch)
 69dad39 doc/release-notes: v9.0.2
 9cfa88d test: test_rados_tool.sh update due to new import/export semantics
 d23cd13 tests: test/cephtool-test-mon.sh uses 7202 7203 and 7204
@@ -8309,6 +9474,8 @@ f3d34d8   rest_bench: drain the work queue to fix a crash   Fixes: #3896   Signe
 f8bcec2 xio: handling connection error event
 e849cf3 xio: correctly set XioConnection features with fake features for now
 1b3f899 rados: Fix bug in export of xattr which dropped first char of key on import
+afe1488 Drop chef from the teuthology suite
+7cc2843 Drop chef from tgt suite
 f08522c mailmap: Kernel neophyte affiliation
 0310fb3 mailmap: Zhe Zhang affiliation
 3cee02d mailmap: Ketor Meng affiliation
@@ -8335,6 +9502,8 @@ a063de1 mailmap: Dmitry Yatsushkevich name normalization
 2ff6bcf erasure code: shec performance optimization with SIMD instructions
 6e0498d MonitorDBStore : make monitor transaction more readable on dump
 da96a89 librados: Make librados pool_create respect default_crush_ruleset
+5b57b81 Drop chef from tgt suite
+b48e2cf Drop chef from the teuthology suite
 be422c8 (tag: v9.0.2) 9.0.2
 bbf5842 AsyncConnection: Make sign_message ahead of construct message bufferlist
 8bbe98a AsyncConnection: Fix non-fastdispatch message doesn't prepare case
@@ -8344,7 +9513,9 @@ b7e9fe1 extend clone test to mock clone operation
 1231ae0 doc/release-notes: update notes for v0.80.10
 8085d26 common: clean up code for OpTracker::check_ops_in_flight.
 8506822 doc: change tcp rcvbuf and tcp nodelay to ms tcp rcvbuf and ms tcp nodelay
+ec12f21 tasks/ceph_fuse.py: virtual machines need flexible mount timeout
 b7b1bf2 rgw: add minimum support for copy multipart part
+35c6363 calamari_setup: handle new structure in 1.3.0 (MON and OSD repos)
 16ead95 qa: update pool quota test for internal retries
 dbcf2e4 Fixes: #12018
 67de12b Fixes: #12018 osd/OSD.cc: drop write if pool is full
@@ -8364,6 +9535,7 @@ d44c784 mds: add damage_flags_t to inode+frag
 56d6be8 tools: fix journal reset error handling
 bb70a33 client: fix typo
 40d0476 mds: correct typo in log message
+c2031ad suites/fs: enable directory fragmentation
 79197d3 rgw: If the client sends a Connection: close header respond accordingly.
 ea012fd rgw: Make vstart.sh print out swift user info
 c604dd9 Fixes: #12286 radosgw-admin: after subuser modify, print user info only once.
@@ -8382,11 +9554,14 @@ fd1772e tools: Fix dump-super which doesn't require pgid
 fb22a9f osd: fix temp clearing in OSD
 1a8e7a7 Document librbd::parent_spec and librbd::parent_info.
 f9378a9 Fix mds dump_ops_in_flight crashing occasionally
+042bd11 3-size-2-min-size: keep 4 in during thrashing
 57fbc23 ReplicatedPG::finish_promote: do not prefill new_clones
 4946d10 OSDMonitor: allow addition of cache pool with non-empty snaps with config
 8a56c48 packaging: add find and which dependencies
 5ce38b9 ceph.spec.in: install 95-ceph-osd.rules, mount.ceph, and mount.fuse.ceph properly on SUSE
 8aa758e ceph.spec.in: use _udevrulesdir to eliminate conditionals
+106f9a9 Removed rhel* configurations with the goal that they will be covered in the Octo lab and won't be used by vps runs in the Sepia lab
+27e05df suites/rbd: add EC pool variant for QEMU tests
 8f7c163 rgw: fix signed/unsigned compare warning.
 caae6c9 test: fix signed/unsigned compare warning.
 e4634dd ceph.spec.in: snappy-devel for all supported distros
@@ -8399,9 +9574,13 @@ e6662e5 Workunits : suites/pjd.sh : Do make clean so make can build on current a
 577acf6 UnittestBuffer: Add bufferlist zero test case
 2674739 Fix mds dump_ops_in_flight crashing occasionally
 d8a728e rgw: Document the layout of pools and objects
+adfa1ee Removed rhel* configurations with the goal that they will be covered in the Octo lab and won't be used by vps runs in the Sepia lab
 7b31e11 bufferlist: replace Mutex with RWlock
 7db8a6a Remove git build-time dependency
 cc72dd2 StoreTest: Add zero test for SyntheticTest
+b4265e8 upgrade/hammer-x: drop symlinks to ec jobs and disable *_excl
+e248e2e suites/upgrade/hammer-x: do not pass _excl weights to ceph_test_rados
+c0b2e31 tasks/rados: make {write,append}_excl conditional
 8df81e0 tests: verify erasure code read / write after remapping
 96ec2a7 tests: ceph-helpers.sh get_osds with no trailing whitespace
 d791a72 tests: improve shell output readability
@@ -8412,6 +9591,7 @@ aefcf6d tests: ceph-helpers.sh reduce kill_daemon verbosity
 e2454ee AsyncConnection: Only prepare message when it support fast dispatch
 34b939a client: reference counting 'struct Fh'
 998fe78 common/TrackedOp: Make get_duration get correctly value.
+ad8058e calamari_nosetests: verify that calamari_branch exists
 840011b mds: safety around rejoin/resolve_done hooks
 cbc2a0f XIO: Add missing fastpath events to OSD
 b53e3e2 qa: add tests for 'ceph mds metadata' command
@@ -8452,6 +9632,7 @@ efccc58 osd/ReplicatedPG: for writefull, offset is zero so replace offset w/ zer
 a1005b1 osd/ReplicatedPG: First calc the crc, then call write_update_size_and_usage.
 df2c984 mon/PGMonitor: Make blocked Op message more readable.
 db1643d osd/ReplicatedPG: For a WRITEFULL replica object, only truncate if the new size is less than the old size (only truncate to the new size)
+e037797 rgw: do not enable both tcp and uds for fastcgi
 2cb0273 Compressor: Cleanup unnecessary lines
 8e48ba1 Compressor: add decompress failed codes
 8f0919e Compressor: Add compressor infrastructure for ceph
@@ -8460,10 +9641,12 @@ ce0c8f8 test: ignore symlinked ceph.py file
 a30aa95 ceph.in: linter cleanup, remove unused imports
 e296793 test: create python test files for ceph cli
 5c3d074 mon: disallow adding a tier on top of another tier
+75fc41c cephfs/test_auto_repair.py: flush journal after umount
 e819a3c client: return EINVAL if iovcnt < 0 for p{read,write}v()
 19a75f1 tools: fix deprecated warning
 5f8ecf2 crush/CrushTester: fix signed/unsigned warning
 373e065 client: fix signed/unsigned warnings in preadv code
+83d920a erasure code: added shec's initial ceph-qa-suite
 67fa726 AsyncConnection: Move sign_message to write_message
 504a48a doc : missing link in "quick_guide.rst"
 db16353 mds: change mds_log_max_segments type from int to unsigned.
@@ -8489,6 +9672,8 @@ f19b2f0 rgw: keep accurate state for linked objects orphan scan
 13adf3c rgw: async object stat functionality
 16a2dbd rgw-admin: build index of bucket indexes
 767fc29 rgw: initial work of orphan detection tool implementation
+1a29801 tasks/calamari_setup.py cal_svr.run arg list is missing args keyword.
+835ad8b cephfs/test_auto_repair.py: flush journal after umount
 8a221c3 doc: homogenize Librados (Python) documentation's code samples
 8103908 rgw: only scan for objects not in namespace
 c418bd9 ceph.spec.in: remove duplicate BuildRequires: sharutils
@@ -8503,6 +9688,8 @@ daa679c rgw: error out if frontend did not send all data
 590cdc9 librbd: prevent object map updates from being interrupted
 dd212fd ceph.spec.in: clarify two important comments
 5f47b11 rgw: send Content-Length in response for GET on Swift account.
+644d74a suites: yaml for test_damage
+21abe9d tasks/cephfs: add test_backtrace
 39cf071 mds: fix MDLog shutdown process
 f2daa19 mds: drop MDSIOContext on mds->stopping
 ae387b6 mds: refine shutdown, add ::stopping
@@ -8515,6 +9702,7 @@ c097881 test/ceph-dencoder: add boilerplate
 c9c655f mds: update CInode::oldest_snap during migration
 f3e4a91 ceph.spec.in: rm reference to EOL Fedoras (< 20)
 2db9480 ceph.spec.in: package rbd-replay-prep on all Fedoras
+52fa9a3 Fixes #11570: removed test_cls_rbd.sh per Josh's suggestion; removed 'branch: hammer' from 3-upgrade-sequence/upgrade-*
 43c1784 ceph.spec.in:BuildRequires sharutils
 bdfad0d src/.gitignore: add ceph_perf_msgr_{server,client}
 850879e Fixed inclusion of ceph-helpers file in tests after it was moved to qa/workunits
@@ -8523,6 +9711,8 @@ df539a7 move pgp_num, pg_num check to prepare_new_pool method
 0f0c6f1 xio: fix to work with commit 626360aa
 47a8447 xio: fix to work with the merge of pull request #4707
 ec2afbb xio: sync to work with accellio v1.4
+03ac59e Adds a set of rgw tests to the teuthology suite
+460bf9a Adds mod_proxy_fcgi support and makes it the default setup for rgw
 726d699 librbd: invalidate cache outside cache callback context
 0215e97 tests: add new unit tests for object map invalidation
 eb81a6a librbd: only update image flags when holding exclusive lock
@@ -8636,7 +9826,7 @@ ecdc8f6 hobject_t: modify operator<<
 afae1c7 hobject_t: adjust sort order (pool position)
 ff99af3 hobject_t: change default pool id to INT64_MIN
 959a7ce os/HashIndex: use ghobject_t::get_max() instead of hobject_t one
-3b1b5a9 osd: eliminate temp collections
+3b1b5a93 osd: eliminate temp collections
 a88c3cc osd/osd_types: remove ancient CEPH_*_NS cruft
 18eb2a5 osd: use per-pool temp poolid for temp objects
 406c8c3 ceph_test_rados: add --balance-reads option
@@ -8660,6 +9850,10 @@ b10adf6 doc: Unify ID format
 cac48bd doc : Change of "ceph-deploy mon create {ceph-node}" with "ceph-deploy mon add {ceph-node}" in "Quick-ceph-deploy.rst"
 33f4b9a doc: Modification of a sentence.
 bfa0c4a Fixes to rcceph script
+bcb1eb3 suites: hook in cephfs/test_damage
+fa16974 tasks/cephfs: add TestDamage
+cc2e9ff tasks/cephfs: allow stdin for Filesystem.rados
+655d197 tasks/cephfs: add CephFSMount.stat
 451cee4 osdc: refactor use of _is_readable
 9e09e54 osdc: handle corruption in journal
 ae0a28e mds: additional error handling in CDir
@@ -8689,6 +9883,15 @@ e60c450 doc: Wrong restriction for a daemon's ID
 350f43e librbd: Add option to allow disabling issuing alloc hint
 8feb27d common/RWLock: allow disabling read/write lock counts
 5e756ed ceph_spec buildep python sphinx for fedora
+f7e932f tasks/cephfs: add TestDamage
+40d0c05 tasks/cephfs: add TestDataScan
+d2cec6a tasks/cephfs: add CephFSMount.ls
+e6afe66 tasks/cephfs: allow stdin for Filesystem.rados
+718eaee tasks/cephfs: add CephFSMount.stat
+0f34d07 tasks/cephfs: add Filesystem.data_scan wrapper
+b5b668e calamari_setup.py: fix tarball installs
+8defa73 calamari_setup.py: tear down mounted ISO
+029fdfd Initial check in for #12023. Signed-off-by: Yuri Weinstein <yweinste at redhat.com>
 26eba36 Bug fix for man file packaging.
 8e56a5b [rbd] support G/T units in rbd create/resize
 81eee9b doc: explain about pgp num
@@ -8698,11 +9901,14 @@ e60c450 doc: Wrong restriction for a daemon's ID
 3a55cb0 tests: display the output of failed make check runs
 d38cd63 doc: update openstack and rgw keystone
 905c31e qa: use "sudo rmdir" to remove test data of multiple_rsync.sh
+786f854 cephfs/test_client_limits.py: update test_client_oldest_tid
 a9475e1 mon/OSDMonitor : error out if pgp_num > pg_num
+c0e3f8f upgrade-x: use compatible rados api tests
 ce86b0a doc/messenger: Add doc for ceph_perf_server/client
 2e7c8e7 libcephfs: add ceph_p{read,write}v
 69316a9 crush: add crush_compat.h
 1db1abc crush: eliminate ad hoc diff between kernel and userspace
+154f055 ceph: fix up log rotation stopper
 fb71bd9 mds: add dirfrag split/merge asok commands
 ecee227 crush: shared files should use kernel-doc
 9fde186 crush: fix style issues in shared files
@@ -8721,6 +9927,8 @@ ade36cf in _zero() function, we should remove strips to minimize keyvalue DB upd
 724c139 osd_types.{cc,h}: fix three typos in comments
 f620a8a Remove the deleted files rados_sync.cc, rados_import.cc and rados_export.cc
 55d9747 mon: add 'PGs' column to 'ceph osd df'
+b455e4a Single-node ceph-deploy tests to exercise commonly used CLI and test init startup: add 1-node tests to Smoke and include the ceph-deploy test; per review comments, add an rgw CLI test
+451c5ca ceph: fix up log rotation stopper
 04e91bb rpm: add missing Java conditionals
 45392a2 mailmap: Yuri Weinstein affiliation
 5b29a57 doc: add v0.94.2 to the release timeline
@@ -8730,8 +9938,10 @@ d6f6ad0 PerfMsgr: Make Server worker threads configurable
 eba4eb2 PerfMsgr: Add tips for running ceph_perf_msgr
 97ff79d debian, rpm: Add ceph_perf_msgr_* to build file
 372eddf PerfMsgr: Add messenger perf tools
+63a563d tasks/cephfs: fix race in test_full
 832f33a qa: use "sudo cp" in multiple_rsync.sh
 290204d common: make safe_splice return if return value is EAGAIN.
+82dc23a erasure-code: lrc plugin workload
 d91808b FileStore: For clone, use splice to reduce memcopy.
 7509a6c common/TrackedOp: checking in flight ops fix
 ec3c409 common/OpTracker: don't dump ops if tracking is not enabled
@@ -8761,12 +9971,15 @@ a5d9b49 FileStore and blkdev: Collect device partition information
 f5da2fc doc/release-notes: v9.0.1
 1e77fcf tests: ceph-disk tests need to install pip > 6.1
 2d76e2c tests: erasure-code non regression tests must skip isa on i386
+07eb03a tasks/cephfs: time out on ceph-fuses that don't die
 e68ea2c osdc: Make librbd cache handle NOCACHE fadvise flag.
 ac1e729 rgw: fix data corruption when race condition
 42a3ab9 os/LevelDBStore: Drop buffer_list and key_list in transaction.
 1aa9655 tools: For ec pools list objects in all shards if the pgid doesn't specify
+6573e92 ceph: update log rotation for review comments
 98e77d5 mon/PGMap: access num pgs by osd
 fa04833 mon/PGMap: fix pg_by_osd
+b255db8 thrasher: Can't test ceph-objectstore-tool if nodes turned off (powercycle)
 abe4ec2 rgw: remove trailing :port from host for purposes of subdomain matching
 a3f9cba config_opts: turn down default recovery tunables
 e401115 tests: ceph_erasure_code_non_regression s/stipe/stripe/
@@ -8777,6 +9990,12 @@ e401115 tests: ceph_erasure_code_non_regression s/stipe/stripe/
 75465eb osd/ReplicatedPG: Don't create an objectcontext when removing the tmp obj for a failed promote if a tmp obj already exists.
 cbf1d34 osd/ReplicatedPG: When promoting an object, delete the tmp obj if an error is met.
 e6334bd osd/ReplicatedPG: Only when promoting an object succeeds can it requeue proxy-read ops.
+ba5b3bb add log-rotate configs to the userspace suite
+96f3eb9 ceph: support arbitrarily-named daemons in logrotate
+5935f86 ceph: enable mds log rotation
+281058c suites/multimds: use 2-node clusters for pure userspace subsuites
+dd2fa89 suites/fs: use a two-node layout for most userspace tests
+01256c9 suites/fs: switch standby-replay tests to use a smaller cluster
 437c9e1 doc: architecture minor fixes in watch notify
 21f9e1f ceph.spec.in: remove duplicate BuildRequires
 b711e31 Transaction Id added in response. Signed-off-by: Abhishek Dixit <dixitabhi at gmail.com>
@@ -8991,6 +10210,8 @@ be873eb ReplicatedPG::release_op_ctx_locks: requeue in scrub queue if blocked
 3e4b852 osd/: convert snap trimming to use OpWQ
 e8cddf8 OSD: add PGQueueable
 517659b FileStore: sync object_map when we syncfs
+6d3b5c6 tasks/cephfs: add TestClientRecovery.test_fsync
+df91f98 tasks/cephfs: fix typo in blacklist clearing
 8a6d626 osdc/Journaler.h: fix ceph_file_layout dump error in journaler::dump(). Signed-off-by: huangjun <hjwsm1989 at gmail.com>
 b3555e9 mon: always reply mdsbeacon
 6f49597 ceph.spec.in:ownership of dirs extension
@@ -9002,6 +10223,8 @@ ce27ae4 client: make fsync waits for single inode's flushing caps
 2517ea9 mds: don't add setfilelock requests to session's completed_requests
 ae08638 client: exclude setfilelock requests when calculating oldest tid
 83f88e7 commit: test: check whether the daemon is alive again after sleeping 60s
+3127cda rados: add write_excl and append_excl
+91b300d rados/thrash: add test for radosgw with snaps
 ddcbb66 tests: add lrc tests to osd-scrub-repair.sh
 77f322b tests: split osd-scrub-repair.sh erasure code test
 70e069d tests: cosmetic move of functions in osd-scrub-repair.sh
@@ -9027,11 +10250,14 @@ ba4a2c1 mon: Monitor: inject scrub failures
 6051e25 ReplicatedPG: start_flush: use filtered snapset
 933df03 tests: fix the main() documentation of ceph-helpers.sh
 a8e4b4f tests: remove unused CEPH_HELPER_VERBOSE in ceph-helpers.sh
+633ab41 ceph_deploy: no need to fetch overrides twice
+9c3040c krbd: add unmap subsuite
 eb9dbef doc: fix crush-ruleset-name param description
 c00e393 OSDMonitor: fix prepare_pool_crush_ruleset() retval
 29f11c7 CrushWrapper: validate default replicated ruleset config opt
 f032c40 OSDMap: respect default replicated ruleset config opt in build_simple()
 ea4f942 tests: a couple tweaks to osd-pool-create.sh
+5ff2743 tasks/cephfs: fix timing in test_full
 81faac7 debian/copyright: update copyright for test/perf*
 cc92872 scripts: Add a helper to make release notes
 2879b0c .gitignore: systemd/ceph-osd at .service
@@ -9053,6 +10279,7 @@ a7bce20 doc: updates to v0.94.2 draft release notes
 d7a2349 doc: update the development workflow
 ac347dc Template systemd/ceph-osd at .service with autotools,
 2b23327 Mutex: fix leak of pthread_mutexattr
+bd54235 tasks/cephfs: mount fusectl before listing fuse connections
 38a319d qa/cephtool: add blacklist json output check
 8ef6f86 osd: fix blacklist field in OSDMap::dump
 4cc0f2f KeyValueStore: Add collect_metadata support
@@ -9075,6 +10302,7 @@ a8fca3c cmake: add missing common/util.cc dependency
 15dd70c cmake: skip man/CMakeLists.txt
 7c1bae5 tests: don't choke on deleted losetup paths
 f9ba711 dev/rbd-diff: clarify encoding of image size
+aa0ffb3 tasks/cephfs: reset osd blacklist between tests
 ab8e9e3 tests: CEPH_CLI_TEST_DUP_COMMAND=1 for qa/workunits/cephtool/test.sh
 5c69f5e tests: ceph create may consume more than one id
 1dac80d rgw: Use attrs from source bucket on copy
@@ -9099,6 +10327,8 @@ ca6abca tools: add --no-verify option to rados bench
 5e44040 osd: randomize scrub times to avoid scrub wave
 0f7f356 osd: use __func__ in log messages
 2ab0e60 osd: simplify OSD::scrub_load_below_threshold() a little bit
+23caf52 Added script to schedule 1/14th part of rados
+a7064a7 Added script to schedule 1/14th part of rados
 f9e5b68 qa: unbreak concurrent.sh workunit
 c2d17b9 test/librados/snapshots.cc: add test for 11677
 b894fc7 tools: Don't delete, recreate and re-fill buffers in rados bench.
@@ -9236,6 +10466,7 @@ f88275d tests: tiering health report reworked
 5f252d6 tests: no agent when testing tiering agent border case
 b2c40d5 tests: uncomment tiering agent tests
 8af9c05 doc: more entries for release notes in v0.94.2
+c7e8e54 cephfs/test_client_limits.py: invalidate kernel dcache according to client_cache_size
 78c73c5 install-deps.sh: only pip install after make clean
 61030e6 client: pin request->target when target dentry is not pinned
 9591df3 client: invalidate kernel dcache when cache size exceeds limits
@@ -9253,6 +10484,7 @@ c0311cc mon/MonitorDBStore: use mon_rocksdb_options if backend is Rocksdb
 9c64bae Add an test checking if frequent used options works.
 8915dba os/RocksDB: use GetOptionsFromString
 3d591af mon: prevent bucket deletion when referenced by a rule
+f780f18 cephfs/test_client_recovery: check FUSE version before invoking flock
 9324d0a crush: fix crash from invalid 'take' argument
 944eb28 RBD: expunge test broken by xfs commit
 a97566a doc: add ceph-create-keys.rst to dist tarball
@@ -9289,6 +10521,7 @@ f76bf6c mailmap: Xingyi Wu affiliation
 0f44127 mailmap: Ning Yao affiliation
 95a881f mailmap: Joao Eduardo Luis affiliation
 2738d02 ECBackend: eliminate transaction append, ECSubWrite copy
+938db47 suites: update log whitelist for TestQuotaFull
 3699a73 mon: fix the FTBFS
 07cf4f7 doc: release notes for hammer v0.94.2
 e1f1c56 mon/PGMap: add more constness
@@ -9307,7 +10540,9 @@ a2ee8d7 AsyncConnection: Avoid assert since replacing may inject when unlock
 67da8fe osd: Show number of divergent_priors in log message
 de81cd2 osd: Fix double counting of of num_read/num_write
 dc863fb install-deps.sh: increase pip install timeout
+9c36a52 suites: tweak config in mds-full task
 b29cc4c doc: Fix .gitignore for 'doc' directory
+13c7f97 tasks/cephfs: cover pool quota in test_full
 f43bf2e cmake: add common_utf8 lib to link unittest_str_map
 1fd2885 cmake: Add missing pthread lib
 6ce5262 cmake add mds and librbd missing files
@@ -9317,8 +10552,11 @@ f43bf2e cmake: add common_utf8 lib to link unittest_str_map
 e7b196a mon: remove unused variable
 c255e80 install-deps.sh: exit on error if dependencies cannot be installed
 7b28a6f tests: pip must not log in $HOME/.pip
+c5ff21b samba: disable MDS debugging to reduce log sizes
 6b68b27 json_spirit: fix the FTBFS on old gcc
 74f3b5c AsyncConnection: verify connection's state is expected
+80eecc3 Replaced work units with ceph-deploy/ceph-deploy_hello_world.sh re: #11547; removed fs as redundant per Josh's suggestion and renamed rados to basic
+dcfd6a3 Replaced work units with ceph-deploy/ceph-deploy_hello_world.sh re: #11547; removed fs as redundant per Josh's suggestion and renamed rados to basic
 b863ccb Added a "ceph hello world" for a simple check for ceph-deploy qa suite
 13abae1 Added a "ceph hello world" for a simple check for ceph-deploy qa suite
 f36bd0e tests: need 1024 files per process not 100024
@@ -9357,10 +10595,14 @@ bc66b78 librbd: always deregister child clone from parent with deep flatten
 7be3df6 librbd: move copyup class method call to CopyupRequest
 b556d31 librbd: add new deep-flatten RBD feature
 819c980 librbd: fast diff should treat all _EXISTS objects as dirty
+8c823e7 Initial check in for upgrades/hammer stable upgrade suite. Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
 fc42837 librbd: fix warning
 1b6a5e9 librbd: fast diff of first snapshot loads incorrect object map
 baebe74 librbd: fast diff is incorrectly flagging objects as updated
 f25aa5f librbd: ignore lack of support for metadata on older OSDs
+38160ec reg11184: Add regression test for 11184
+b105580 divergent_priors2: Do divergent priors and ceph-objectstore-tool export/import
+a6a4aaf divergent_priors: Fix divergent_priors task
 3f2946a OSD: add op_wq suicide timeout
 f2fbfa3 OSD: add remove_wq suicide timeout
 547a704 OSD: add scrub_wq suicide timeout
@@ -9369,8 +10611,11 @@ e1073a4 OSD: add snap_trim_wq suicide timeout
 df4e5de OSD: add command_wq suicide timeout
 cd0f2b7 obj_bencher: does not accumulate bandwidth that is zero
 70585a6 rados cli: fix documentation for -b option
+3018f49 client-upgrade: added RBD notification tests
+236a93c client-upgrade: added librbd API test case
 fbd6646 test_libcephfs: Fix zero length read tests
 8add15b json_spirit: use utf8 intenally when parsing \uHHHH
+0e0d1e6 suites/fs: add another log whitelist to journal-repair
 835b12f librbd: fix the image format detection
 899dd23 configure.ac: no need to add "+" before ac_ext=c
 5b2357e configure.ac: add an option: --with-man-pages
@@ -9397,7 +10642,9 @@ c9a6e60 src/script: remove obsolete scripts
 2b5f0fc automake: allow multiple {install,all,...}-local targets
 8c7a781 build: make-debs.sh NPROC overrides make -j
 8b7953a install-deps.sh: pip wheel for python dependencies
+e4a7106 smoke: whitelist 'marked down' on python test
 9784e5d tests: install sudo on ubuntu-12.04 container
+7bfe0c3 Added hammer-client-x to the mix; renamed dirs to better reflect what is being tested; removed old dirs; added logging. Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com> (cherry picked from commit 3c8730d035a90f221c05526983c40f0a11da1ba6)
 125c59b tests: erasure coded pools do not allow offset on creation
 a4f1256 osd: refuse to write a new erasure coded object with an offset > 0
 5c9e9da rados: Tell that pool removal failed. Not that it doesn't exist.
@@ -9410,7 +10657,13 @@ fb484b6 mds: only add head dentry to bloom filter
 c26b21f AsyncConnection: Don't dispatch event when connection is stopped
 63eb432 librbd: don't notify_change on error when notify async complete
 9da4c45 doc: link to sepia report for lab info
+47fff70 knfs: remove the MDS debugging
+cc10752 squash: use sleep instead of utility.sleep
+91bc0e9 squash: use != not is not in radosbench
+73577f1 squash: remove utility, moved to teuthology.git
+2a60852 squash: ceph_manager: add utility_task doc string
 bd79891 (tag: v9.0.0) 9.0.0
+015ed70 suites/rados: add test for 11429
 156e55b man: do not dist man pages if sphinx is not available
 f76293c test_async_driver: add dispatch_external_event tests
 c107b8e configure.ac: do not check for sphinx-build
@@ -9434,6 +10687,8 @@ e146b04 librbd: Remove the redundant judgement.
 1de75ed Preforker: include acconfig so includers don't have to
 69971f9 osd: dout the relation between osd_reqid_t and journal seq
 4fed200 osd: dout latency info in filestore
+f99bc7c Removed redundant tests (issue #11520); reduced nodes to be locked to two
+8ab46b4 Removed redundant tests (issue #11520); reduced nodes to be locked to two
 05a5f26 librbd: object map is not properly updated when deleting clean objects
 7213352 atomic_t: change cas to compare_and_swap and return bool
 a0f96de mds: in damaged() call flush_log before ending
@@ -9455,6 +10710,11 @@ de5717e ceph_mon: output preforker error message
 3f969d6 systest_runnable: Use Preforker to do fork
 e0b389a test_libcephfs: Add tests to cover zero length write
 61cf5da packaging: mv ceph-objectstore-tool to main ceph pkg
+3f19ed4 All changes from https://github.com/ceph/ceph-qa-suite/commit/3ce379a689440ce084b2df79db59dd76e1b2db6c
+5535e4c Removed unneeded workloads. Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com> (cherry picked from commit 18a79fb8252d9ceee7162e8f774ab6bf806484c5)
+260938f Removed unneeded workloads
+097b95f Fixing commits from https://github.com/ceph/ceph-qa-suite/pull/400/commits
+f191ec5 Reduced number of nodes from 4 to 3; changed default 'replica' to 2; reduced osd to 2 per node; added logging
 735ab91 Client: Fast return if len is 0
 2586e3b pybind: fix valgrind warning on rbd_get_parent_info call
 5ccc442 osdc: invalid read of freed memory
@@ -9483,6 +10743,8 @@ ae6247a AsyncConnection: Fix connection doesn't exchange in_seq problem
 3f39894 librbd: missing an argument when calling invoke_async_request
 7029265 packaging: move SuSEfirewall2 templates out of src
 55414ae ceph-fuse: check return value on system() invocation
+99951f8 tasks/ceph_deploy: remove some dead code
+dbe2ad2 tasks/ceph_deploy: fix for multiple mons
 4757bf9 rgw: fix broken account listing of Swift API.
 1ff409e common/config: detect overflow of float values
 d62f80d common/config: detect overflow of int values
@@ -9497,12 +10759,17 @@ ea5107c librbd: librados completions are not properly released
 ed5472a tests: fix valgrind errors with librbd unit test
 6ab1bb5 tests: librbd should release global data before exit
 54c8825 librados_test_stub: cleanup singleton memory allocation
+0b961fc suites/rbd: add new valgrind memtest tests
+8392d7f tasks: add support for running fsx under valgrind
+206ba83 suites: log whitelist for journal repair test
+539117a tasks/cephfs: add test_pool_perm
 a84337b osd/ReplicatedPG: fix an indent in find_object_context
 f93df43 doc: add giant v0.87.2 to the release timeline
 86788c4 mds: remove caps from revoking list when caps are voluntarily released
 6e50f64 add perf counter for rocksdb to evaluate latency of get and transaction commit
 d2fb5bd add perf counter for leveldb to evaluate latency of get&commit
 fd11e32 doc/release-notes: v0.87.2
+0c27c90 rbd.xfstests: remove bad assert
 24f4774 Swift: Set Content-Length when requesting/checking Keystone tokens
 972e9a5 FileJournal: fix check_for_full
 3c4028e client: check OSD caps before read/write
@@ -9513,6 +10780,7 @@ fd7723a librbd: update ref count when queueing AioCompletion
 f141e02 librbd: flatten should return -EROFS if image is read-only
 594a661 librbd: allow snapshots to be created when snapshot is active
 32c41f8 cls_rbd: get_features needs to support legacy negative tests
+168b773 Initial checkin for hammer-x suite; used "ceph osd crush tunables hammer"
 e97fd50 rgw: simplify content length handling
 79d17af rgw: make compatibility deconfliction optional.
 06d67d9 rgw_admin: add --remove-bad flag to bucket check
@@ -9560,16 +10828,25 @@ e972a69 auth/Crypto: avoid memcpy on libnss crypto operation
 e874a9b do_autogen.sh: unambiguously use nss unless user asks for cryptopp
 ad5a154 auth: make CryptoHandler implementations totally private
 6063a21 logrotate.conf: prefer service over invoke-rc.d
+94d4b91 xfstests: fix typo
+4a1c434 xfstests: define defaults in root task
+a263551 task/samba: ignore return code of fuser/lsof
+2ed6de6 task/samba: use SIGTERM to stop samba server
 8aea730 tests: osd-bench.sh must be verbose on failure
 14cb7b8 tests: convert osd-bench.sh to ceph-helpers.sh
 5871781 ceph-helpers: implement test_expect_failure
 7a432f7 civetweb: update max num of threads
 3bfdf54 doc: don't mention ceph osd setmap
 ef7e210 librbd: better handling for duplicate flatten requests
+e850645 cephfs/test_client_limits.py: test for client oldest tid warning
 12f1b30 client: add failure injection for not advancing oldest_tid
 901fd1a mds: include size of completed_requests in session dump
 dcd9302 mds: warn when clients are not advancing their oldest_client_tid
 009664e rgw: force content-type header for swift account responses without body
+97e021d tasks/ceph_deploy: configure CephFS
+4950506 tasks/cephfs: cope with missing ctx.daemons attr
+a12e8e0 tasks/cephfs: tweak use of mon for admin commands
+26b2cfa tasks/ceph: refactor legacy FS configuration check
 b2b443c man: fix the description in NAME section
 d85e0f8 tools: ceph-monstore-tool must do out_store.close()
 ebda4ba Increase max files open limit for OSD daemon.
@@ -9583,10 +10860,12 @@ fc758ce mds: remove stray dentry from delayed list when it's queued for purging
 5c09be2 mds: move snap inode to new dirfrag's dirty_rstat_inodes
 d900252 mds: keep auth pin on CDir when marking it dirty
 2e90fa3 osd/PG: check scrub state when handle CEPH_OSD_OP_SCRUB_MAP.
+c049387 task/samba: ignore return code of fuser/lsof
 d2a6728 rgw: don't use end_marker for namespaced object listing
 58a144d rgw: adjust return code if can't find upload
 6ee4f64 rgw: fail if parts not specified on complete-multipart-upload
 66edf62 librbd: always initialize perf counters
+465b86a suites/fs: update log whitelist
 66493b7 cls_rbd: get_features needs to support legacy negative tests
 d49888e rgw: prohibit putting empty attrs on Swift account/cont.
 6322051 rgw: unify mechanisms for setting TempURL and Swift account metadata.
@@ -9598,6 +10877,12 @@ f7b92f9 rgw: rectify support for GET on Swift's account with limit == 0.
 b05d144 AsyncConnection: Fix deadlock cause by throttle block
 0c75b88 AsyncConnection: Avoid lockdep check assert failure
 69bedcd Event: Fix memory leak for notify fd
+fa2a861 suites/fs: update log whitelist
+7c2e6cd tasks/ceph_deploy: configure CephFS
+f360110 tasks/cephfs: cope with missing ctx.daemons attr
+8b61310 tasks/cephfs: tweak use of mon for admin commands
+ea7c392 tasks/ceph: refactor legacy FS configuration check
+51722de suites: whitelist log messages for auto-repair test
 cc5f144 mon: osd df: fix average_util calculation
 0374f32 Update Haomai's organization
 e4ebe10 test/cli-integration/rbd: add unmap test
@@ -9606,6 +10891,7 @@ b1d3f91 rbd: allow unmapping by spec
 a1363c4 krbd: rename should_match_minor() to have_minor_attr()
 7df2cf2 client: drop inode when rmdir request finishes
 933332b tests: separate check_PROGRAMS from TESTS
+3c8730d Added hammer-client-x to the mix; renamed dirs to better reflect what is being tested; removed old dirs; added logging
 e36df09 doc: Removes references to s3gw.fcgi in simple gateway configuration file.
 41b132f test: return 0 when evicting an not existing object
 1fa343f osd/ReplicatedPG: correctly handle the evict op when obs doesn't exist
@@ -9615,13 +10901,18 @@ d42c2c4 osd: refactor the skip promotion logic
 193f1e3 osd: avoid txn append in ReplicatedBackend::sub_op_modify
 3fdace6 osd: avoid txn append in ReplicatedBackend::submit_transaction
 e89ee9e osd: allow multiple txns to be queued via backend
+924d86c http not https for git.ceph.com
+a14a951 git.ceph.com
 0a442ee tests: comment out unstable tiering tests
+f062b17 git.ceph.com
 34c467b use git://git.ceph.com
 16a3fbd ceph-authtool: exit(1) when printing a non-existing key
 dc43880 releases: table of estimated end of life
 932d59d os/LevelDBStore:fix bug when compact_on_mount
 36db6d2 os/RocksDBStore: fix bug when compact_on_mount
+f825d32 Initial checkin for hammer-x suite; used "ceph osd crush tunables hammer"
 73fb61a osd: misc latency perf counter for tier pool
+b6212ac Update latest_firefly_release.yaml
 dde64b3 FileStore: queue_transactions: fine-grain the submitManager lock. There is no need to hold the lock around the code that encodes tls into tbl; the submitManager lock only ensures sequential submission of ops, so encode the transaction data outside the lock.
 b3c3a24 RGW: Make RADOS handles in RGW to be a configurable option
 3b60f5f TestPGLog.cc: fix -Wsign-compare
@@ -9658,12 +10949,32 @@ aedcce7 librados_test_stub: add list_snaps implementation for ObjectReadOperatio
 f3ce75c mon: fix min variance calc in 'osd df'
 5cc92bb rgw: shouldn't return content-type: application/xml if content length is 0
 46b19cb librbd: update image flags when enabling/disabling features
+b29e61c tasks: s/pending_create/pending_update/ in MDS
+7bc8875 tasks: implement TestStrays.test_mv_hardlink_cleanup
+a2a8c21 tasks: update test_journal_repair
+b3f0165 suites/fs: clean up whitespace in YAML
+31c1d76 suites: run fs/recovery with two MDSs
+947cc88 suites: add .yamls for the recent cephfs tests
+f9d35d7 suites: put fuse clients on separate hosts
+04730ee suites: update tasks to use cephfs_test_runner
+097ccbb tasks: update journal_repair test for 'damaged' state
+bd3ae1e tasks/cephfs: add test_strays
+abb6355 tasks/cephfs: add test_sessionmap
+2b5137b tasks: generalise cephfs test runner
+f54e541 tasks/ceph_fuse: populate ctx.mounts earlier
+2b39fe5 tasks/mds_flush: be more careful monitoring stats
+3d3b095 tasks: lots of s/mds_restart/mds_fail_restart/
+79906e3 tasks/cephfs: better multiple-mds handling
+0de712f tasks/ceph_manager: DRY in mds_status
+5c1071b ceph_manager: fix bad type assertions
+ce1196d tasks/cephfs: be tolerant of multiple MDSs
 9eb1e59 librbd: add new RBD_FLAG_FAST_DIFF_INVALID flag
 ef21647 Move ceph-dencoder build to client
 c2b3a35 Rework mds/Makefile.am to support a dencoder client build
 0b26433 rgw/Makefile.am: Populate DENCODER_SOURCES properly
 fb11c74 Dencoder should never be built with tcmalloc
 570ff6d9 librbd: add perf counters descriptions
+86bd6bc task/samba: use SIGTERM to stop samba server
 e1f7899 doc: Fix misleading overlay setting in Cache Tier
 b00b821 osd: Break the handling of PG removal event into multiple iterations
 df47e1f client: fix uninline data function
@@ -9744,6 +11055,7 @@ d6e2341 crush: fix dump of has_v4_buckets
 1e5b220 TestPGLog: fix noop log proc_replica_log test case
 b61e5ae TestPGLog: add test for 11358
 6561e0d PGLog::proc_replica_log: handle split out overlapping entries
+8a0de9a Removed per #11070 and resolves #11043
 64d1e90 crush/mapper: fix divide-by-0 in straw2
 9914a73 qa/workunits/rbd/copy.sh: removed deprecated --new-format option
 3b95edb tests: ensure old-format RBD tests still work
@@ -9768,8 +11080,10 @@ e3d62a9 common: make rados bench return correctly errno.
 fb51175 TestCase: Change in testcase output
 b15f6d0 Fix some of the command line parsing (including rbd)
 b0172d8 rbd: create command throws inappropriate error messages
+7b855de RBD: added optional YAML parameters to test xfstests from different repos
 d1cb94f RBD: update expunge set for latest test, parameterize test script
 567a7ef RBD: build prerequisites for latest xfstests and update test configuration
+b0f5cb1 Increased default test RBD size to 10G to help tests pass
 600f2cc README: rm references to old Ubuntu distros
 9fed564 librados: Add config observer for Objecter instance.
 69d680f doc/release-notes: spelling
@@ -9865,11 +11179,15 @@ fa359ae client: remove useless perf counters
 2d6e069 os: add perf counters description
 43fe246 common: add perf counters descriptions
 10b882b rgw: implement base for inter-region account metadata.
+1414ca9 erasure-code: ec-cache-agent in firefly-x/stress-split-erasure-code
+12af9b7 erasure-code: rename firefly-x/stress-split-erasure-code
+f13eb91 rados: explain that the task is asynchronous by default
 8c8ea8a osdc: perf counters description added
 b235a42 Librbd: Discard all global config used for image
 e39070f man: using sphinx-1.0-build if no sphinx-build
 a8eab36 spec.in: sphinx -b man needs sphinx > 1.0
 a3cf004 man: add conf.py to the list of distributed files
+8cb28dd Revert "ceph: be less weird about passing -f to mkfs"
 0b20c6b mailmap: Ian Kelling affiliation
 50f4495 mailmap: Vartika Rai affiliation
 0f94587 mailmap: Alexandre Marangone affiliation
@@ -9906,6 +11224,7 @@ a3b000d osd: increment the dirty perf counter when cloning a dirty object in mak
 a0cd70b osd: remove unnecessary code in make_writeable
 6150757 ReplicatedPG::finish_promote: handle results->snaps is empty case
 a45a698 ReplicatedPG::finish_promote: fix snap promote head snaps
+720f4c7 Fixed #11306: whitelist WRN "failed to encode map"
 57d2781 librbd: snap_remove should ignore -ENOENT errors
 572a2f5 librbd: get_parent_info should protect against invalid parent
 aa7f0b3 mds: batch up writes in SessionMap::save_if_dirty
@@ -9929,17 +11248,21 @@ d5d6468 doc: fix the architecture diagram in cephfs.rst
 3a5f9c3 rgw: rectify broken statistics during Swift account listing.
 b0a3941 ReplicatedPG::promote_object: do not create obc if not promoting
 bdc664f ECTransaction: write out the hinfo key on touch as well
+182cb63 ceph: fix mkfs -f bug
 486509a mds: make sure lock state not stay in XLOCK/XLOCKDONE indefinitely
 4c122c1 Set disableDataSync to false
 0bd767f Update RocksDB configuration to make it more clear
 febb5a4 librados: define C++ global flags from the C constants
 b2b4369 test: add librados global op flags test
 c7de236 os/KeyValueDB: skip experimental check for test_init
+9039388 rados_python: whitelist wrongly marked me down
 7e5b81b Revert "librados: remove the unused flags."
 8e5d4c6 osd: drop unused utime_t now arg to issue_repop
 8db4056 osd: do not update mtime when recording digest
 a4c01f3 mark kinetic experimental
 002b7fd mark rocksdb experimental
+9175f88 (scrub|rados)_test: tolerate best guess digest errors as well
+4d8bc21 rados/thrash*: make scrubs happen a lot
 1b0b598 mds: persist completed_requests reliably
 c4d8e65 Librbd: Add existing rbd configs to aware table
 cf715bd Librbd: Add tests for aware metadata config
@@ -9966,8 +11289,15 @@ ec02441 crush: CrushTester::test_with_crushtool: use SubProcess to spawn crushto
 5388521 common: SubProcess: helper class to spawn subprocess
 d789f44 mds: properly remove inode after purging stray
 ea32960 cls_rbd: fix read past end of bufferlist c_str() in debug log msg
+58174f0 calamari_setup: der.  Use dict.update() correctly
+e3ec2fc calamari_setup: Require test_image to be set
+dee0101 calamari_setup: centralize config defaults
+3a69c3f calamari_setup: remove "build test image" code; add 'test_image' cfgvar
+5644bb5 calamari_setup: mounting iso on older distros requires -o loop
+bafe87a tasks/watch_notify_same_primary: wait for watch before notify
 f9b98c9 ceph-objectstore-tool: Fix message and make it debug only to stderr
 923d532 ceph-objectstore-tool: Remove bogus comment and eliminate a debug message
+e6ce90f Make sure that ulimits are adjusted for ceph-objectstore-tool
 d6acc6a Doc: Incomplete example in erasure-coded-pool.rst
 90c38b5 rocksdb: fix 32-bit build
 ddad2d4 Makefile-rocksdb.am: update for latest rocks
@@ -9990,6 +11320,7 @@ edf64dd osd: do not double-write log entries
 34c7d2c osd: combine info and log writes into single omap_setkeys
 b486e58 osd: pass map to write_info instead of txn
 ddf0292 PG: set/clear CREATING in Primary state entry/exit
+1dddc11 erasure-code: enable ec-rados-default.yaml
 6e6771e ceph-disk: add test files to EXTRA_DIST
 b301982 ceph-disk: remove double import
 0f267c1 ceph-disk: create initial structure for tox/unit tests
@@ -10010,6 +11341,7 @@ e6bd722 test: test_common.sh: start_recovery: don't use deprecated command
 c52b75e rgw: dump object metadata in response for COPY request of Swift API.
 ccf6eaa rgw: refactor dumping metadata of Swift objects.
 94f1375 rgw: add support for X-Copied-From{-Account} headers of Swift API.
+fc39550 ensure summary is looked up for the user we need (part 2)
 0f92f34 release-notes.rst: update for degraded writes revert
 e1ca446 doc: no longer call out ceph-deploy as new
 ab1740d rocksdb: update to newer version
@@ -10057,6 +11389,7 @@ ece49d1 mds: handle read/replay errors in MDLog with damaged()
 6bf1ce7 osdc/Journaler: improved error handling
 2b4d96b mds: catch exceptions in ::decode
 9897369 mds: handle encoding errors on MDSTable load
+e0ff086 Fixes #11166, whitelisted 'Missing health data for MDS'
 3edfa67 Revert "ReplicatedPG: only allow a degraded write if we have at least min_size copies"
 5e4b7b0 Revert "Merge pull request #3911 from athanatos/wip-11057"
 385fe4b rgw: send ETag, Last-Modified in response for copying Swift cobject.
@@ -10092,6 +11425,7 @@ ada7ec8 test: potential memory leak in FlushAioPP
 2e69235 rbd: correct the name of mylock in lbirbd::read
 0a0d8f6 doc: Regenerate man/ceph.8 based on ceph.rst changes
 eb890b1 doc: Break ceph osd pool get into sections based on pool type
+97e6d80 ensure summary is looked up for the user we need (part 2)
 3cffbbe osd: return fast if PG::deleting is true in snap_trimmer and PG::scrub
 cd11daa Fix ceph_test_async_driver failed
 c7702bf osd/Replicated: For CEPH_OSD_OP_WRITE, set data digest.
@@ -10101,7 +11435,10 @@ ee8c50b osd/ReplicatedPG: cleanup code in ReplicatedPG::process_copy_chunk.
 b6512eb erasure code: add shec's documentation / change default layout
 a00cb31 rgw: improve metadata handling on copy operation of Swift API.
 5f7a838 doc: what does it mean for a release to be supported
+6c53005 ceph_manager: Check for exit status 11 from ceph-objectstore-tool import
 175aff8 ceph-objectstore-tool: Use exit status 11 for incompatible import attempt
+8aeacaa fs: fix up dd testing again
+c571e65 calamari_setup: oops: ice-tools is still where it was on github.com
 68719f5 osd: fix omap digest clearing for omap write ops
 d5b3bd7 os/MemStore: make omap_clear zap the omap header too
 d2467e4 ceph_test_rados_api_aio: verify omap_clear clears header, too
@@ -10121,10 +11458,12 @@ e723116 mailmap: Zhicheng Wei affiliation.
 eb4f100 mailmap: Alexis Normand affiliation
 341218c mailmap: Florian Marsylle affiliation
 698905c mailmap: Robin Dehu affiliation
+b6c2e97 ensure summary is looked up for the user we need
 035ab3c mds: call wait_for_safe twice in command_flush_journal
 4c24d0c auth: reinitialize NSS modules after fork()
 f183cd7 auth: properly return error of cephx_calc_client_server_challenge
 bee7f24 Minor fix: Documentation referred to bootstrap-osd instead of bootstrap-mds
+f7ea0a4 ensure summary is looked up for the user we need
 c087025 mds: improved doxygen comments in StrayManager
 0ae624e mds: update StrayManager op limit on mds_max_purge_ops_per_pg
 3a0caf9 mds: fix parent check in MDCache::eval_remote
@@ -10158,6 +11497,7 @@ a33d169 mds: extended dump() for CInode
 75ff9d6 mds: add a dump() method to MutationImpl
 842a4c3 include/frag: add a dump() method to fragtree_t
 62ee84e mds: move auth_pins attrs into MDSCacheObject
+81c493c calamari_setup: add -y to yum localinstall icesetup
 f96d58b init-radosgw*: don't require rgw_socket_path to be defined
 0712d8d PG: ensure that info.last_epoch_started only increases
 2956ae2 doc: add last_epoch_started.rst
@@ -10168,7 +11508,11 @@ cca067e configure.ac: add --disable-gitversion
 6823bcd init-radosgw*: don't require rgw_socket_path to be defined
 36d6eea rgw: don't use rgw_socket_path if frontend is configured
 830752a doc: fix doxygen warnings
+93c0b8a calamari_setup: handle iso, preserving support for tar.gz
+da668ab calamari_setup: small comment/logmsg fixes, correct git user path
 b6cdc56 osd/ReplicatedPG: Fix a memory leak in do_pg_op.
+9087b7d calamari_setup: make iceball fetch/creators return path to iceball
+34e7544 calamari_setup: Refactor/simplify in preparation for iso support
 64851f5 doc: consistent alternate cluster name arguments
 c5835ae mailmap: Xiong Yiliang affiliation
 41859dd mailmap: Raju Kurunkad affiliation
@@ -10395,6 +11739,7 @@ cb6813c tests: add Debian jessie dockerfile
 f8dec72 tests: jq is not available on Ubuntu precise
 4add63c install-deps.sh: strip | in the list of packages
 dbe2b24 rgw: flush watch after unregistering
+056dd6d Implemented "exclude_packages" feature #11043
 3530a25 Client: do not require successful remount when unmounting
 b90018f The erasure-code is actually required by libcommon
 9b3e1f0 Minor syntax fix-ups for Makefile.am redesign
@@ -10412,6 +11757,8 @@ ef87a25 doc: osd map cache size is a count, not MB
 67b776e FileStore: add config option m_filestore_seek_data_hole
 2b3197e FileStore: moving SEEK_DATA/SEEK_HOLE detection logic into init
 a88712a rgw - make starting quota/gc threads configurable
+4250e22 Implemented "exclude_packages" feature #11043
+2539a48 Implemented "exclude_packages" feature #11043
 9b372fe ceph needs gmock/gtest to be statically linked
 e82ac10 rgw: only finialize finisher if it's not null
 7bab9f7 rgw: fix watch initialization and reinit on error
@@ -10448,13 +11795,19 @@ cb840cc librbd: remove unneeded assert from unregister_watch()
 e32da3e rgw: do not pre-fetch data for HEAD requests
 924f85f ceph.spec.in: loosen ceph-test's dependencies
 8dc0bf8 osdc/Objecter: clean up oncommit_sync (and fix leak)
+7ad8ac6 tasks/cephfs: clean up core on deliberate crash
 98a2e5c rados: translate errno to str in CLI
 f7d35b9 osdc/Objecter: count oncommit_sync as uncommitted
 099264f hadoop: workunits don't need java path
 2f2ace3 qa: update old replication tests
+fbaf65b hadoop: fixup for new replication test
+d2b52d9 Control the nfs service correctly on rpm-based systems
+7402d5f Fixes #11013, use time.sleep instead of manager.sleep which isn't there.
+2e4d884 Control the nfs service correctly on rpm-based systems
 a3af64c ceph.in: print help on 'osd' or 'mon'
 69161f0 common: add perf counters description
 dce28b4 FileStore: fiemap implementation using SEEK_HOLE/SEEK_DATA
+811c02e Fixes #11013, use time.sleep instead of manager.sleep which isn't there.
 891f814 mds: flush immediately in do_open_truncate
 a6a6df6 cmake:  build fixes
 137800a librbd: delay completion of AioRequest::read_from_parent
@@ -10490,12 +11843,17 @@ fb2caa0 librbd: add log message for completion of AioRequest
 7a5a635 mon: do not hardwire crushtool command line
 eca153e test/cli-integration/rbd: updated to new CLI
 6c2d929 rbd: permit v2 striping for clones and imports
+cdaa11d Use an NFS mount instead of virtio-9p in the qemu task
 b5050b6 rbd: fixed formatted output of rbd image features
 6cff494 qa/workunits/rbd/copy.sh: remove all image locks
 be7b4c3 update some .gitignore files
 a7b3443 submodules: --recursive needed for gtest in gmock
 dbcd55f gmock: add git submodule
 7b41871 gmock: remove in-tree code copy
+0c9fb0f workunit: include /usr/sbin in the PATH for all commands
+d25aa7b rename test to tests, follows best practices for tests dirs
+b0ac351 create a new get_acl helper to ensure proper xml is compared
+20a1ae7 tests for the new acl helper in radosgw_admin
 0e58463 librbd: missing callback log message for CopyupRequest object map update
 ed9e358 librbd: hide flush log message if no flushes are pending
 d3a3d5a rgw: Swift API. Complement the response to "show container details"
@@ -10504,6 +11862,8 @@ eb13f2d rgw: don't overwrite bucket / object owner when setting acls
 e2283e3 librbd: flush pending AIO after acquiring lock
 472db64 librbd: hold snap_lock between clipping IO and registering AIO
 4ececa3 qa/workunits/fs/misc: fix filelock_interrupt.py
+d20d923 tasks: fix intermittent failure in TestFlush
+5b2fd3d tasks/cephfs: don't run iptables in parallel
 7f36312 doc: ext4 has a journal
 7e89f51 mon: do not pollute directory with cvs files from crushtool
 7602b12 osd/ReplicatedPG: use swap to reassign src_obc in do_op
@@ -10519,7 +11879,9 @@ caa9022 rgw: update makefile to enable civetweb config
 0f8be6f civetweb: update submodule
 4698fbe TestAsyncDriver: Fix typo in ceph_test_async_driver
 3cea092 doc/install/manual-deployment: fix osd install doc
+1464fb8 Changed suite name from multi-versions to client-upgrade
 a23b348 Maipo should also use the local qemu clone
+bcf7eab Changed suite name from multi-versions to client-upgrade
 843ba7d doc/release-notes: fix Takeshi's name
 bc638cf doc/release-notes: fix typo
 6e308dc doc/release-notes: v0.80.9 firefly
@@ -10546,14 +11908,54 @@ eb422a8 doc: MDS internal data structure
 694529a Client: support using dentry invalidation callbacks on older kernels
 a6ebf67 Client: add functions to test remount functionality
 cd95b29 Client: check for failures on system() invocation
+fc4e909 tasks/cephfs: fix/improve fuse force umount (again)
+e9d713b msgr: move async msgr tests to singleton-nomsgr
+69a86b3 exclude the virtualenv from being linted in tox.ini
+a8a2976 remove unused import from repair_test
+a4dc846 ignore tox hidden dir
+9157622 remove unused variable assignment
+3f3ce53 remove unused import from fuse
+35224e3 remove unneeded variable assignment for call
+976773a remove unused import in calamari_setup
+e8e1e7e remove unused imports from populate_rbd
+28d3075 remove unused imports from peering_speed
+5018e91 remove redefined StringIO import
+4ed442e stdin is no longer a kwarg
+33f7982 add the log object to ceph_manager
+9cdd278 add missing mon object to osd_failsafe
+744bc38 fix undefined var in divergent_priors
+b944b06 exclude the virtualenv from being linted in tox.ini
+20b2c3d fix path for tox.ini to work on ceph-qa-suite only
+6a4d62e initial tox.ini to run flake8
+834a724 fix path for tox.ini to work on ceph-qa-suite only
 3ec52da qa/workunits/rbd/copy.sh: explicitly choose the image format
+0a65e90 remove unneeded variable assignment for call
+3a18cb2 remove redefined StringIO import
+49a61dc stdin is no longer a kwarg
+f7c1ca4 add the log object to ceph_manager
+920552a remove unused import from repair test
+1673be4 add missing mon object to osd_failsafe
+959c491 remove unused imports from populate_rbd
+6d45352 remove unused imports from peering_speed
+9df27f7 remove unused variable assignment
+d22cd69 remove unused import in calamari_setup
+26c5a82 fix undefined var in divergent_priors
+e4ba817 fix lint errors on ceph_fuse
+7108b69 initial tox.ini to run flake8
+4068f73 ignore tox hidden dir
 7d128a0 coverity fix: removing logically dead code
 14d7e36 osd: fix negative degraded objects during backfilling
 224a2d1 TestMsgr: Don't bind addr if not standby
 0047699 AsyncConnection: Drop connect_seq increase line
+296d31e rbd: fix copy-on-read setting
+aa6240c rbd: fix copy-on-read setting
 67225cb Pipe: Drop connect_seq increase line
 286a886 client: re-send requests before composing the cap reconnect message
 8ea5a81 client: re-send requests before composing the cap reconnect message
+473cba0 rbd: add merge_diff test
+bb6dd0e suites/rbd: add object map tests
+e7bf3d0 rbd: add merge_diff test
+b277946 suites/rbd: add object map tests
 f3ad61a packaging: move rbd udev rules to ceph-common
 ec26f08 librbd: remove unnecessary md_lock usage
 1f9782e librbd: move object_map_lock acquisition into refresh()
@@ -10581,7 +11983,12 @@ a94ceb6 librbd: add and use a test_features() helper
 cffd93a librbd: use ImageCtx->snap_lock for ImageCtx->features
 468839e tests: add additional test coverage for ImageWatcher RPC
 915064a librbd: add ostream formatter for NotifyOp
+55c11a3 ceph: ugh fix syntax
+1922c61 ceph: ugh fix syntax
+fea2e22 Fixes #10869, added {role} to the dir name.
 260c820 fuse: do not invoke ll_register_callbacks() on finalize
+5be8b8e ceph: fix ps axuf lsof line
+18307be ceph: fix ps axuf lsof line
 1c68264 doc/release-notes: final v0.87.1 notes
 ff2d497 TestMsgr: Add inject error tests for lossless_peer_reuse policy
 9f24a8c TestMsgr: Make SyntheticWorkload support policy passed in
@@ -10603,6 +12010,7 @@ c57df9c introduce compact_set and compact_map
 e2ba6f3 mailmap: Anis Ayari affiliation
 e57a76f mailmap: Armando Segnini affiliation
 c1fce65 mailmap: Billy Olsen affiliation
+64de3cd Thrasher: log backtrace of thrown exception
 8605e05 mailmap: Ahoussi Armand affiliation
 156a4b9 mailmap: Hazem Amara affiliation
 252f5f0 mailmap: Karel Striegel affiliation
@@ -10627,8 +12035,11 @@ d1c82ea ceph.spec: specify version
 2a23eac debian: split python-ceph
 b3329a9 hadoop: workunits don't need java path
 39982b1 Split python-ceph to appropriate python-* packages
+93f2bea Use an NFS mount instead of virtio-9p in the qemu task
 07ba2df Corrected arch diagram signed off by: pmcgarry at redhat.com
 c7ed277 test/librados/misc.cc: fix -Wsign-compare
+1ee6216 ceph: ps axf too before lsof
+a68281e ceph: ps axf too before lsof
 bd40f23 tests: speed up Python RBD random data generation
 d03cc61 doc: add erasure-code-shec to plugin list
 01a113c fix build with clang/clang++
@@ -10641,16 +12052,30 @@ d03cc61 doc: add erasure-code-shec to plugin list
 c1abcb7 ErasureCodeShec.cc: prefer ++operator for non-primitive iter
 b7ea692 libradosstriper/striping.cc: fix resource leak
 c1e792d doc: update doc with latest code
+0e53f5f workunit: include /usr/sbin in the PATH for all commands
+6a348ae hadoop: fixup for new replication test
 15da810 qa: update old replication tests
 6bc2b024 hadoop: add terasort workunit
 655e616 tests: fix potential race conditions in test_ImageWatcher
+0365f8b ceph: lsof if umount fails
+97fb9d8 ceph: archive logs at very end
 9c03750 osdc: watch error callback invoked on cancelled context
 a9bfd5d ceph_test_rados_api_watch_notify: wait longer for watch timeout
+7cee949 Fixed typos
+05e5d87 Fixed typo causing "too many values to unpack" error
+86e8e9d Fixes #10704
+78d65a8 Fixed typos
+d1d3f1c Fixed typo causing "too many values to unpack" error
+7b2cbac Fixes #10704
+077e917 ceph: lsof if umount fails
+ca09683 ceph: archive logs at very end
 62e7b4a crush: re-organize the help to clarify ordering
+e601fb1 Fixes #10652
 b8d497e crushtool: add test for crushtool ordering
 dca3452 crushtool: send --tree to stdout
 2b92320 crushtool: name osds with --build function
 f52840c crushtool: do not dump tree on build
+57ebe7e Fixes #10652; removed firefly-giant dir as it's not needed; fixed branch assignment in dumpling-x
 b5d6e76 doc/release-notes: v0.87.1
 91cda52 osd: better debug for maybe_handle_cache
 bee9154 osd,mon: explicitly specify OSD features in MOSDBoot
@@ -10667,6 +12092,9 @@ e0e765f osd/OSDMap: cache get_up_osd_features
 89d5200 doc: do not doxygen src/tracing
 2b63dd2 DBObjectMap: lock header_lock on sync()
 a286798 mon: do not try and "deactivate" the last MDS
+714340a hadoop: add terasort task
+3d8cfc0 Fixes #10652
+63d51d0 Fixes http://tracker.ceph.com/issues/10652
 170c88d cmake: radosgw, radosgw-admin related fixes
 4feb171 vstart.sh: can use binaries outside of ceph/src
 7c8b493 os/chain_xattr: fix wrong `size` for snprintf()
@@ -10726,8 +12154,10 @@ e95d4cc tests: remove tests for when init() is not called in shec (#10839)
 3e37c13 rm some useless codes
 d231e8b Minor: added owner to debugging output in ceph_flock()
 e7735d3 osd: number of degraded objects in EC pool is wrong when there is OSD down(in)
+fc6f275 Mirror the supported distros used in nightlies in the teuthology suite
 e0fbe5c cmake: add librbd/test_fixture.cc
 caf2e1d cmake: add os/XfsFileStoreBackend.cc to rules
+2df632e Fixes #10869, added {role} to the dir name.
 62dd0c2 erasure-code: mark the shec plugin as experimental
 f9c90e7 tests: fix unused variable warning
 3a3bb6d common: capture check_experimental_feature_enabled message
@@ -10740,6 +12170,7 @@ b80e6ae README.md: fix the indent of a command line usage
 8e806bc Docs: OSD name, not id, needs to be given to remove an item from the CRUSH map. Include command for deleteing bucket from the CRUSH map.
 db06582 osd/OSDMap: include pg_temp count in summary
 a5759e9 mon/OSDMonitor: do not trust small values in osd epoch cache
+ca04dc6 Thrasher: Fix log message
 87544f6 mon: MonCap: take EntityName instead when expanding profiles
 fd83020 mds: fix decoding of InodeStore::oldest_snap
 6918a98 tests: Dockerfile COPY with two arguments
@@ -10753,8 +12184,13 @@ de6b53a qa: hadoop plays nice with new teuthology task
 3c05c9c tests: no need for python-flask at build time
 b24a01b erasure-code: initialize all data members
 1a9d717 erasure-code: fix uninitialized data members on SHEC
+34af5a9 Run the 'tests' task for jobs in the teuthology suite that install ceph
+d047a2d Ignore *.pyc files and __pycache__ folders
 e2a5085 ReplicatedPG::on_change: requeue in_progress_async_reads close to last
 a5ecaa1 ReplicatedPG::on_change: clean up callbacks_for_degraded_object
+7b20626 rename test to tests, follows best practices for tests dirs
+9b6ff63 create a new get_acl helper to ensure proper xml is compared
+3801ac9 tests for the new acl helper in radosgw_admin
 2d2dc13 mon/PGMonitor: drop pg ls debug line
 956d8e7 AsyncConnection: Clean up unused variables
 65ce7b7 fix configure to reflect the missing libs for xio
@@ -10791,9 +12227,11 @@ e504003 rgw: remove multipart entries for bucket index when aborting
 3e54acb rgw: encode rgw_obj::orig_obj
 64d7265 librados: code reformatting only.
 3f11ab0 librados: add info about restrictions on rados_monitor_log() usage.
+a973532 Created a new suite to test teuthology
 f67bfa2 rgw: Swift API. Support for X-Remove-Container-Meta-{key} header.
 cdfc23f rgw: fix doc, A typo in command line argument name
 2f8d31e rgw: Swift API. Dump container's custom metadata.
+03b0e10 tasks/cephfs: fix fuse force unmount
 86f50a5 TestMsgr: set "ms_die_on_old_message" to true to indicate out-of-order messages
 25d53f6 AsyncConnection: Allow reply reset tag if first hit
 90d0f0d rados: add 'watch/notify' in rados --help command.
@@ -10821,6 +12259,8 @@ f814262 fix error : ceph pg ls 0
 93629d3 Pipe: conditionally compile IPTOS setting
 b025fbf librbd: consolidate all async operation flush logic
 0a00be2 xlist: added missing include
+df5d579 Corrected "branch" to be outside of the 'clients:' section
+4745c5b Corrected "branch" to be outside of the 'clients:' section
 78b1fb5 ReplicatedPG::eval_repop: check waiting_for_* even if !m
 d9c024b osd/ReplicatedPG: remove dup op mark start call
 b4f2e75 osd: add perf counter for proxy read
@@ -10841,6 +12281,7 @@ ab6c65a mon/OSDMonitor: fix nan on 'osd df' variance
 e070718 osdc/Objecter: do watch/notify op completions synchronously
 1faa947 osdc/Objecter: remove unused on_reg_ack
 9b6b7c3 Handle differently-named xmlstarlet binary for *suse
+4b6fa9d tasks/calamari_setup: ice_setup 0.2.0 has added another prompt
 155a83c librados: rectify the pool name caching in IoCtxImpl.
 293cd39 tests: run osd-scrub-repair.sh with make check
 bae1f3e FileJournal: fix journalq population in do_read_entry()
@@ -10858,6 +12299,8 @@ f86fb97 rgw: obj delete operation can use remove_objs param
 538395d rbd.py: Add rbd_read2/rbd_write2 funcion which can handle fadvise flags.
 7890256 librbd: minor ImageWatcher cleanup
 debd7f3 tests: relax librbd ImageWatcher test case state machine
+ca49215 extra_packages: stop trying to install libcephfs stuff
+977a221 extra_packages: stop trying to install libcephfs stuff
 dfee96e rgw: send appropriate op to cancel bucket index pending operation
 cbfa08e mds: disallow layout changes for files with data
 b3fdf15 mds: avoid propagrating unnecessary snap rstat to parent
@@ -10914,6 +12357,8 @@ a3fc9d4 AsyncConnection: fix wrong scope of data blocks
 adebf22 rbd_recover_tool: move rbd_recover_tool directory to src/tools subdirectory
 70ae314 client: fix O_PATH on older Linux systems
 2f49de5 ReplicatedPG: block writes on degraded objects unless all peers support it
+5e9698e overrides/short_pg_log: 30/50 was too short, make 100/200
+a965d6c Add labels to a couple common task failures
 2a83ef3 include/encoding: fix an compile warning
 71c6d98 msg: fixup for 2ffacbe (crc configuration in messenger)
 2598fc5 ObjectStore: fix Transaction encoding version number
@@ -11012,6 +12457,7 @@ a007c52 doc: add cephfs disaster recovery guidance
 a1f634b add CLI ceph pg ls [pool] [state]
 1c164cf get pg status with specific pool, osd, state
 0e046bd add is_acting_osd function to check if pg is on that osd
+14e6a4c Fixes http://tracker.ceph.com/issues/10652
 34473f7 librados: rectify the guard in RadosClient::wait_for_osdmap().
 624c056 librados: fix resources leakage in RadosClient::connect().
 65fbf22 librbd: RemoteAsyncRequest: fix comparison operator
@@ -11023,6 +12469,7 @@ eb45f86 rgw: flush xml header on get acl request
 de2e5fa rgw: finalize perfcounters after shutting down storage
 cab246d librbd: Don't do readahead for random read.
 77689f1 ReplicatedPG::C_ProxyRead: fix dropped lock
+7e8dccc rados/thrash: add short_pg_log variant to encourage backfill
 0e3af8d ReplicatedPG: only populate_obc_watchers if active
 b31221a OSD: allow recovery in peered
 818b91b osd/: go "peered" instead of "active" when < min_size
@@ -11117,18 +12564,23 @@ ca214c9 rgw: also convert sharded replicalog entries
 c4a6eab rgw: fixing rebase casualties
 791c15b rgw: convert old replicalog entries if needed
 778a53a rgw-admin: add replicalog update command
+30e5f3d do not try and install libcephfs-java via extra_packages
 1cb10d7 async: add same behavior when message seq mismatch with simple
 8d56ade tests: ensure RBD integration tests exercise all features
 b6d6f90 mon/MDSMonitor: fix gid/rank/state parsing
 9b9a682 msg/Pipe: set dscp as CS6 for heartbeat socket
 1e236a3 mds: don't join on thread which has not been runned.
 6939e8c Update git submodule to use the same https protocol
+ee3174d rados: add size/min_size combinations to rados/thrash
 e393810 librbd: make librbd cache send read op with fadvise_flags.
 a23676b librbd: Don't do readahead for random read.
 8d0295c rgw: extend replica log api (purge-all)
 6b0151c utime: extend utime parsing
 e274e10 rgw: fix replica log indexing
+8a5f76d fs: move into the ceph-fuse mount before doing a test
 0be7925 Update PendingReleaseNotes for ceph-fuse change in Giant
+c4fea8c rados: exercise a few different crush tunable profiles
+8d2715e mds_scrub_checks: only direct ops at the active MDS
 77bd883 test_msgr: add auth enabled basic test
 408db65 async_msgr: crc configuration in messenger
 ce941f6 async: Delete authorizer when connected
@@ -11138,6 +12590,8 @@ e5ddc50 tests: bring back useful test 'ceph tell osd.foo'
 5b8d47c PG looking-up/checking does not need to hold the osd_lock since it is being protected by pg_map_lock, remove the assertion.
 7c59bc0 cleanup: replace some length() with !empty()
 17add06 cleanup: replace some size() with !empty()
+d261e32 Added "branch: dumpling" to fix #10577
+9e9d7c9 ceph: set crush tunables
 9a9670c tests: better EPEL installation method
 bf05ec1 tests: replace existing gtest 1.5.0 with gmock/gtest 1.7.0
 5301b2b librbd: trim header update not using AIO
@@ -11159,8 +12613,17 @@ e6f1280 librados: Expose RadosClient instance id through librados
 398bc96 librbd: Create async versions of long-running maintenance operations
 87ef462 rgw: format mtime of radosgw-admin bucket stats
 dc1630e librbd: trim would not complete if exclusive lock is lost
+f2224b9 suites/rbd: add copy-on-read tests
+8747897 suites/rbd: split qemu and librbd tests
+f6145a3 suites/rbd: Add rbd api tests with exclusive locking features
 3347e0d bug: error when installing ceph dependencies with install-deps.sh
 4e90a31 osd: add failure injection on pg removals
+3e540b2 rados/singleton: test interrupted pg removal
+41a99f5 mds_flush: fix the flush test output expectations
+cb74060 ceph: no need to stop() before restart()
+83dd3d5 ceph: add wait_for_failure command
+c234887 samba: run only on ubuntu until #10627 is resolved
+1dcff96 samba: run only on ubuntu until #10627 is resolved
 9b220bd ceph.spec.in: use wildcards to capture man pages
 51e3ffa rgw: reorder bucket cleanup on bucket overwrite
 313d6a5 rgw: access appropriate shard on bi_get(), bi_put()
@@ -11178,6 +12641,7 @@ f1f6f0b ReplicatedPG::hit_set_persist: write out oi.digest, these objects are im
 050a59e osd/OSDMap: remove unused variables
 55dfe03 librbd: schedule header refresh after watch error
 33f0afd doc: Some files still use http://ceph.newdream.net
+9ace238 teuthology: some suites still use http://ceph.newdream.net
 2f9b3bd ceph-debugpack: fix bashism of {1..10}
 5652a1d cls_rgw; fix json decoding and encoding of certain type
 dfccd3d bufferptr: Make set_offset() don't beyond the length.
@@ -11228,9 +12692,19 @@ b04f698 Doc: Fix the extra blank space in doc/start/quick-rbd.rst
 dc11ef1 PGBackend: fix and clarify be_select_auth_object
 26656e3 rgw: fix bucket removal with data purge
 b18b14b ObjectStore::_update_op: treat CLONERANGE2 like CLONE
+3c339d4 suites/rados: exercise heartbeat prioritization
 4d3b49e rbd: ensure aio_write buffer isn't invalidated during image import
 500f4b4 rgw: assign versioned_epoch on bucket listing response
+e587612 Formatting changes
+14e2765 Fix wrapped line
+bff2358 Fix: objectstore name changed to keyvaluestore
+b239dad ceph_manager: Fix Thrasher kill_osd() to ignore pool going away
+4eda296 ceph_manager: Thrasher fixes to run() calls to set check_status = False
+e0b0d41 ceph_manager: Re-enable Thrasher ceph-objectstore-tool testing
+4a195c7 ceph_manager: In Thrasher randomly remove past intervals
+f9e088f ceph_manager: In Thrasher disable ceph_objectstore_tool tests if old release missing command
 8cbfac4 Wrong HTTP header name
+8215931 suites/fs: isolate quota task
 a0af5de qa: move fs quota to its own dir
 2ce38db osd: fix some compile warning
 b9be97f Doc: Fix the typo in doc/rbd/rados-rbd-cmds.rst
@@ -11247,6 +12721,7 @@ b1b299d osd: revert d427ca35404a30e1f428859c3274e030f2f83ef6
 988d007 ReplicatedPG::on_change: clear cache at the end
 8453f71 mon: Do not allow empty pool names when creating
 68af510 ceph-disk: do not reuse partition if encryption required
+e92d2e4 rados: include msgr tests
 48963ba test_msgr: Rename unittest_msgr to ceph_test_msgr
 b311e7c mon: PGMonitor: skip zeroed osd stats on get_rule_avail()
 8be6a6a mon: PGMonitor: available size 0 if no osds on pool's ruleset
@@ -11367,10 +12842,13 @@ b771524 rgw: get rid of put_obj_meta(), replace with put_system_obj()
 aec13bd rgw: pass around object context refrences, remove unused code
 db143ea rgw: remove plain object processor
 ce2abde rgw: start reorganizing RGWRados
+a6b8e82 tasks/mds_flush: use new stray/purge perfcounters
 ffee996 cmake: Fix CMakelist.txt to remove depleted files
 d53275c ceph_test_objectstore: fix keyvaluestore name
 738f868 PGBackend: do not update oi digest on inconsistent object
 8ab3c41 osd: new pool safeguard flags: nodelete, nopgchange, nosizechange
+2c1cef6 tasks/mds_journal_repair: add test_table_tool
+bf13a4e tasks/mds_journal_repair: create new test
 6613358 Revert "Merge remote-tracking branch 'origin/wip-bi-sharding-3' into next"
 66f3bd6 tools: output per-event errors from recover dentries
 3e15fd5 tools: handle hardlinks in recover_dentries
@@ -11474,6 +12952,8 @@ b383b52 rgw: enable s3 get/set versioning ops
 7cd5e9d rgw: restful op to set bucket versioning
 0d97b40 rgw: get bucket versioning status op
 8ed79d6 rgw: add versioning_enabled field to bucket info
+927a990 Restored giant-x on next
+21a4afc Restored giant-x suite on master
 50547dc mon: PGMonitor: fix division by zero on stats dump
 dbaa142 rgw: bilog marker related fixes
 c4548f6 pybind: ceph_argparse: validate incorrectly formed targets
@@ -11583,7 +13063,9 @@ d532f3e remove unused hold_map_lock in _open_lock_pg
 5b0e8ae mailmap: Yehuda Sadeh name normalization
 3f03a7b doc/release-notes: v0.91
 4ca6931 doc/release-notes: typo
+3d420ff radosgw_admin: don't use boto for testing 301 responses
 e7cc611 qa: ignore duplicates in rados ls
+adc9bda rgw: set default region and zone to all clients
 e5591f8 qa: fail_all_mds between fs reset and fs rm
 26a2df2 mailmap: Josh Durgin name normalization
 d6a9d25 doc/release-notes: v0.80.8
@@ -11628,6 +13110,8 @@ f3a57ee rgw: wait for completion only if not completion available
 fc5cb3c osd/ReplicatedPG: remove unnecessary parameters
 78b2cf0 osd: force promotion for watch/notify ops
 c8bef13 osd/OpRequest: add osd op flag CEPH_OSD_RMW_FLAG_PROMOTE
+2e5dcf2 test: add write_fadvise_dontneed.yaml to test write_fadvise_dontneed.
+717dffe rados.py: Add write_fadvise_dontneed field
 a78a93e rgw: bi list, update marker only if result not empty
 24aec12 rgw: fix memory leak
 33dc07c rgw: initialize RGWBucketInfo::num_shards
@@ -11666,8 +13150,17 @@ f9b280e Adjust bi log listing to work with multiple bucket shards. Signed-off-by
 90a3920 Add a new field to bucket info indicating the number of shards of this bucket and make it configurable.
 364b868 mon/Paxos: consolidate finish_round()
 67a90dd mon: accumulate a single pending transaction and propose it all at once
+e580eca repair_test: hinfo_key error injection
+ae68658 ceph_manager: add ceph-objectstore-tool wrapper
+0465c0c ceph_manager: add object helpers
+f3bd336 repair_test: rework indentation to make it pep8 clean
+f08d95a repair_test: rework to remove levels of indirection
 d159586 PendingReleaseNotes: make a note about librados flag changes
 725d660 (tag: v0.91) 0.91
+bd4926c ceph_manager: rework indentation to make it pep8 clean
+50e7e6d ceph_manager: fix docstring typo
+7234481 ceph_manager: define pool context
+7029dba Revert "add erasure code repair / scrub tests"
 9264d25 common/Formatter: new_formatter -> Formatter::create
 617ad5d common/Formatter: improve json-pretty whitespace
 83c3b13 common/Formatter: add newline to flushed output if m_pretty
@@ -11676,6 +13169,20 @@ e2a7b17 osd/PG: remove unnecessary publish_stats_to_osd() in all_activated_and_c
 b578a53 qa: set -e explicitly in quota test
 8d706cd osd/ReplicatedPG: init ignore_cache in second OpContext ctor
 792ac7c osd/ReplicatedPG: fix cancel_proxy_read_ops
+0117451 repair_test: hinfo_key error injection
+236049b ceph_manager: add ceph-objectstore-tool wrapper
+372665d ceph_manager: add object helpers
+f3a91d2 repair_test: rework indentation to make it pep8 clean
+9203259 repair_test: rework to remove levels of indirection
+764f615 ceph_manager: rework indentation to make it pep8 clean
+1f14626 ceph_manager: fix docstring typo
+666aecc ceph_manager: define pool context
+e2494c5 Fix: objectstore name changed to keyvaluestore
+7d4bb55 ceph_manager: Fix Thrasher kill_osd() to ignore pool going away
+cf8bc06 ceph_manager: Thrasher fixes to run() calls to set check_status = False
+1903b7b ceph_manager: Re-enable Thrasher ceph-objectstore-tool testing
+b6f7b17 ceph_manager: In Thrasher randomly remove past intervals
+0df5e84 ceph_manager: In Thrasher disable ceph_objectstore_tool tests if old release missing command
 7c664fa Doc: Fix the indentation in doc/rbd/rbd-snapshot.rst
 1c01c3a tests: create unittest_librbd
 c105003 tests: add mock librados API for supporting unit tests
@@ -11687,6 +13194,9 @@ c105003 tests: add mock librados API for supporting unit tests
 e3678f4 mon: check min_size range.
 7945f8d librbd: flush pending AIO requests under all existing flush scenarios
 2dd0f03 librbd: AIO requests should retry lock requests
+4c27048 tasks/calamari_setup: build ice-tools virtualenv if building iceball
+bbe22d2 tasks/calamari_setup: remove HTTP-downloaded iceball, if any
+4377f00 tasks/calamari_setup: use ice_version to completely specify iceball
 6176ec5 librbd: differentiate between R/O vs R/W RBD features
 544ed96 librbd: Add internal unit test cases
 17f22d9 librbd: Add ImageWatcher unit test cases
@@ -11697,6 +13207,7 @@ ccadff1 librbd: Integrate librbd with new exclusive lock feature
 f5668d6 librbd: Create image exclusive lock watch/notify handler
 2ecd874 osd: enable filestore_extsize by default
 b76d0dc os/FileStore: verify kernel is new enough before using extsize ioctl
+c3eee83 fs: enable client quota on cfuse_workunit_misc
 f4ff12a drop ceph_mon_store_converter
 a4152db mon/MDSMonitor: add confirm flag to fs reset
 8630696 qa: add `fs reset` to cephtool tests
@@ -11732,6 +13243,7 @@ d5e8ef5 doc: Fix PHP librados documentation
 a16e72c doc: mon janitorial list is now a wishlist
 19955fc doc: Replace cloudfiles with swiftclient in Python Swift example
 5461368 mon: paxos: queue next proposal after waking up callbacks
+1f557c5 Calamari_setup now allows > 1 mon.
 d375532 rgw: return InvalidAccessKeyId instead of AccessDenied
 dd57af2 rgw: return SignatureDoesNotMatch instead of AccessDenied
 a238834 tests: install parted in centos Dockerfile
@@ -11754,12 +13266,22 @@ c528d87 erasure-code: test repair when file is removed
 bcdbbd5 os: fix confusing indentation in FileJournal::corrupt
 ecc3bca os: remove debug message leftover in FileJournal
 63c8dd0 msg: initialize AsyncConnection::port
+b1e35a4 tasks/mds_auto_repair: remove unneeded log settings
+997c0c3 tasks/cephfs: remove `config` from Filesystem cons
+4ff4819 tasks/cephfs: umount unused clients before running
+8630c4a tasks/cephfs: move ROOT_INO into filesystem module
+5e3f576 tasks/cephfs: be more precise in set_clients_block
+951d6c2 tasks/cephfs: use set_conf() from test case class
+cfbc277 tasks/cephfs: move common setUp/tearDown to parent
+6d79e39 tasks/cephfs: move wait_until helpers to testcase
+213641f tasks/ceph: update mds auth caps to 'allow *'
 c60f88b Bump memstore_device_bytes from U32 to U64
 8811df3 FileStore: return error if get_index fails in lfn_open
 20be188 osd: assert there is a peering event
 492ccc9 osd: requeue PG when we skip handling a peering event
 49d114f librados: Translate operation flags from C APIs
 8d52782 rgw: use gc for multipart abort
+5644a32 Use qemu-kvm in the qemu task for rpm based systems.
 6edfcc1 mds: allow 'ops' as shorthand for 'dump_ops_in_flight'
 1617fbb osd: allow 'ops' as shorthand for 'dump_ops_in_flight'
 3d2fbf7 tests: group clusters in a single directory
@@ -11769,6 +13291,8 @@ bd7be04 doc: don't suggest mounting xfs with nobarrier
 3646e1e encoding: wrap ENCODE_START/FINISH arguments
 fa96bb4 librbd: Stub out new exclusive image feature
 478629b rgw: index swift keys appropriately
+31ff241 Inherit teuthology's log level
+f6bcbe3 tasks/mds_flush: remove #10387 workaround
 97cc409 mon/MDSMonitor: fix `mds fail` for standby MDSs
 c400ba1 mon/MDSMonitor: respect MDSMAP_DOWN when promoting standbys
 487c22a init-ceph: stop returns before daemons are dead
@@ -11817,6 +13341,7 @@ dbae922 osd: Add Transaction::TransactionData for fast encode/decode
 3bd0042 osd: remove unused Transaction fields
 de02134 tests: resolve ceph-helpers races
 bea2d4a qa: drop tiobench suite
+8a31712 drop tiobench tests
 8618a53 cli: ceph: easier debugging (pdb) with 'CEPH_DBG=1'
 226b61a common/PriorityQueue: add test cases for it
 aff4499 common/PrioritizedQueue: do not 'using namespace std'
@@ -11855,6 +13380,8 @@ b7b4534 common: log which experimental features are enabled.
 55405db ms: mark async messenger experimental
 44ce7cc os: rename keyvaluestore-dev -> keyvaluestore; mark experimental
 8a55972 common: add 'enable experimental data corrupting features'
+7696799 Revert "Move output in task/s3readwrite"
+3960530 Revert "Move output in task/s3readwrite"
 783956c tools: ceph-monstore-tool: validate start/stop vals
 60e9c3f tools: ceph-monstore-tool: output number of paxos versions dumped
 b27ca4c tools: ceph-monstore-tool: raw map to stdout if outfile not specified
@@ -11881,7 +13408,9 @@ e4e1777 AsyncMessenger: fix the leak of file_events
 aa56ee4 mon: provide encoded canonical full OSDMap from primary
 d7fd6fc osdc/Objecter: improve pool deletion detection
 a540ac3 librados: only call watch_flush if necessary
+0220cbb Revert "tasks/ceph_manager: dump raw cluster command at debug level"
 6b030aa mds: add default ctor for quota_info_t
+b0ebeba tasks/ceph_manager: dump raw cluster command at debug level
 ee6529b AsyncMessenger: Fix leak memory
 b41a739 10132: osd: tries to set ioprio when the config option is blank
 9aa65aa Makefile: Restore check
@@ -11912,6 +13441,7 @@ e89bafb remove unmatched op code comparasion
 6f8aad0 tests: recovery of a lost object in erasure coded pools
 f4da356 objectstore: add fadvise_flags operations in ObjectStore::Transaction::append func.
 992e7ea tests: remove spurious lines from osd-scrub-repair.sh
+083f896 suites/fs: hook in mds_flush
 bb921a8 packages: add python-virtualenv and xmlstarlet
 6ce1469 tests: reduce centos docker intermediate steps
 b15bd60 tests: add docker-test.sh --ref giant|firefly etc.
@@ -11952,6 +13482,7 @@ e68d771 osd: drop vestigal invalid_snapcolls fields from scrub
 3f7b2cf osd/osd_types: add {data,omap}_digest to object_info_t
 8a75800 osd/ReplicatedPG: kill some dead scrub code
 056de09 osd/ReplicatedPG: set and invalidate data/omap digests on osd ops
+ecd9de4 rados/thrash: add readproxy facet
 9d5d491 doc: Adds updated ceph-deploy man page under man/
 8c38cc6 doc: Fixes a typo in ceph-deploy man page.
 46a1a4c If trusty, use older version of qemu
@@ -11961,22 +13492,37 @@ e68d771 osd: drop vestigal invalid_snapcolls fields from scrub
 ecbdbb1 tests: temporarily disable unittest_msgr
 08bd1e1 (tag: v0.90) 0.90
 49c2322 doc: Instead of using admin socket, use 'ceph daemon' command.
+7696afd tasks: create mds_flush task
 a302c44 ceph-disk: Fix wrong string formatting
 2f63e54 cleanup: remove sync_epoch
 19dafe1 Remove sepia dependency (use fqdn)
 1eb0cd5 osd: only verify OSDMap crc if it is known
 1b7585b stop.sh: only try killing processes that belong to me
 0af2a1c qa/workunits/rest/test.py: fix pg stat test
+25ae69f Add calamari suite definitions.
+efa6c36 Add calamari_nosetests task.
+1bba98b Remove obsolete tasks/calamari
+75287f0 ceph_objectstore_tool: fix pep8 errors
+3c2aaa9 ceph_objectstore_tool: --op list now prints [pg,object]
+cec763b rados/singleton: debug watch-notify test
 2a1bd76 .gitmodules: update ceph-object-corpus auth repo to github
 623ebf0 osd: clear ReplicatedPG::object_contexts when PG start a new interval
 1f9c087 AsyncConnection: Fix time event is called after AsyncMessenger destruction
 20ea086 PipeConnection: Avoid deadlock when calling is_connected
 9783a5c test/msgr/test_msgr: Fix potential unsafe cond wakeup and wrap check
+95e6578 rados/thrash: enable client debug for api tests
 bba4d35 librados: init last_objver
 2cd9dc0 messages/MClientCaps: init peer.flags
 679652a osd: fix leaked OSDMap
 18f545b librados: Avoid copy data from librados to caller buff when using rados_read_op_read.
 001ea29 Messenger: Create an Messenger implementation by name.
+d3864df suites/fs: hook in mds_full task
+849e259 tasks: add mds_full task
+f9609c4 tasks/cephfs: let get_config use non-mon services
+8fa6b15 tasks/cephfs: enable osd epoch get from mounts
+f94ec69 tasks/mds_client_limits: fix initial rm -rf
+9c55b81 tasks/kclient: add 'debug' option
+dedcc64 tasks/cephfs: enable kclient for mds_* tasks
 3a2cb71 mds: fix asok on rank 0
 8de9a0f doc: Adds updated man page for ceph under man/
 8b79617 doc: Changes format style in ceph to improve readability as html.
@@ -11993,8 +13539,10 @@ c664818 doc: Adds updated man page for ceph-deploy under man/
 e638469 doc: Updates man page for ceph-deploy.
 a806778 qa: test zero size xattr
 42dc937 librados: avoid memcopy for rados_getxattr.
+eec179c ceph_objectstore_tool: Changes for rename of binary ceph-objectstore-tool
 4f72ba5 ceph_objectstore_tool: Rename generated binary to ceph-objectstore-tool
 1b2b344 MDS: do not allow invocation of most commands on an inactive MDS
+e05d7e3 kcephfs: test inline data
 69fa532 ceph.spec.in: quote %files macro in comment
 aea232c client, librados, osdc: do not shadow Dispatcher::cct
 378ebb7 python-rados: refactor class Rados a little bit
@@ -12037,6 +13585,7 @@ c765de6 common/hobject_t: fix whitespace
 69e169d os/DBObjectMap: new version v2; drop support for upgrading from v1
 462bad3 ceph_test_rados: generate mix of small and large xattrs
 456255b os/DBObjectMap: include hash in header key for EC objects
+47d56d7 radosgw-admin: wait before trimming usage
 9f53eeb doc: add cephfs ENOSPC and eviction information
 a8babcb client: add 'status' asok
 6fdf890 client: propagate flush errors to fclose/fsync
@@ -12085,6 +13634,7 @@ e90818f mailmap: Dan Mick name normalization
 cc05518 mailmap: Adam Spiers affiliation
 41707ee mailmap: Nilamdyuti Goswami affiliation
 3886734 ceph_test_rados_api_io: fix new test
+98f750b krbd: add huge_tickets workunit
 e3ba3d2 ceph_test_rados_api_watch_notify: use 5 min timeout for notifies
 a2572c3 ceph_test_stress_watch: do not unwatch if watch failed
 35f084d qa: add script to test how libceph handles huge auth tickets
@@ -12181,6 +13731,7 @@ b038e8f Call Rados.shutdown() explicitly before exit
 78eed52 ceph_test_rados_api_watch_notify: weak assert cookie is valid (it's a ptr)
 6f43c6c osdc/Objecter: pass correct cookie value to error
 b34e545 os/FileStore.cc: insert not empty list<Context*> to op_finisher/ondisk_finisher.
+0f9192f tasks/watch_notify_same_primary: fix test for new watch/notify
 7ab4a39 ceph.conf: update sample
 efd9d8d tests: Minor cleanup to librbd test
 78a15ee Fix libstriprados::remove, use strtoll instead of strtol
@@ -12192,6 +13743,7 @@ bab7122 OSD: FileJournal: call writeq_cond.Signal if necessary in submit_entry
 4036b91 os: FileJournal: fix uninitialized FileJournal throttle. Since firefly, take() in class Throttle returns early if(0 == max.read()); if the throttle is not initialized with a max value it does nothing, so initialize it in FileJournal.
 6b51a9f mds: set dirfrag version when fetching dirfrag is skipped
 17c72f5 ceph-osd: remove extra close of stderr
+09b82e2 tasks: add test case for readonly MDS
 5836899 Revert "client: support listxattr for quota attributes"
 89b2fee mon: 'osd crush reweight-all'
 dd7b58f crush: set straw_calc_version=1 for default+optimal; do not touch for presets
@@ -12207,6 +13759,7 @@ dfcb1c9 client: cleanup client callback registration
 eeadd60 crush/CrushWrapper: fix _search_item_exists
 a198dee Modifying the docs to add the Get pool commands to match the CLI. Signed-off-by: Chris Holcombe <chris.holcombe at nebula.com>
 3a84602 Include common/likely.h in buffer.h
+1156a74 Fixes #10269 http://tracker.ceph.com/issues/10269
 e8b412c mailmap: Zhiqiang Wang name normalization
 c0ce4a5 Cleanup: Use get_type() instead of get_header().type
 c2d9333 WBThrottle: make bytes/ios/inode_wb's perf counter effective
@@ -12224,6 +13777,7 @@ b3e3fae AsyncConnection: Avoid hungry if mark_down's caller is the eventcenter's
 f5bf75f python-rados: Add object lock support
 f6d81d3 PipeConnection: Modify "is_connected" semantic
 2dfda54 Messenger: Add unit tests
+f3b3d75 radosgw_admin: test full sync using existing tests
 dc67cd6 rgw: switch to new watch/notify API
 1ac17c0 osd_types: op_queue_age_hist and fs_perf_stat should be in osd_stat_t::operator==
 9029813 common/ceph_context: don't import std namespace
@@ -12259,6 +13813,7 @@ f55a1f8 mds: disallow flush dentry/journal when MDS is readonly
 f4f1880 mds: properly unregister asok commands
 818a807 mds: drop dirty dentries in deleted directory
 ff901b5 arch: add support for HW_CAP based neon runtime detection
+4487c3b Changed workloads in 2-workload to run in parallel
 360d627 pybind/test_rados: add test for wait_for_complete_and_cb()
 19212cd rados.py: fix misnamed 'wait_*' routines, add true wait/wait-with-cb
 a53dbab librados:: carry IoCtx ref from WatchInfo
@@ -12390,8 +13945,13 @@ e3e5741 tests: vstart_wrapper.sh must call vstart.sh with a list of daemons
 b8f6b5f doc: Added rbd-replay-many and restructured index.
 54d5ed3 doc: Fixed index syntax.
 3012c4a doc: add CentOS 7 to recommended OSes
+108dab6 Update vps.yaml
+2bb1e52 Update vps.yaml
 6862891 doc: Adds man page for ceph disk in TOC.
 491da51 client: invalidate kernel dentries one by one
+8fc3550 tasks/ceph: move FS creation inside Filesystem
+89beed1 tasks/ceph: conservative PG count in FS pools
+d761831 tasks: add mds_auto_repair
 2fa4884 mds: fix race of trimming log segments
 70e1a5d doc: Document RBD Replay
 131f092 mds: don't blindly create empty object when dirfrag is missing
@@ -12408,6 +13968,7 @@ cfef515 mds: disallow slave requests when MDS is readonly
 4f6474f mds: disallow write operations when MDS is readonly
 01df222 osd: tolerate sessionless con in fast dispatch path
 0f1c9fd msg: do not const methods that return a mutable pointer
+26a33c3 Move output in task/s3readwrite
 0d6c803 osd/osd_types: drop category from object_info_t
 5ecdce3 osdc/Objecter: drop category from copy-get
 d229548 osd/ReplicatedPG: drop category support from CREATE
@@ -12450,11 +14011,15 @@ dbb5a48 librados: Only public API symbols from the shared library
 03a61d2 Minor typos and trailing spaces
 782a74c KineticStore: Fix compile error and add _test_init
 21798f9 doc: fix some typos in ObjectStore.h
+7fd26c4 Update vps.yaml
+7e0fd50 Update vps.yaml
 016080d doc: Adds the updated man page for ceph-disk utility.
 8a48847 doc: Updates the man page for ceph-disk utility with some changes.
 3b00c6f safe_io: do not set ending \0 in safe_read_file()
 e6410eb added some consts Signed-off-by: Michal Jarzabek <stiopa at gmail.com>
 17b5fc9 mon: OSDMonitor: allow adding tiers to FS pools
+6086ad5 Fix for #10178
+3d47b73 Fix for #10178
 9a118d5 doc: Adds man page for ceph-disk utility.
 242dd1c doc: Removes ceph-deploy usage instances from ceph-disk man page.
 cb820f8 erasure-code: test NEON arch selection logic
@@ -12502,11 +14067,29 @@ eaa9889 mailmap: Update Dan Mick's attribution
 e424d07 client: Fix problem in get_quota_root with update parent and ancestor
 6c7bb8c blkdev: using strncpy instead of strcpy.
 1fe8b84 PGLog: include rollback_info_trimmed_to in (read|write)_log
+6923e68 ceph_objectstore_tool: minor fixes
+a09d3a3 ceph_objectstore_tool: fix flake8 issues
+f10edda ceph_objectstore_tool: add assert so that teuthology knows when this test fails
+9a089ef ceph_objectstore_tool: skip unused osds
+be39bd5 ceph_objectstore_tool: run tests on erasure coded pools
+fbb33f3 ceph_objectstore_tool: only run get/set bytes for replicated pools
+2b57f4c ceph_objectstore_tool: use CephManager::get_pool_dump
+d0caf5c ceph_objectstore_tool: add pgnum option
+c9e6ede ceph_objectstore_tool: encapsulate into a function
+d591226 ceph_objectstore_tool: add hinfo_key tests for erasure coded objects
+9ef8887 ceph_objectstore_tool: keep all json object representation
+f9367ae ceph_objectstore_tool: tests only needs 1 machine
+b925be1 ceph_manager: add pool type constants
+fd1ec39 ceph_manager: fix typo in get_pgid docstring
+2abb9f9 ceph_manager: factorize with get_pool_dump
+6dac43f ceph_manager: factorize with get_osd_dump_json
 627f138 Updated sepia hardware list.
 7a868fd ReplicatedPG: remove unused parameter in function write_update_size_and_usage
 0d89db5 mds: store backtrace for straydir
 a79ba32 mds: verify backtrace when fetching dirfrag
 5177759 KeyValueStore: Fix parse_header_key
+5fc42b9 ceph_manager: add get_pool_dump to return the json pool dump
+f353c15 ceph_manager: add get_osd_dump_json to get the full output
 9d84d2e osd: deep scrub must not abort if hinfo is missing
 92662a9 mailmap: Loic Dachary name normalization
 77c1a35 rgw: support swift storage policy api
@@ -12521,6 +14104,8 @@ dcecfb8 MemStore: Return -ENODATA when collection_getattr hit nonexist attr
 00b275b StoreTest: Add collection_getattr(s) tests
 ffb6f78 KeyValueStore: Remove assert for collection_getattr method
 f3dab44 ceph_objectstore_tool: Add feature called set-allow-sharded-objects
+852fe69 Further changes for calamari_setup.py
+357fd22 Add calamari_setup
 b3021b0 ceph_objectstore_tool: Add utility routine get_osdmap()
 86baf2d ceph_objectstore_tool: Clear ...INCOMPAT_SHARDS from feature if exporting replicated pg
 d3d5852 FileJournal: add journal_discard to control ssd whether support discard
@@ -12541,6 +14126,8 @@ fed3b06 ceph-disk: run partprobe after zap
 0d350b6 librbd: protect list_children from invalid child pool IoCtxs
 de547c9 Fix bug #10096 (ceph-disk umount race condition)
 06fc39c mon: PaxosService: can be readable even if proposing
+a10e7d3 Revert "Copied giant-x suite to next branch"
+2c442c9 Copied giant-x suite to next branch
 d8a7db8 mon: Monitor: use session's entity name for audit log
 125b58d mon: Monitor: forward session entity name to leader
 ca8e1ef mon: Monitor: stash auth entity name in session
@@ -12588,6 +14175,8 @@ a5a0d5c doc: Adds build-doc guidelines for Fedora and CentOS/RHEL.
 83e8b07 librbd: Only public API symbols from the shared library
 8dde6a6 ceph_test_rados_api_tier: fix cleanup of whiteouts
 34e4d24 osd/ReplicatedPG: allow whiteout deletion with IGNORE_CACHE flag
+b4f1412 ms die on skipped message = false for all krbd, kclient tasks
+219b72b ms die on skipped message = false for all krbd, kclient tasks
 a04bb13 Mailmap: add Rongze organization
 ef2565e vstart.sh: complain less about fullish mon disks
 cba4ed4 librados: Fix operator= null ptr references
@@ -12618,6 +14207,7 @@ a46fb02 Makefile: include 'ceph' in base target
 e8a60ce mailmap: BJ Lougee affiliation
 e8daba6 mailmap: Derrick Schneider affiliation
 e386588 mailmap: Federico Gimenez name normalization
+c58af9f fs/basic/tasks/cfuse_workunit_misc: timeout at 6h
 1cd7422 mon: ceph-monstore-tool must close()
 d6be062 tests: fix compilation warning
 0ee6437 doc: remove superfluous white spaces (for testing)
@@ -12633,6 +14223,7 @@ ebc8875 AsyncMessenger: Support select for other OS such as Windows
 079a8d7 Add myself to <contact at intel.com>
 5ce0919 ceph_objectstore_tool: Fixes to make import work again
 a69b845 vstart.sh: warn less
+0a88b34 fs/multiclient: force mpi tests onto trusty
 6c80525 qa: handle CEPH_CLI_TEST_DUP_COMMAND on ceph osd create
 3776f07 common/Formatter: fix JSON dump of floating point values
 1b6cf7c mon/PGMap: fix {recovery,degraded,unfound}_ratio units, type
@@ -12682,6 +14273,7 @@ c0836ca osd: use encoded osdmap bl instead of reencoding
 c914df2 qa/workunits/cephtool/test.sh: ceph, not ./ceph
 66b920a qa/workunits/fs/misc: combine sudo and echo effectively
 197a2ab OSD: batch trim for pg_log
+f790691 smoke/basic/tasks/libcephfs_interface_tests.yaml: debug client
 9850227 crush: fix incorrect use of adjust_item_weight method
 2d7adb2 erasure-code: erasure_code_benchmark exhaustive erasure exploration
 3ff2816 erasure-code: add erasure_code_benchmark --verbose
@@ -12694,6 +14286,7 @@ efe121d erasure_code: implement ceph_erasure_code to assert the existence of a p
 f590130 erasure-code: store and compare encoded contents
 c44bdb1 erasure-code: document pool operations
 54ee8ee KeyValueStore: move buffers from strip_header to BufferTransaction
+5f19ef7 tasks/radosbench: no log to stderr
 a0c1f22 tests: use kill -0 to check process existence
 17f5c36 tests: looping to wait for an osd to be up is expected
 79f8b81 tests: increase timeout to accommodate slow machines
@@ -12708,6 +14301,7 @@ f80499e osd/ReplicatedPG: flush snaps immediately
 a21bca1 mailmap: Loic Dachary affiliation
 740a1bd tools: error handling on journal import/export
 3e0295f doc: Added Dumpling to Firefly upgrade section.
+6c26c07 mds_scrub_checks: Run scrub and flush commands against the MDS.
 15d487f MDS: clean up internal MDRequests the standard way
 07e0831 MDS: CInode: break out of validation early on symlinks
 f1677e7 common/ceph_strings: add some MDS internal op names to ceph_mds_op_name()
@@ -12739,8 +14333,11 @@ aed1498 MDS: CInode: add a fetch_backtrace() utility function
 80fac9e mds: add an MDSInternalContextWrapper and MDSIOContextWrapper
 dc307ac tools: persist trimmed_pos in journal dump
 fb29e71 tools: fix MDS journal import
+74e7761 repair_test: Wait for OSDs to come up before proceeding with test
+0bbe983 Add scrub_test and repair_test to rados basic suite
 abc995b qa/workunits/fs/misc: combine sudo and echo effectively
 3aa7797 qa: use sudo even more when rsyncing /usr
+edb780a tasks/cephfs/mount: use separate for testing flock and posix lock
 2a61735 Fedora 19 uses systemd but there is no systemd-run available in the release (rhbz#1157938); this patch makes sure that the init scripts check for the availability of systemd-run before they use it (otherwise, they fall back to the default method)
 5ac05d4 Fix tests on btrfs: leftover subvolumes removed
 762eda8 osdc: fix Journaler write error handling
@@ -12808,8 +14405,11 @@ c5f8d6e osd: past_interval display bug on acting
 c96fe59 doc: update RBD for Juno
 56ee3b4 doc/release-notes: it's 8MB, not 32MB
 f7431cc msg/Pipe: discard delay queue before incoming queue
+1c329a4 upgrade/dumpling-firefly-x: sync with giant
+4c90d9d upgrade/firefly-x: sync with giant
 c51c8f9 (tag: v0.87) 0.87
 ce6f22d AsyncMessenger: Add kqueue support
+71d92c6 Added timeout: 6h to one more workunit
 5a4c3aa client: allow xattr caps in inject_release_failure
 214ac9f doc: include 'fs new' stuff in cephfs quickstart
 1fef4c3 Get the current atime of the object in the cache pool for eviction
@@ -12912,18 +14512,25 @@ f76f83c osdc/Objecter: fix tick_event handling in shutdown vs tick race
 9d09e37 ECTransaction: Remove unused typedef.
 60eaeca .gitmodules: ignoring changes in rocksdb submodule
 a9dd4af rgw: send http status reason explicitly in fastcgi
+c3b53c3 apache: switch to use the apache worker mpm
+35c9cae apache: change template to load mpm worker module
 44a8d59 java: fill in stat structure correctly
 cb9262a Objecter: resend linger ops on any interval change
 1a3ad30 ReplicatedPG: writeout hit_set object with correct prior_version
 8ae942a Remove unnecessary expressions about conf_journal_sz
 024efeb EC: document the LRC per layer plugin configuration
+4ddadf0 Thrasher: Disable ceph_objectstore_tool tests if old release missing command
 a460c3b check rocksdb library when '--with-librocksdb' option is enabled
 95a0ee1 qa: use sudo when rsyncing /usr so we can read everything
 fa07c04 qa: use sudo when rsyncing /usr so we can read everything
+3db4d6f apache: change template to load mpm worker module
+f660ada apache: switch to use the apache worker mpm
 4128814 FDCache: purge hoid on clear
 3abbd4c shared_cache: add purge and tests
 c116b4b shared_cache::add: do not delete value if existed
 227ecd8 mon: Monitor: MMonGetMap doesn't require caps
+523cb63 ceph_manager: ceph_objectstore_tool testing off by default
+1b8d319 Smarter s3tests branch selection
 9803ced init-radosgw.sysv: set ulimit -n before starting daemon
 e81d887 Make better error reporting for client's call to rados.Rados
 d9ff3a6 PG:: reset_interval_flush and in set_last_peering_reset
@@ -12954,6 +14561,8 @@ e72dfb4 Default configure with nss instead of cryptopp
 a566610 doc: update injectargs syntax
 a458bd8 cli: do not parse injectargs arguments twice
 f1afb18 cli: add verbose information related to argument parsing
+98ef86a erasure-code: enclose no-lrc in a task
+85221bc rgw: mark ec related rgw suites as slow backend
 a4fcc21 mailmap: Jiantao He name normalization
 b42627b mailmap: Ma Jianpeng name normalization
 13643d6 mailmap: Wyllys Ingersoll affiliation
@@ -12961,6 +14570,7 @@ b42627b mailmap: Ma Jianpeng name normalization
 d80ea6a mailmap: Mehdi Abaakouk affiliation
 4eee5f0 mailmap: VRan Liu affiliation
 d8b260a mailmap: Yann Dupont affiliation
+263805d rgw: mark ec related rgw suites as slow backend
 65be257 Fix read performance regression in ObjectCacher
 349eb51 tests: mon id is incorrectly skipped
 9fbc083 rados command: Add format support for ls operation
@@ -12994,17 +14604,52 @@ ea100ac KeyValueStore: Add clone op detect to promote error
 f207416 mailmap: Cheng Cheng name normalization
 f76a676 osd: log when scrub,deep scrub, repair starts
 64d977b client: fix signed/unsigned compilation warnings
+54eb13f s/thrashosds/workload for now in dumpling-firefly-x/stress-split suite
+949cfc6 replace thrashosd with a workload. avoid running 2 thrashosds in the same job.
 cb290a1 osdc: Fix compiler warning
 2ae1cba mon: MDSMonitor: wait for osdmon to be writable when requesting proposal
 0dd473c mon: MDSMonitor: have management_command() returning int instead of bool
 91b2a2b mon: MDSMonitor: don't return -EINVAL if function is bool
 1d1ae41 mon: MDSMonitor: check all conditions are met *before* osdmon proposal
 07b7f10 mon: MDSMonitor: return if fs exists on 'fs new'
+542f3e4 s/branch:dumpling/
+ab6c737 do not upgrade client when workload is in progress; do not run 2 rgw tasks in a row from the same client
+676053c Cleaned up confusing upgrade-sequence numbering
+ecf6732 Revert "Changed s3tests to run off "branch: giant-noreason" per Yehuda's fix for #9169"
+eed790b Changed s3tests to run off "branch: giant-noreason" per Yehuda's fix for #9169
 2c06413 mds: fix null dereference
 9e6ae73 mds: reply -EOPNOTSUPP for unknown lock type
+1fd89f4 apache: switch to use the apache worker mpm
+14b5a9a apache: change template to load mpm worker module
+a295c18 Thrasher: Disable ceph_objectstore_tool tests if old release missing command
+7e41c93 tasks/thrashosds: support overrides
+bdbcf76 ceph_manager: ceph_objectstore_tool testing off by default
+01b556a Smarter s3tests branch selection
+4ed7b8e Added timeout: 6h to workunit
+ad24d98 erasure-code: isa plugin thrashosds suite
+81fa93c erasure-code: dumpling-firefly-x/parallel/6-final-workload is incorrect
+963c635 erasure-code: enclose no-lrc in a task
+6146d66 fixed dumpling-firefly-x suite to upgrade to firefly branch during upgrade-sequence0
+57588e2 rgw: mark ec related rgw suites as slow backend
+4d38b5d upgrade/dumpling-x: do not do ec test on mixed cluster
+10c4a22 erasure-code: upgrade/firefly-x/stress-split-erasure-code
+e98813a erasure-code: do not include k=3,m=1 in dumpling-x
+cd3fc3d erasure-code: add thrash-erasure-code to suites/rados
+e48a0a3 erasure-code: unfound test needs a non empty file
+7a2df8d erasure-code: fix bogus ec-rados-default.yaml in some suites
+75ee20d erasure-code: CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 integration tests
+9aee760 erasure-code: assert that firefly-x/stress-split cannot load lrc
+1c5526d fixed firefly-x suite to not upgrade mon.c twice and optimized the suite as well. included another upgrade combination to dumpling-firefly-x suite
+70592df optimized the suite and added tests to upgrade client first
+6bae21c optimized the suite and added ceph_objectstore_tool: false to thrashosds wherever applicable
+47e4d17 Added ceph_objectstore_tool: false, fixes #9805
+116ddeb removed upgrading clients in parallel with workloads; also do not run rgw workloads on mixed osds.
 bb9add6 test: fix compile warning in bufferlist.cc
 a49255f librbd: fix compile warning in librbd/internal.cc.
 69b5c72 doc: updates on Backfill Reservation
+4a3574c Added print
+d46b316 Added client.0 upgrade to firefly step
+cb18611 Fixes #9729, running incompatible tests while the cluster is mixed between firefly and dumpling. Changed to the rados/test-upgrade-firefly.sh version
 4606af7 mon: unit tests for osd crush rename-bucket
 aa67560 mon: add the osd crush rename-bucket command
 29d13d4 crush: unit tests for CrushWrapper rename item/bucket methods
@@ -13013,9 +14658,12 @@ aa67560 mon: add the osd crush rename-bucket command
 f810710 add unittest for Striper::get_num_objects
 236895e crush: improve constness of CrushWrapper methods
 7b66ee4 when non-default striping is used, internal methods can operate on extra objects
+39fe58a Added print
+4f99720 Added client.0 upgrade to firefly step
 3741aab auth: unit tests for auid display
 5558afa qa/workunits/rbd/import_export.sh: be case insensitive
 fcc3692 auth: add display auid to KeyServer::encode_secrets
+e45d0be Fixes #9729, running incompatible tests while the cluster is mixed between firefly and dumpling. Changed to the rados/test-upgrade-firefly.sh version
 fa2ff33 auth: add display auid to KeyRing::encode_formatted
 2cbebc3 doc: correct command of `config push`
 107cb0d rgw: correct "If-Modified-Since" handle.
@@ -13023,13 +14671,25 @@ ac92c45 qa/workunits/fs/misc: Add a workunit for file lock interruption
 b0e6e85 mds: fix neighbor lock check
 b61468d doc/dev/sepia: notes about sepia email list and irc channels
 6705180 doc/release-notes: v0.80.7
+f4432e6 apache: set MaxRequestsPerChild to 0
 c9f9e72 Revert "Objecter: disable fast dispatch of CEPH_MSG_OSD_OPREPLY messages"
 00907e0 msg/simple: do not stop_and_wait on mark_down
+8a87a08 tasks/s3tests: add slow backend configurable
+7a0f52d multiversion/dumpling-x: remove rgw
 22637f4 doc: update lab notes
 7022679 librbdpy: Added missing method docstrings
+b966da7 multi-version: do not run new ceph_test_rados against dumpling
 6753923 mds: reply -EOPNOTSUPP for unknown lock type
 c4bac3e mds: fix inotable initialization/reset
 c95bb59 mds: fix inotable initialization/reset
+0ecd9ea Bring in file to fix missing symlink.
+e1c0a99 Removed second ceph-fuse
+6140917 tasks/s3tests: add slow backend configurable
+df6d69d Removing files not in master
+5533b1d Fixed dirs duplicates
+4db9517 document 'command' requirements on admin_socket method
+5e88efd Synced up d-f-x and f-x suites with the latest from master
+935566d Added missing ceph task
 f1fccb1 rpm: 95-ceph-osd-alt.rules is not needed for centos7 / rhel7
 b73fe1a doc: remove node requirement from 'mon create-initial'
 264f0fc doc: remove whitespace
@@ -13043,17 +14703,23 @@ e464a77 client: use atomic variable to track reference of MetaRequeset
 fbf4d47 KeyValueStore: Make clone error message more friendly
 b426460 GenericObjectMap: sync transaction avoid lookup_parent failed
 674c911 StoreTest: Add clone xattr test
+61a7888 Minor modifications, added prints
 d947050 osd/osd_types: consider CRUSH_ITEM_NONE in check_new_interval() min_size check
+8fcdb09 upgrade/dumpling-x: do not do ec test on mixed cluster
 5ff4a85 rpm: 95-ceph-osd-alt.rules is not needed for centos7 / rhel7
 50987ec libcephfs.h, libcephfs.cc: Defined error codes for the mount function. Used the new error codes from libcephfs.h to replace the magic numbers in the mount function found in libcephfs.cc.
+47de6f0 Removed duplication of overrides
 7bab093 return value of handle_message for MSG_OSD_SUBOP/MSG_OSD_SUBOPREPLY should be true
 d955676 rados: Use strict_strtoll instead of strtoll
 809ddd2 osdc/Objecter: fix use-after-frees in close_session, shutdown
 72a2ab1 osdc/Objecter: fix tick() session locking
+48be6e4 Incorporated Tamil's comments
 d98b755 librados: Fix function prototypes in librados.h
 d458b4f PGLog::IndexedLog::trim(): rollback_info_trimmed_to_riter may be log.rend()
 022bace rados: Add tests for CLI strict parsing
+181e0fe Fixed typo in client name
 26fe180 test: Fix rmpool in test_rados_tool.sh
+194a008 Added print statement
 459b2d2 mds: Locker: remove unused variable
 6a2303a client: add helper function that updates lock state
 b17b43a fuse: enable fuse_multithreaded by default
@@ -13063,12 +14729,19 @@ e075c27 common: link mds/flock.o to libcommon
 a8d597f Fix error message when stripping with format 1
 66afcd9 Check pointer before deleting
 2c7c03c ceph_erasure_code_benchmark: fix parameter handling
+8813371 tasks/mds_client_recovery: file lock test
 8021581 client: add missing dendl and s/dout/ldout/
 fa539b9 qa/workunits/fs/misc: fix syntax error
+36b8cd5 Still having fun fixing yaml syntax!
+d189b8a Removed brackets (finally!)
 9b18d99 PG::choose_acting: in mixed cluster case, acting may include backfill
 3dd4cca rgw: set length for keystone token validation request
+3779b44 Fixed one more typo
 3cd8a7f mds: Locker: fix a NULL deref in _update_cap_fields
+41d6ca2 Fixed empty line
+ec01b9c First draft of stable release upgrade suite for giant
 3b9dcff rados: Parse command-line arguments strictly
+202259a Fixed upgrade steps flow
 aa138eb mds: MDirUpdate: initialize discover count to 0 if we're not discovering
 2a9ed93 mds: MDSAuthCaps: init "read" param in default constructor
 ce4436c client: print out mode, uid, gid if they are changed
@@ -13076,6 +14749,9 @@ ce4436c client: print out mode, uid, gid if they are changed
 c43c85f test_rgw_admin_log.cc: fix use-after-free
 c60a170 test/librados/c_write_operations.cc: free resources
 86ebaf6 os/FileStore: Because do_fiemap will do fsync, don't do fsync() before calling do_fiemap.
+5b34f93 Fixed git issues
+780c4dd Fixed typo
+8fbcf32 Added a step 'upgrade-sequence0' to initial install task, renamed dirs
 042c536 rados_list_parallel.cc: cleanup before return from run()
 3bba7ce test/librbd/fsx.c: fix potential unterminated string handling
 b414b1d ErasureCodeIsa.cc: reduce scope of variables
@@ -13086,6 +14762,8 @@ d75856b osd/ReplicatedBackend.cc: prefer ++operator for non-primitive iterators
 4669233 mount.ceph.c: ensure '\0' terminated string
 865a0dc build: add ceph-erasure-code-corpus to gitmodule_mirrors
 f06ffba Makefile.am: make sure everything was build before make check
+acf33b5 suites/fs: enable debug on recovery tests
+6ac9efe tasks/cephfs: say which test failed in exception
 9b3d345 qa: move mon_mds tests last
 e27cf41 qa: cephtool tests for `tell mds.X`
 620a722 qa: fixup cephtool test when MDS exists
@@ -13171,6 +14849,8 @@ d14ca34 0.86
 efb23b8 common: set_ioprio debug message including pid
 62f0ef4 common: do not set ioprio if pid is not set
 19c92d8 doc: missing osd prefix to ioprio config options
+b4e0395 Removed client upgrade, fixes #9642
+7ba50e0 tasks/ceph_manager: enable log for ceph_objectstore_tool
 97dcc05 (tag: v0.86) 0.86
 32e8bcd Run configure without liblttng on squeeze as well.
 be6de4a Run configure without liblttng on squeeze as well.
@@ -13184,7 +14864,9 @@ b2e4bd5 msg: move SimpleMessenger to msg/simple/
 5eff0ee msg: use localized cct for derr
 06aef6f doc/release-notes: v0.86
 10fe7cf ceph_objectstore_tool: Accept CEPH_ARGS environment arguments
+7509b6d Fixed typo
 6aba0ab Add reset_tp_timeout in long loop in add_source_info for suicide timeout
+6cdf323 Fix directory names.
 52ac520 tools: remove old ceph.cc
 63c7e16 test/osd/Object: don't generate length of 0
 abe4c35 doc: update kernel recommendations, add tunables link
@@ -13200,7 +14882,9 @@ ca10ce4 Add one more case ( "=" ) and test get_str_vec as well. Signed-off-by: Y
 f1becf9 qa: ceph tell must retry on ENXIO
 234b066b Crush: Ensuring at most num-rep osds are selected
 5c6c366 debian/control: fix python-ceph -> ceph file move to allow upgrades
+7ad5429 upgrade suite for stress-split tests for d-f-x
 35fd272 messages: provide an explicit COMPAT_VERSION in MMDSBeacon
+16e564e erasure-code: isa plugin thrashosds suite
 177a33b MemStore: Need set/get_allow_sharded_objects() to work for ceph_objectstore_tool
 0b155d0 ceph_objectstore_tool: MemStore needs a CephContext
 7f6c31b debian/control: BuildDepend on lsb-release
@@ -13227,15 +14911,23 @@ d6b702c mon: MonCap: add new profiles
 6f69837 test: minor case fix
 eb2f0f4 ceph-disk: bootstrap-osd keyring ignores --statedir
 fa0bd06 ceph-disk: bootstrap-osd keyring ignores --statedir
+fbb36a4 erasure-code: upgrade/firefly-x/stress-split-erasure-code
+8c1e7b0 erasure-code: do not include k=3,m=1 in dumpling-x
+9d2cc2f erasure-code: add thrash-erasure-code to suites/rados
 19be358 PG::actingset should be used when checking the number of acting OSDs for a given PG. Signed-off-by: Guang Yang <yguang at yahoo-inc.com>
 8253ead osdc/Objecter: use SafeTimer; make callbacks race-tolerant
+6065f0b powercycle: fuse_default_permissions = 0 for kernel build
+fc88521 krbd: fsx: bump number of ops, enable discard
 6c37984 mailmap: Yehuda Sadeh name normalization
 beff616f ceph-disk: set guid if reusing a journal partition
 50e8040 tools: rados put /dev/null should write() and not create()
+c598b8e erasure-code: unfound test needs a non empty file
 0b0a373 mailmap: update email address
 188370a doc/release-notes: fix attributions for 8702 fix
 c0dc3a5 doc/release-notes: v0.80.6
+7dc6f94 added ceph config option to firefly-x in order to turn off mon warning
 5b41d80 rgw: swift GET / HEAD object returns X-Timestamp field
+41b578f Fixing branch assumptions
 29356d8 qa: fix osd pool ls invalid test
 a1aa06b ReplicatedPG: dump snap_trimq on pg query
 34f38b6 ReplicatedPG: do not queue the snap trimmer constantly
@@ -13243,12 +14935,15 @@ b29bf00 ReplicatedPG: clean out completed trimmed objects as we go
 3374a0b BtrfsFileStoreBackend.cc: fix string overflow
 8b7fc61 test_librbd.cc: fix compiler warning, cast to size_t
 e506f89 Objecter: check the 'initialized' atomic_t safely
+2e2e6eb s/giant/firefly when setting the crush tunables
 0f731ae Revert "ReplicatedPG: clean out completed trimmed objects as we go"
+7fa0e17 s/giant/firefly
 fea0154 mon: break recovery status onto separate lines
 46a76d5 mon: put 'ceph status' quorum status on new line
 e127c89 mon: put 'ceph status' health items on separate lines
 da9ae5c ceph.spec: fix typo
 63b30d4 librbd: Skip tier pools in list_children and snap_unprotect
+471f261 removing upgrade suites that are no longer required
 63d0ec7 rgw: add civetweb as a default frontend
 d8ae14f librados: Add rados_pool_get_base_tier call
 5b58f16 test_librbd.cc: fix compiler warning
@@ -13261,9 +14956,13 @@ d8ae14f librados: Add rados_pool_get_base_tier call
 a470c96 mon: fix JSON `fs ls` output
 2955b3d ObjectStore: Add "need_journal" interface to make aware of journal device
 05fd507 Pipe: avoid redundant new/delete for struct iovec
+8265f0d remove dumpling-firefly-giant as dumpling-firefly-x covers this test
+3276f8f fixed firefly-x and dumpling-firefly-x
 cefb1a3 mon: wait for paxos writes before touching state
 46c1d93 mon: flush paxos write before setting shutdown state
 4072ef7 mon: move paxos write wait into a helper
+824031b upgrade/dumpling-giant-x: remove
+2844971 upgrade/cuttlefish, emperor, old: remove
 624aaf2 PG: release backfill reservations if a backfill peer rejects
 62e2bca osd: swap state spinlock for atomic_t
 a8ac4b6 osdc/Filer: drop probe/purge locks before calling objecter
@@ -13273,11 +14972,13 @@ d34c21c test-shared-cache:   Initial draft for the unit test of "common/shared_c
 de87d54 common: document C_GatherBuilder and C_Gather
 a67c2f9 mds: Add session to sessionmap when its state becomes opening
 1feba20 Objecter: init with a constant of the correct type
+9a2974e Fix workunit branches.
 46d5518 osd: do not bind ms_objecter messenger
 fe3434f debian: move ceph_rest_api.py into ceph
 8cda623 ceph.spec.in: move ceph_rest_api.py into ceph
 b241624 ceph.spec: fix python-flask dependency
 e42424e debian: python-flask is needed by ceph, not python-ceph
+ff03b46 tasks/mds_client_recovery: client trim its cache on reconnect
 984a147 mailmap: Riccardo Ferretti affiliation
 8d9fcbc mailmap: Casey Marshall affiliation
 26941ea mailmap: Feng Wang affiliation
@@ -13299,6 +15000,12 @@ b167f70 mailmap: Sahid Ferdjaoui affiliation
 b386b59 mailmap: JuanJose Galvez affiliation
 e6bba0d mailmap: Roman Haritonov affiliation
 e133a92 mailmap: Yongyue Sun affiliation
+70bcda5 suites: enable mds_client_limits test
+c2d298a tasks: wait for mds active before mounting clients
+0073e25 tasks: rename FuseMount.get_client_id to get_global_id
+b77b3be tasks: add mds_client_limits
+1fa1501 tasks: generalise CephFSTestCase
+b6ccf0d tasks: generalize config writing for Filesystem
 beade63 qa/workunits/cephtool/test.sh: fix thrash (ultimate)
 5d1d9db librados: cap the IoCtxImpl::{aio_}*{write,append} buffer length
 5f029ff os/FileStore: using FIEMAP_FLAGS_SYNC instead of fsync() before call fiemap.
@@ -13306,9 +15013,12 @@ beade63 qa/workunits/cephtool/test.sh: fix thrash (ultimate)
 becc114 librados: test s/E2BIG/TooBig/
 32195f9 librados: cap the rados_aio_*{write,append} buffer length
 f777fc6 osd: Make RPGTransaction::get_bytes_written return the correct size.
+62fa6b1 Fixing branch assumptions
 7849d79 crushtool: add --show-location <id> command
 33501d2 librados: cap the rados*{write,append} buffer length
 f8ac224 ceph-disk: add Scientific Linux as a Redhat clone
+e68f657 Use the correct versions of existing tests
+5b8fa62 ec-rados-default is not fit for dumpling parallel upgrade
 6b4d1aa Crush: Ensuring at most num-rep osds are selected
 5ca7ea5 ceph-disk: add Scientific Linux as a Redhat clone
 7827e00 os: io_event.res is the size written
@@ -13332,12 +15042,23 @@ fc1380b mailmap: Thorsten Glaser affiliation
 0f884fd For pgls OP, get/put budget on per list session basis, instead of per OP basis, which could lead to deadlock.
 7f87cf1 ReplicatedPG: clean out completed trimmed objects as we go
 2cd9b5f tests: use memcmp to compare binary buffers
+b77ef02 ec-rados-default is not fit for dumpling parallel upgrade
+8852355 added multi-version for giant-clients vs firefly-servers
+2ac486d added upgrade suite for dumpling-firefly-giant
+3ea4b1d smoke: fix pjd debug conf syntax
+8f49a7d tasks: wait for active after mds restart
+67ca844 erasure-code: assert that firefly-x/stress-split cannot load lrc
+c7603c0 added upgrade suite for dumpling-firefly-giant
 c17ac03 ReplicatedPG: don't move on to the next snap immediately
 255b430 osd: initialize purged_snap on backfill start; restart backfill if change
+bc8fb2b adjusting crush tunables when running a mixed version of mons and osds. bug 8765
 f833f12 rgw: rados->set_attrs() updates bucket index
 44cfd88 rgw: PutObjMetadata, clear bufferlist before appending into it
 4be53d5 PG: check full ratio again post-reservation
+f466267 added workload tag
+e36c888 rgw: increase the number of OSDs for erasure coded pools
 9c825ec documentation: revise placement group number guide
+a5a1cce included an option to ceph_objectstore_tool, whenever we have keyvaluestore_backend as a configurable parameter
 7f71c11 ceph-mon: check fs stats just before preforking
 9687150 erasure-code: isa/lrc plugin feature
 f51d21b erasure-code: restore jerasure BlaumRoth default w
@@ -13360,11 +15081,13 @@ ce8eefc osd/ReplicatedPG: do not clone or preserve snapdir on cache_evict
 682b9da os/FileJournal: do not request sync while shutting down
 544b8c7 ReplicatedPG::on_removal: clear rollback info
 781f05c Revert "PG::init: clear rollback info for backfill as well"
+55e5160 erasure-code: fix bogus ec-rados-default.yaml in some suites
 a53ead1 osd: Cleanup boost optionals
 28b7b93 mds: remove spurious logging
 226c0c7 test: check python rados aio_read with buffers containing null
 8bda44f pybind: Fix aio_read handling of string buffer
 9d9c8c7 Filer: add lock to protect struct PurgeRange
+cdcb315 fixed indentation in the yaml
 b47fdd4 rbd: Use a rolling average to compute RBD write throughput
 f3acae4 rgw_main.cc: add missing virtual destructor for RGWRequest
 eeb74a1 os/GenericObjectMap.cc: pass big parameter by reference
@@ -13388,6 +15111,12 @@ af039ce test/librados/aio.cc: close resource leak
 639c981 documentation: explain ceph osd reweight vs crush weight
 f51e368 erasure-code: fix assert overflow
 13780d7 mon: osd find / metadata --format plain fallback
+9b1c3fe suites: enable mds_client_limits test
+d9ec7f2 tasks: wait for mds active before mounting clients
+3e07bd1 tasks: rename FuseMount.get_client_id to get_global_id
+7274289 tasks: add mds_client_limits
+d777d71 tasks: generalise CephFSTestCase
+6f36269 tasks: generalize config writing for Filesystem
 0fb0967 documentation: tiering typo
 7ac60d8 tests: check osd health in cephtool/test.sh
 4d75c4c tests: qa/workunits/cephtool/test.sh ! and -e
@@ -13398,6 +15127,12 @@ fb5a244 osd/Watch: handle con without sessions
 93dccdb osd/ReplicatedPG: handle sessionless op con in do_osd_op_effects
 5d37850 osd: drop dead shutdown message handler
 bb45621 common: Add cctid meta variable
+1af3281 Update giant.yaml
+a06d674 Update giant.yaml (cherry picked from commit 90b6eec23fad6b3af1305de9c59a4759e076ebd3)
+5fc0414 Update giant.yaml
+84a63fa Create dumpling-giant-x stress-split upgrade suite
+403a859 Update giant.yaml (cherry picked from commit 35b3f4617c54dcb9705ca31489e2348bdb0e18b5)
+1140e18 dumpling-giant-x: use giant version of workunits
 c421b55 mon: re-bootstrap if we get probed by a mon that is way ahead
 d81cd7f mon/Paxos: fix off-by-one in last_ vs first_committed check
 9132ca4 rbd-fuse: Fix memory leak in enumerate_images
@@ -13408,6 +15143,10 @@ d6913ae mds: fix not journaling client metadata
 600af25 client: introduce a new flag indicating if dentries in directory are sorted
 59c8976 qa/workunits/fs/misc: Add a workunit for ACL
 2bd7cee client: trim unused inodes before reconnecting to recovering MDS
+d1f9f10 suites: turn on debugging for smoke cfuse pjd test
+1f950d2 Update giant.yaml
+90b6eec Update giant.yaml
+3dd1314 Update giant.yaml
 95ee699 client: Replace client readahead logic with Readahead
 ad45d8c librbd: Add read-ahead
 830373e osdc: Fix ObjectCounter::_readx perf counters
@@ -13416,6 +15155,7 @@ ad45d8c librbd: Add read-ahead
 5495570 msg: hexdump messages we can't decode at configurable debug level
 7e13ac8 rgw: Export user stats in get-user-info Adminops API
 480c372 Revert "crushtool: safeguard for missing --num-rep when --test"
+7d08640 tasks: escaping '*' when deleting files
 288f05a mds: use new Session::human_name in health msgs
 6320e53 mds: implement const SessionMap::get_session
 32b51bb mds: generate friendly name for client sessions
@@ -13437,6 +15177,7 @@ c837fb9 mds: return xattrs when client explictly requests xattrs
 10b8966 crushtool: safeguard for missing --num-rep when --test
 9a65b39 doc: Fixed broken hyperlink.
 399fa80 script/run-coverity: update submodules
+366ee00 tasks: more substantial IO for journal migration
 123c3fb test/mon/mkfs.sh: add check for default keyring
 8ea86df test/ceph-disk.sh: resolve symlinks before check
 8b27997 mon: pool create must not always create a ruleset
@@ -13450,6 +15191,7 @@ daf6379 mailmap: correcting Zhiqiang Wang's mailmap and org affiliation
 457790d KeyValueStore: Reduce redundant "make_pair"
 1a8b91b doc: Add keyvaluestore config description
 bb49547 KeyValueStore: Reduce redundancy set_header call
+38f7af0 s/tasks/workload Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
 306fb2f mds: set new inode's xattr version to 1
 1b9226c osd: subscribe to the newest osdmap when reconnecting to a monitor
 56ba341 osdc/Objecter: fix command op cancellation race
@@ -13457,6 +15199,7 @@ baf7be9 osdc/Objecter: cancel timeout before clearing op->session
 1149639 ceph-disk: mount xfs with inode64 by default
 ded1b30 erasure-code: preload fails if < 0
 27208db doc: Added feedback.
+bda325b suites/fs: update to latest mdtest
 a140439 mds: limit number of caps inspected in caps_tick
 bf590f8 mds: keep per-client revoking caps list
 a6a0fd8 xlist: implement copy constructor
@@ -13470,10 +15213,14 @@ e6062b8 mds: add a health metric for failure to recall caps
 21f5e18 client: fix potentially invalid read in trim_caps
 9007217 client: more precise cap trimming
 c328486 client: fix crash in trim_caps
+1d9101c tasks: fix race in test_stale_caps
+4daf2dd tasks: typo in mds_client_recovery
+bc25767 tasks: handle failure cleanly in test_stale_caps
 83fd1cf bugfix: wrong socket address in log msg of Pipe.cc
 868b6b9 doc: osd_backfill_scan_(min|max) are object counts
 cdb7675 rbd: ObjectCacher reads can hang when reading sparse files
 ddd52e8 init-radosgw.sysv: Support systemd for starting the gateway
+991a83c Removed cuttlefish part, fixes #9461
 d32b428 doc: Added bucket management commands to ops/crush-map
 d446a65 documentation: jerasure plugin is sub section of erasure code profile
 4fb3e29 documentation: isa plugin is sub section of erasure code profile
@@ -13481,7 +15228,9 @@ d446a65 documentation: jerasure plugin is sub section of erasure code profile
 b8a1ec0 doc: fixes a formatting error on ops/crush-map
 c3c6468 mds: update segment references during journal rewrite
 a8c943a log: add simple test to verify an internal SEGV doesn't hang
+67a7fd2 rgw: set debug rgw=20
 2313ce1 client: fix a message leak
+7a0ef8e First draft of firefly-giant-x suite
 e3fe18a global/signal_handler: do not log if SEGV originated inside log code
 558463e log: add Log::is_inside_log_lock()
 386f2d7 mds: update segment references during journal rewrite
@@ -13508,14 +15257,19 @@ b7bdb93 erasure-code: fix erasure_code_benchmark goop
 3945ead OpTracker: The TrackedOp::events list was not protected while dumping ops
 1de43e0 OpTracker: Removed _dump_op_descriptor_unlocked call for log level < 5
 3fac790 OpTracker: Sharding logic is implemented to improve performance
+d43850e Added RHEL7 to matrix.
 2fbe82d doc: Fixed syntax error.
 7dbf750 doc: Updated authentication notes. Fixed syntax error.
+25d2537 erasure-code: CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 integration tests
+d6f02d6 erasure-code: assert that firefly-x/stress-split cannot load lrc
 7281638 mds: sleep in progress thread if laggy and waiting_for_nolaggy waiters
 6fb5769 mds/Beacon: do not reconnect to mon in quick succession
 4ad5db0 client: include ll_ref when printing inode
 cf70b90 test: Fix ceph_test_rados_watch_notify to delete the pools it creates
 2e4e98b ReplicatedPG: Make perfcounter record the read-size for async-read.
 cf34e00 ReplicatedPG: correctly record subop for perfcounter.
+fd12e0d Revert "Use same upgrade-test as firefly."
+5be4aaf Use same upgrade-test as firefly. The changes for firefly upgrade tests apply to giant as well.
 117de00 erasure-code: mon, osd etc. depend on the plugins
 5a05e6b [rgw][s3] Allow colon ':' in access key
 f05c977 mon: add 'osd pool ls [detail]' command
@@ -13527,6 +15281,7 @@ f05c977 mon: add 'osd pool ls [detail]' command
 b40cce7 osd: set min_read_recency_for_promote to default 1 when doing upgrade
 d734600 Change CrushWrapper::crush to private
 5a784cd osdc/Objecter: drop bad session nref assert
+f6582f8 tasks: add watch_notify_same_primary
 3c6e888 osd/ClassHandler: fix build
 d165238 FileStore: report l_os_j_lat as commit latency
 70ce400 osd/ClassHandler: improve error logging
@@ -13550,6 +15305,9 @@ c664179 KeyValueStore: Fix upgrade from non-exist superblock OSD version error
 2f2c37f mailmap: Marco Garcês affiliation
 0204998 mailmap: Erik Logtenberg affiliation
 284fb49 mailmap: François Lafont affiliation
+c729372 Create dumpling-giant-x stress-split upgrade suite
+35b3f46 Update giant.yaml
+8755466 dumpling-giant-x: use giant version of workunits
 6307536 configure: do not try yasm on x32
 d5777c4 doc: Fixed broken links and clarified terms for new authentication docs.
 e2de11a doc: Minor cleanup.
@@ -13581,6 +15339,8 @@ eaa2bb0 rgw_formats: dump_format_ns implementation
 2fb51b2 Formatter: add a dump_format_ns to base Formatter
 14d87bd Refactor ErasureCodeLrc::create_ruleset
 eae88da mds: clear objects' dirty flags after log segment is expired
+80242da Updated README
+8c43f47 First draft for a new suite dumpling-giant-x
 69638df doc: fix missing bracket
 35663fa doc: attempt to get the ayni JS into all head tags
 409c955 OSDMonitor.cc: fix potential division by zero
@@ -13596,6 +15356,8 @@ ece990d rados_list_parallel.cc: cleanup before return from run()
 4f35714 Parallelize RBD import/export
 f53bf53 PendingIO.cc: use static_cast instead of c-style cast
 cbd324d FileStore.cc: use static_cast instead of c-style cast
+c0650cf added a test to use single disk for both data and journal.
+1749ba9 added dmcrypt and separate_journal_disk options
 ca6eb61 client: vxattr support for rstat
 c2443b9 doc: Provided additional detail on daemon-specific caps syntax.
 8569b93 doc: Updated Keyring settings from comments and ceph-deploy defaults.
@@ -13630,14 +15392,24 @@ a754ce5 ErasureCodeLrc.cc: fix -Wmaybe-uninitialized compiler warning
 16cbaba osd/PGLog.h: prefer ++operator for non-primitive iterators
 8f368c5 mailmap: Ashish Chandra affiliation
 5fd50c9 mailmap: Boris Ranto affiliation
+bbf569d tasks: fix mount race in mds_client_recovery
 a5b4c58 lockdep: increase max locks (1000 -> 2000)
 9fac072 documentation: add the mark_unfound_lost delete option
 bec3032 osd: MissingLoc::get_all_missing is const
 e13ddc7 tests: qa/workunits/cephtool/test.sh early fail
 fc499aa mailmap: add .peoplemap
+1052f5c add osds to firefly-x upgrade suite
 177202e erasure-code: lowercase LRC plugin name
+a624d44 erasure-code: do not schedule isa workloads just yet
 4c9fdbf common/LogEntry: fix warning on x86_64
 a24c8ba common/LogEntry: fix warning on i386
+05eee9f ceph_manager: Add test code to use export/import to move a pg
+0cdf6e8 ceph_manager: Implement export/import when thrasher kills an osd
+9ade22d ceph_objectstore_tool: Add task for testing of tool of the same name
+fb7befb rgw: increase the number of OSDs for erasure coded pools
+378113c erasure-code: do not schedule lrc workloads just yet
+a019c84 erasure-code: add to suites/upgrade/*-x final workloads
+5d2a33d erasure-code: add various erasure-code workloads
 acfe62e Revert "os/FileJournal: For journal-aio-mode, don't use aio when closing journal."
 c776a89 os/FileJournal: stop aio completion thread *after* writer thread
 fa45ed8 osd/ReplicatedPG: do not evict blocked objects
@@ -13667,6 +15439,8 @@ e0b19e3 qa: fix+cleanup hadoop wordcount test
 76b8e57 erasure-code: preload the default plugins in the mon
 c3e1466 Test: fixing a compile warning in ceph_objectstore_tool.cc
 1b42726 Cache tiering: use local_mtime to calculate the age of objects during evicting
+f158594 added config_options to enable adding ceph config file entries to ceph-deploy task
+b3dfe47 Added dmcrypt option and ability to choose same or different disk for ceph journal
 335c1f7 doc: Added rbd-replay-prep and rbd-replay manpages to block device TOC.
 b965398 doc: Fixed broken hyperlink.
 7948e13 doc: Added sysctl max thread count discussion.
@@ -13793,6 +15567,8 @@ c6f4b42 sample.ceph.conf: some updates
 c2f21c0 osd: fix osd_tp shutdown
 8346e10 PG: mark_log_for_rewrite on resurrection
 cb4c5e8 OSD::session_notify_pg_create: requeue at the start of the queue
+1ba6e02 Moved mon.c to the first host with mon.a and mon.b to address the issue Sage found: "... supposed to have half dumpling, half x osds. but the steps that upgrade and restart the mons upgrade the packages on the second host (which should remain dumpling w/ osd 3,4,5)"
+752356f Moved mon.c to the first host with mon.a and mon.b to address the issue Sage found "... supposed to have half dumpling, half x osds.  but the steps that upgrade and restart the mons upgrade the packages on the second host (which should remain dumpling w/ osd 3,4,5)"
 3c847c5 common: ceph_context: add admin socket command 'config diff'
 ef51160 common: config: let us obtain a diff between current and default config
 4b8b25e tests: histogram prevent re-use of local variables
@@ -13802,7 +15578,9 @@ ee02293 tests: histogram prevent re-use of local variables
 0416b88 update license for libcephfs
 e6da732 PG: recover from each osd at most once
 8a1723f PG: make the reservation sets more descriptively named
+3afa81b Fixed typo
 bf3e483 mds: fix FP error in ROUND_UP_TO
+9316756 Added sequential: clause, so the test stops thrashing the mon after it runs that one workload
 4672e50 osd/OSDMap: encode blacklist in deterministic order
 a15ad38 vstart: start rgw on port specified by env var
 dbe6c79 don't update op_rw_rlatency/op_w_rlatency when rlatency is zero
@@ -13817,6 +15595,8 @@ d817a6a doc: Minor changes.
 5db51d6 doc: Added a few comments and links to other relevant docs.
 751b3e2 rgw: fix test to identify whether object has tail
 c7e1b9e ceph_test_rados_api_tier: make PromoteOn2ndRead test tolerate retries
+31df59f erasure-code: add jerasure k=3 m=1 workload
+12a391e thrashosds: increase osd revive timeout (75s -> 150s)
 73733dd documentation: update recovery config options defaults
 97b1916 CollectionIndex: Collection name is added to the access_lock name
 3e85041 rgw: admin ops create user API can not determine existing user
@@ -13939,9 +15719,12 @@ bcc69ed mds: adapt to new objecter interface
 74ce4f2 rbd: fix signess warning
 f7c0001 common: remove spurious uint32_t in buffer.c
 6ad8e61 Fix FTBFS on alpha due to incorrect check on BLKGETSIZE
+0ec5bd1 Implement ceph.created_pool
 2554243 mds/Server: rename perfcounters
 b0cc869 mds: rename a bunch of metrics
 31ef1a9 mds: set l_mds_req on client request
+9e53972 set boto timeout, too, for s3tests
+9d466aa tasks/s3tests: push boto config with idle_timeout setting
 06682c4 vstart.sh: debug rgw = 20 on -d
 00c677b rgw: use a separate callback for civetweb access log
 850242c rgw: separate civetweb log from rgw log
@@ -13953,6 +15736,14 @@ f246b56 common/shared_cache: dump weak refs on shutdown
 8c69054 osd: make coll_t::META static to each file
 493577b mds: fix crash killing sessions without conn
 dcf8c03 mds: logging in SessionMap
+0d37b8e suites/fs: add cephfs_journal_tool test
+83a745d gitignore: ignore vim temp files
+1855e09 suites/fs: add client recovery
+d001cc2 tasks/mds_client_recovery: use existing clients
+bb52a97 tasks/mds_client_recovery: network freeze test
+8211d83 tasks/ceph_fuse: enable umounting from config
+1e7bfb8 tasks/workunit: fix log message
+5c29ae6 tasks/ceph: add ceph.stop task
 01ce249 Revert "Merge pull request #2253 from adamcrume/wip-lttng"
 3ced97b mon: make dispatch(), _ms_dispatch() void
 610f4be mon: always process the message in dispatch
@@ -14041,14 +15832,19 @@ ae59946 lttng: Trace OpRequest
 7fa513e lttng: trace mutex::unlock
 115cfb3 tracing: bootstrap lttng-ust with mutex events
 e870fd0 os/FileJournal: For journal-aio-mode, don't use aio when closing journal.
+70a1f18 use 'mon create-initial' always
+5b946e1 ignore errors on informational service status
 a66a493 os/FileJournal: Only alloc the related resources when using aio.
 c8e2b89 os/FileJournal: Tune the judgment logic for read_header.
+389ad61 added a test with erasure_code_profile parameters
+e5c5bcf rgw: add erasure_code_profile configuration
 3ed8c68 erasure-code: do not preload the isa plugin
 4c2ae69 add pom.xml so as to deploy libcephfs to the maven repository. To build a jar: mvn package -Dversion=0.80-rc1-2008-gf71c889; the mvn deploy command will deploy the jar to maven central
 e45f5c2 TrackedOp:_dump_op_descriptor is renamed to _dump_op_descriptor_unlocked
 f680a24 TrackedOp: Removed redundant lock in OpTracker::_mark_event()
 5026a89 Rebased civetweb version to master's
 e665e62 Rebased and changed debug option
+bfe31be Add centos/rhel7 to distros 'all' folder.
 98b24f3 vstart.sh to enable support for simple RadosGW
 57778e2 os/FileJournal: signal aio_cond even if seq is 0
 5bf472a os/FileJournal: Update the journal header when closing journal
@@ -14077,6 +15873,7 @@ b016f84 doc: add notes on using "ceph fs new"
 948178a ceph_mon: check for existing mon store before opening db
 8336f81 (tag: v0.84) 0.84
 bda2301 qa/workunits/rbd/qemu-iotests: touch common.env
+a02c90b suites/hadoop: update workunit names
 1dc1fb8 qa/workunits/hadoop: move all hadoop tests into a hadoop/ dir
 3d3fcc9 qa/workunits/hadoop-wordcount: fix/use -rmr command
 adaf5a6 qa/workunits/hadoop-wordcount: use -x
@@ -14094,7 +15891,11 @@ cc3b5ad mailmap: Abhishek Lekshmanan affiliation
 5045c5c qa/workunits/rest/test.py: use rbd instead of data pool for size tests
 3279f3e qa/workunits/rest/test.py: do snap test on our data2/3 pool
 6d7a229 qa/workunits/rest/test.py: fix rd_kb -> rd_bytes
+f7b32bc rgw: httpd instead of httpd.worker
+6392758 rgw: need all of mod unixd, version, authz
 0e07f7f osd: fix theoretical use-after-free of OSDMap
+27b7ece tasks/rgw: include mod_authz
+2aae919 tasks/rgw: get mpm_event from mods-available, not mods-enabled
 904a5f1 vstart.sh: make filestore fd cache size smaller
 932e478 mon: track stuck undersized
 190dc2f mon: track pgs that get stuck degraded
@@ -14112,6 +15913,7 @@ a314999 osd: num_objects_misplaced
 29e93f7 os/FileStore: rename start_sync() -> do_force_sync()
 dd11042 os/FileStore: fix mount/remount force_sync race
 0395914 mailmap: Loic Dachary affiliation
+9de5bd1 Add extra conf for Apache 2.4
 c83c90c rgw: update civetweb submodule
 0d6d1aa init-ceph: don't use bashism
 7df67a5 Fix -Wno-format and -Werror=format-security options clash
@@ -14119,11 +15921,17 @@ ae0b9f1 osd: fix feature requirement for mons
 0db3e51 ReplicatedPG::maybe_handle_cache: do not forward RWORDERED reads
 5040413 ReplicatedPG::cancel_copy: clear cop->obc
 2f0e295 unittest_osdmap: test EC rule and pool features
+984f3ce rgw.yaml: keep the client.0 key even if empty
 0b27610 Remove Old Wireshark Dissectors
 16dadb8 osd: only require crush features for rules that are actually used
 1d95486 crush: add is_v[23]_rule(ruleid) methods
+821b2a4 replace locally instantiated CephManager
+9782465 initialize ctx.manager in ceph.py
+f53ea25 move functions from ceph to ceph_manager
+da00662 rgw: s/idle_timeout/default_idle_timeout/
 b22d693 lttng: Add distro packaging
 6891f4e lttng: Fix "make distcheck"
+4f9f023 rest-api tests: enable debugging
 c54f1e4 mon/Paxos: share state and verify contiguity early in collect phase
 3e5ce5f mon/Paxos: verify all new peons are still contiguous at end of round
 5c2d232 erasure-code: remap chunks if not sequential
@@ -14152,6 +15960,10 @@ a1c3afb OSDMonitor: Do not allow OSD removal using setmaxosd
 5d3a7e5 rgw: copy object data if target bucket is in a different pool
 aec684b add calamari to the api/index section
 ac70490 doc: update kernel recommendations (avoid 3.15!)
+54a7298 rgw: add default_idle_timeout to allow override
+6237acb rados.py: avoid CephManager creation race
+69ef854 s/apache2/apache
+4b15d01 use the right syntax for RHEL/CentOS distros to check for ceph status
 5374386 doc: Added user management link to quick start.
 5e8eae7 doc: Removed cephx intro. Moved details to user management, config, and architecture.
 3aa3e96 doc: Removed cephx guide. Replaced by user management.
@@ -14162,6 +15974,7 @@ aac6aa2 doc: Removed auth intro and auth docs and added user management to index
 0a49db8 msg/PipeConnection: make methods behave on 'anon' connection
 8512904 lttng: Support --with-lttng=check
 bb046ed mon/Paxos: put source mon id in a temp variable
+f00afcc overrides: rgw must not be nested in ceph
 d74d3f1 mds/MDSMap: fix incompat version for encoding
 369c639 mds/MDSMap: drop trailing else in favor of early return
 b2c1fa8 test/system/systest_runnable.cc: debugging on start and end
@@ -14240,7 +16053,12 @@ b75e8a3 ceph-disk: add get_partition_base() helper
 c7a1ceb ceph-disk: display information about dmcrypted data and journal volumes
 5be56ff osd/ReplicatedPG: only do agent mode calculations for positive values
 7b3714c osd: fix some line wrapping
+0b2a847 s/apache/apache2
+90379e0 Added a facet to test both apache and civetweb as frontend for rgw
+b5f7d84 bug 8927: enable civetweb testing for rgw
 df945a9 osd: fix require_same_peer_instance from fast_dispatch
+74025f9 Fixed syntax
+90f647f Added idle_timeout: to fix timeout problems for some tests
 3d7e2b3 osd: inline require_osd_up_peer
 e86fdef rgw: move generic server usage after all options
 b09b856 mon/Paxos: add perfcounters for most paxos operations
@@ -14249,6 +16067,7 @@ f80ed26 ceph-disk: move fs mount probe into a helper
 ea90d9f Revert "ReplicatedPG: do not pass cop into C_Copyfrom"
 300b5e8 ReplicatedPG: do not pass cop into C_Copyfrom
 24aeca9 ReplicatedPG::maybe_handle_cache: do not skip promote for write_ordered
+3f18b02 Added overrides for vps timeouts
 984f614 erasure-code: isa plugin must link with ErasureCode.cc
 1088d6c ceph-disk: fix log syntax error
 41e4461 doc/changelog: v0.67.10 notes
@@ -14308,6 +16127,8 @@ b6bf33c doc: Added configuration discussion at end of gateway install with links
 4260767 osd_types: s/stashed/rollback_info_completed and set on create
 d0ccb1c make ceph-disk use the new init flag for cluster
 23b4915 allow passing a --cluster flag to the init script
+4e1e929 Update module references
+0e1df3c Import teuthology tasks (master branch)
 6bd2b0f mds: Make min/max UID configurable for who is allowed to create a snapshot
 a5ecf15 powerdns: Update README with better markdown
 bf9726a mon/OSDMonitor : Use user provided ruleset for replicated pool
@@ -14315,6 +16136,7 @@ f1aad8b RadosClient: Fixing potential lock leaks.
 26750fc os/FileStore: force any new xattr into omap on E2BIG
 cc3112e rados: use STD{IN,OUT}_FILENO for magic values
 e3819b6 qa/workunits/rados/test_rados_tool: add a few xattr tests
+189e4a9 Removed upgrade/dumpling from master branch as it should be only in dumpling branch
 645c28a rados: optionally read setxattr value from stdin
 59a715a rados: don't add \n to getxattr
 e5b67f7 Update doc
@@ -14334,6 +16156,7 @@ ee2dbdb mon/PGMonitor: remove {rd,wr}_kb from pool stat dumps
 adb2791 mon/PGMonitor: add _bytes fields for all usage dumps
 895318c README.md: word wrap
 500b95e README: symlink from README.md
+f9aa9c1 ignore errors if 'ps aux' fails
 0114b33 erasure-code: rework ErasureCode*::parse methods
 77690f6 erasure-code: move to ErasureCode::decode_concat
 54394fa erasure-code: move to ErasureCode::to_{int,bool}
@@ -14354,11 +16177,17 @@ f773b24 powerdns: Define a application variable when not invoked from Shell
 b9b022e add annotation for rocksdb config option
 8dcfbd8 osd: simplify dout_prefix macros
 80829d7 osd: reorder OSDService methods under proper dout_prefix macro
+97f317d rados/singleton: make cache mode cycling include readforward mode
 047c18d doc/release-notes: make note about init-radosgw change
+fe6f461 create a --version flag for teuthology
+7d0a072 pull the version and description in setup.py
+a2bb186 add an initial version to teuthology
 354c411 doc: Added 'x' to monitor cap.
+0d1fe79 Tweak usage statement
 7b2c8b3 cls_rgw: fix object name of objects removed on object creation
 8519e9a rgw: need to pass need_to_wait for throttle_data()
 0620624 rgw: call processor->handle_data() again if needed
+21e2763 suite: fix build_matrix when non-yaml present in % dir
 516101a EC-ISA: provide a 10% faster simple parity operation for (k, m=1). Add simple parity unit test for k=4,m=1
 985b7c2 osd/ECBackend: clean up assert(r==0) in continue_recovery_op.
 8363a94 erasure-code: HTML display of benchmark results
@@ -14385,11 +16214,16 @@ d8e6415 mailmap: Ma Jianpeng affiliation
 98997f3 msg/SimpleMessenger: drop msgr lock when joining a Pipe
 e36babc os/MemStore: fix lock leak
 e93818d rgw: need to pass need_to_wait for throttle_data()
+14b03bc adding multi-version suite to test compatibility of clients running newer version of ceph against servers running older version.
 3de7b7c doc/release-notes: fix syntax error
+542dbd2 Added ec-readwrite.yaml to the mix to enable erasure code coverage
 c95e91e os/KeyValueStore: clean up operator<< for KVSuperBlock
+54af8b2 Tasks are failing since the "data" pool is no longer part of the default install
 1417ede ceph_test_rados_api_tier: test promote-on-second-read behavior
 0ed3adc osd: promotion on 2nd read for cache tiering
+3440d66 Expand '~' in ssh key paths
 13b9dc7 osd: add local_mtime to struct object_info_t
+03be707 kcephfs/thrash: add standby mds
 57fd60c rocksdb: -Wno-portability
 c574e65 autogen.sh: debug with -x
 213e0ac debian, rpm: build rocksdb statically if we can
@@ -14420,9 +16254,11 @@ caf554b osd/ReplicatedPG: improve agent_choose_mode args
 ce4e559 vstart.sh: limit open files
 ea4996d osd/ReplicatedPG: evict blindly if there is no hit_set
 5d1c76f osd/ReplicatedPG: check agent_mode if agent is enabled but hit_sets aren't
+f84458b ceph_manager: increase osd revival timeout from 75 -> 150 seconds
 51c1f2a FileStore: Add omap_backend to "<<" operator
 7faed14 Add superblock to KeyValueStore
 b879e74 KeyValueStore: use generic KeyValueDB::create()
+7c3fc40 valgrind: ignore ec plugin factory leaks
 9df9d28 mon/OSDMonitor: fix i386 floating point rounding error
 aa9ae1f qa/workunits/cephtool/test_daemon.sh: verify ceph -c works with daemon
 22d20f3 qa/workunits/cephtool/test_daemon.sh: typo
@@ -14431,10 +16267,15 @@ aa9ae1f qa/workunits/cephtool/test_daemon.sh: verify ceph -c works with daemon
 6d89a99 ceph.in: Pass global args to ceph-conf for proper lookup
 0190df5 osd: prevent old clients from using tiered pools
 605064d test/cli-integration/rbd: fix trailing space
+4686115 add another call to use "ps aux"
+ec342d8 tell us if you are really not running any more
 d700076 mon: s/%%/%/
 0f8929a cls_rgw: fix object name of objects removed on object creation
+1c7c2eb nuke: allow nuking vpm hosts
 061c8e9 librbd: enable rbd cache by default; writethrough until flush
 4e1405e erasure-code: create default profile if necessary
+ec2f949 kcephfs: thrash mds too
+4918179 task/mpi: Explicit check for version mismatch
 5f65b4d os/FileJournal: When dumping journal, use the correct seq to avoid misjudging the journal as corrupt.
 7b169a0 rocksdb backend optimization
 708b5b8 add --with-librocksdb-static configure options
@@ -14457,14 +16298,21 @@ c283ad4 mds: handle replaying old format journals
 6832ec0 mds: make MDS::replay_done clearer
 e587088 mds: remove unused purge_prealloc_ino
 6be8087 mds: separate inode recovery queue from MDCache
+5d2ce7f Fixed -x in the suite, this fixed http://tracker.ceph.com/issues/8862
 0d70989 python-ceph: require libcephfs.
+b8687a5 Fix package_version_for_hash() call
 78ff1f0 (tag: v0.83) 0.83
 06c4736 Remove reference from mkcephfs.
 4045b2e doc/release-notes: typo
 df1bad8 doc/release-notes: v0.80.5 release notes
+0147c74 task/mds_journal_migration: fix cleanup
+431ae2f suites/fs: add a standbyreplay configuration
 e99acf9 OSD: add require_same_peer_inst(OpRequestRef&,OSDMap&) helper
 e179e92 OSD: introduce require_self_aliveness(OpRequestRef&,epoch_t) function
 f36cffc unittest_crush_wrapper: fix build
+3f3da88 Update README
+f2b7422 Update README
+7dcb8bd dumpling-firefly-x: document structure of suite
 eb2f1ea OSD: use OpRequestRef& for a few require_* functions
 1526546 Remove reference from mkcephfs.
 9b03752 Fix some style and checking issue
@@ -14477,6 +16325,7 @@ eb697dd librbd: make rbd_get_parent_info() accept NULL out params
 04d0526 PGMonitor: fix bug in caculating pool avail space
 b08470f configure.ac: link libboost_thread only with json-spirit
 9d23cc6 configure: don't link blkid, udev to everything
+cc7c9b2 task/workunit: add sudo to dir delete
 de9cfca Only write bufferhead when it's dirty
 1c26266 ObjectCacher: fix bh_{add,remove} dirty_or_tx_bh accounting
 727ac1d ObjectCacher: fix dirty_or_tx_bh logic in bh_set_state()
@@ -14484,19 +16333,29 @@ de9cfca Only write bufferhead when it's dirty
 d858fdc Add rbdcache max dirty object option
 b8a5668 Reduce ObjectCacher flush overhead
 288908b Revert "Merge pull request #2129 from ceph/wip-librbd-oc"
+c88e1cb rados/singleton/all/rest-api: add mds.a
+05fb534 added a new test for rgw_readwrite with front end as civetweb instead of apache
+ac8ab1e added a new test for rgw_readwrite with front end as civetweb instead of apache
 0553890 rgw: call processor->handle_data() again if needed
+531f89c Changed upgrade order and re-enabled rados/tests.sh
 d3de69f mds: fix journal reformat failure in standbyreplay
 8fb761b osd/ReplicatedPG: requeue cache full waiters if no longer writeback
 36aaab9 osd/ReplicatedPG: fix cache full -> not full requeueing when !active
 ba9d52e librbd: store and retrieve snapshot metadata based on id
 c5f766b ceph_test_rados_api_tier: do fewer writes in HitSetWrite
 f360920 common/RefCountedObject: fix use-after-free in debug print
+883f2a7 task/osd_recovery: use rbd instead of metadata pool
 14cad5e rgw: object write should not exceed part size
 fc83e19 rgw: align object chunk size with pool alignment
+4e56521 task/osd_backfill: use 'rbd' instead of 'data' pool
 1f9c732 doc: Add additional hyperlink to Cache Tiering defaults.
 4047660 doc: Update doc from user feedback.
 d1dfb9b osd: fix bad Message* defer in C_SendMap and send_map_on_destruct
 5740266 test: catch a straggler still using 'data' pool
+2ed1131 task/workunit: general tidy
+396dc07 task/workunit: clean up dir deletion
+a96a7b0 task/cephfs: generalise Filesystem for multi-MDS
+175057f rados/verify: increase api tests timeout
 4eb18dd os/FileJournal: Update the journal header when closing journal
 63c1711 msg/SimpleMessenger: drop local_conneciton priv link on shutdwon
 2545e80 librbd: fix crash using clone of flattened image
@@ -14504,6 +16363,7 @@ d1dfb9b osd: fix bad Message* defer in C_SendMap and send_map_on_destruct
 c511822 doc: update radosgw man page with available opts
 e259aca rgw: list all available options during help()
 99e80a5 rgw: format help options to align with the rest
+279f39d use notcmalloc builder for valgrind leak checking
 95aaeb6 osd: use Connection::send_message()
 be91daf common/LogClient: use con-based send_message
 694ced9 client: use con-based send_message
@@ -14537,6 +16397,10 @@ ea14d7b Refactor Messenger class family.
 956f287 osd/ReplicatedPG: do not complain about missing clones when INCOMPLETE_CLONES is set
 54bf055 osd/osd_types: add pg_pool_t FLAG_COMPLETE_CLONES
 67d13d7 mon/OSDMonitor: improve no-op cache_mode set check
+c2fc561 Remove oddly-placed, broken import
+479a1fe Rename CephState to DaemonGroup and move to orchestra
+7d0adce Move DaemonState to teuthology.orchestra
+16ad68d clusters/fixed-3-cephfs.yaml: put client.0 on separate node, remove client.1
 d4faf74 ceph_test_objectstore: clean up on finish of MoveRename
 3ec9a42 os/LFNIndex: use FDCloser for fsync_dir
 6fb3260 os/LFNIndex: only consider alt xattr if nlink > 1
@@ -14550,31 +16414,49 @@ c0cb56f ceph.spec.in: add bash completion file for radosgw-admin
 c57811f Fix/add missing dependencies:
 793e05a ceph.spec.in: whitespace fixes
 dae6ecb ceph.spec.in: split out ceph-common as in Debian
+a546e20 rados/objectstore: move objectstore related tests from singleton-nomsgr
 a05a0da common/random_cache: fix typo
 5efdc62 common/RandomCache: Fix inconsistence between contents and count
 356af4b osd/ReplicatedPG: debug obc locks
 6fe2782 osd/ReplicatedPG: greedily take write_lock for copyfrom finish, snapdir
 0962650 osd: allow greedy get_write() for ObjectContext locks
 ccd0eec OSD: introduce require_up_osd_peer() function for gating replica ops
+367d4da added debug messages for radosbench.yaml
+efce341 Fail sooner if an invalid config_yaml is passed
+456aff8 Removed rados/test.sh for now
 253ca2b os: make name/attr max methods unsigned
 daac750 os/KeyValueStore: make get_max_object_name_length() sane
+0e2149c Remove kcon_most
+828bb2a ceph_deploy: use new ship_utilities location
+344d597 Remove mds role from rados & rgw tests
 e311a08 uncomment cleanup command
 c264774 init: add systemd service files
 d87e5b9 powerdns: RADOS Gateway backend for bucket directioning
+fb6ab3b Modified print task
 b551ae2 mon: AuthMonitor: always encode full regardless of keyserver having keys
+1382440 upgrade/dumpling-firefly-x: remove unnecessary files
+e962fa7 upgrade/dumpling-firefly-x: upgrade clients to firefly
 1518fa2 osd: init local_connection for fast_dispatch in _send_boot()
+c6ee02d upgrade/dumpling-firefly-x: use correct tests against final workload
+54404d9 upgrade/dumpling-firefly-x: rename final upgrade yamls; expect quorum
+04d4476 upgrade/dumpling-firefly-x: run firefly tests against firefly
+76da3e7 upgrade/dumpling-firefly-x: test rados api
+a22d5c8 upgrade/dumpling-firefly-x: drop useless line
 34b0efd ObjectCacher: fix bh_{add,remove} dirty_or_tx_bh accounting
 8a05f1b ObjectCacher: fix dirty_or_tx_bh logic in bh_set_state()
 d358741 Wait tx state buffer in flush_set
 3c7229a Add rbdcache max dirty object option
 5cb4b00 Reduce ObjectCacher flush overhead
 9061988 osd: init local_connection for fast_dispatch in _send_boot()
+2b4f740 rados/singleton-nomsgr/all/alloc-hint: fix config syntax
 b6f3aff Fix mismatched tags (struct vs. class) inconsistency
+72b3417 task/mds_journal_migration: use existing clients
 2aa3edc os/FileStore: fix max object name limit
 f4bffec ceph_test_objectstore: test memstore
 6f312b0 os/MemStore: copy attrs on clone
 8dd6b8f os/MemStore: fix wrlock ordering checks
 a2594a5 osd/MemStore: handle collection_move_rename within the same collection
+2dad906 move ship_utilities to install task (from ceph test)
 3467110 ceph-dencoder: don't link librgw.la (and rados, etc.)
 b1a641f rgw: move a bunch of stuff into rgw_dencoder
 1c17077 libosd_types, libos_types, libmon_types
@@ -14585,7 +16467,9 @@ d7209c1 rgw: dump prefix unconditionally
 dc417e4 rgw: list extra objects to set truncation flag correctly
 82d2d61 rgw: account common prefixes for MaxKeys in bucket listing
 924686f rgw: add NextMarker param for bucket listing
+b779e5a Removed upgrade-sequence to avoid failures (because 3-firefly-upgrade then downgrades to firefly, causing failures)
 4a63396 rgw: fix decoding + characters in URL
+02a6c91 rados/singleton-nomsgr/all/alloc-hint: enable xfs extsize for test
 09a5974 crushtool: Send output to stdout instead of stderr
 e6cf618 rgw: improve delmited listing of bucket
 49fc68c utf8: export encode_utf8() and decode_utf8()
@@ -14596,6 +16480,7 @@ e60dd0f osdc: refactor JOURNAL_FORMAT_* constants to enum
 8eef89e doc: fix example s/inspect/journal inspect/
 5438500 mds: fix journal reformat failure in standbyreplay
 ed3bc4c osdc/Journaler: validate header on load and save
+a06df52 clusters: Remove mds from default fixed-* configs
 18ca6b6 test: add a missing semicolon
 0cd0268 qa: generalise cephtool for vstart+MDS
 bb5a574 mon: carry last_failure_osd_epoch across `fs new`
@@ -14603,50 +16488,104 @@ b936a27 mon/MDSMonitor: fix msg on idempotent `fs rm`
 06a8f7b configure: do not link leveldb with everything
 0193d3a AUTHORS
 14a9ca6 logrotate.conf: fix osd log rotation under upstart
+b140ce9 Re-enabled rgw tests, #8846 should be fixed now
+eba43a9 Fixed prints Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
+cbe9ee2 Added prints
+c1896d5 Fixed missed line
+be0330c Removed 'firefly' branch name, upgrade step will be set to 'next' or 'master' from the teuthology-suite command line
 7b342ef doc: Add Note about European mirror in Quick Start
 0f11aae remove suse service restarts
 e3a5756 remove ceph restarts on upgrades for RPMs
+04f040a Again fixed indentations
+2a82fd6 Fixed indentation
 4d6899c qa/workunits/cephtool/test.sh: fix erasure_code_profile get test
+2679a02 Added print tasks
 ce9f12d qa/workunits/cephtool/test.sh: test osd pool get erasure_code_profile
 e8ebcb7 mon: OSDMonitor: add "osd pool get <pool> erasure_code_profile" command
 5ccfd37 vstart.sh: default to 3 osds
+7c0f1d8 Commented out rgw tests
 5f6b11a mon/MDSMonitor: make legacy 'newfs' command idempotent
+5355681 Disabled rgw tests for now while waiting for a fix for radosgw crash #8846
 b89ab5f rgw: don't try to wait for pending if list is empty
 19e68ac rbd: respect rbd_default_* parameters
 e891a93 rbd: remove accidental repeated option
 0f87c55 librbd: use order-agnostic default stripe parameters
 f9f2417 rgw: don't try to wait for pending if list is empty
+7fb0f64 Replace branch dumpling with firefly to fix capitalization issue 2048k vs 2048K
+b8c443b Fix get_hash() arg ordering
 420f0a4 set the default log level to WARNING
 3e0d980 init-ceph: wrap daemon startup with systemd-run when running under systemd
 99dfaf7 doc/release-notes: v0.80.4
 80ea606 Fix size of network protocol intergers.
 2f43cef doc: Extended discussion for building docs on CentOS / RHEL.
 124f97f doc: Added a script to build docs on CentOS / RHEL.
+63fb271 rgw: set data extra pool correctly
+32bffa5 Added client.0 upgrade step
+2aea9de Commented out test.sh as failed tests: LibRadosMisc.Operate1PP can't work across dumpling and firefly releases
+29c06f0 Add and use new fetch() method
+79da400 Fix a bug where results_email was being ignored
+26c49ec task/ceph.py: deep-scrub instead of scrub
+36441a2 README: teuthology-suite
+cc754ba Enabled tunables
+c842165 Changed restart order, mds after osd
 6c48d07 doc: add cephfs layout documentation
+9c8dd98 task/kclient: remove redundant double loop
+403ba2c task/cephfs: use dedent for embedded python
+785a272 task: fix admin_socket for non-int ids
+8bb77ed task: add mds_client_recovery
+91e56bb task/mds_journal_migration: only mount my client
+d91f028 task: refactor kclient into KernelMount
+6cba497 task: create FuseMount class from ceph_fuse
+d81d77d kclient: remove `kclient` workunit
 af740ec cephfs: pool safety & print deprecation warning
 d915ceb update hadoop-wordcount test to be able to run on hadoop 2.x. The hadoop and mapreduce libraries are no longer hard coded, so they can be specified to point to the right path. The relative hdfs paths are changed to absolute paths. A sample command to run the test on hadoop 2.x is: TESTDIR=/home/test HADOOP_HOME=/usr/lib/hadoop HADOOP_MR_HOME=/usr/lib/hadoop-mapreduce sh workunits/hadoop-wordcount/test.sh starting hadoop-wordcount test
 cceab2b qa: retire kclient-specific layout test
 95f5a44 ceph.spec: move ceph-dencoder to ceph from ceph-common
 b37e3bd debian: move ceph-dencoder to ceph from ceph-common
+7a1f589 Trying without tunables again
 01cd3cd XfsFileStoreBackend: default to disabling extsize on xfs
+657bd36 Fix the incomplete substitute_placeholders() fix
+f557ac5 Add a debug statement to reset_repo()
+56ad957 Be smarter about choosing branches
+fa42928 Changed restart sequence
+51a01c0 Try diff restart
 fc597e5 doc/release-notes: some additional warnings and recommendations against adjusting tunables
+6c05747 task: move Filesystem class to shared location
+47087ff Fall back to master for suite_branch
+18a3a09 Added back tunables
 e17e9d8 ceph_test_rados_api_tier: fix [EC] HitSet{Read,Write,Trim} tests
 a4ed336 mds: add `session ls` and `session evict` to asok
 0e0be07 client: include ID in mds_sessions asok
 52a2bc5 mon: remove unused attribute notified_global_id
 b120a48 common/admin_socket: remove dead code
 bb47ff3 osd: fix confusing debug output for op_applied
+18900a1 Removed tunables temporarily for testing
 586d3ee doc: Fixes a broken link on the rados deploy osd page.
 447f849 doc/release-notes: v0.80.3
+3150bba rados: test ceph-post-file
 29f20b7 qa/workunits/post-file.sh
+c68b5fc Add a flag to dump the entire job body.
+4f61db9 Add missing placeholder
+dcd22a5 Add a --dry-run flag to teuthology-schedule
 c9e1e82 rbdmap: per-device post-map/pre-unmap hooks
+0ccee81 Don't drop the rest of os.environ
+f8658de Use os.path.expanduser() instead of os.environ
+b6c7ba2 Fix typo
+0ad1058 Fix lock paths
+7418de9 Use the ceph_branch if suite_branch isn't found
 c93da05 osd/osd_types: be pedantic about encoding last_force_op_resend without feature bit
 712d5d1 osd/osd_types: remove useless encode_compat local var for pg_pool_t::encode
+fe1d476 marginal: fix pjd overrides for fuse_default_permissions
+bc1c12a multimds: fix pjd overrides for fuse_default_permissions
+9547a7f Discourage modifying defaults in instances
+4a6352e Don't use double underscores
+a924606 Run tests for base class in subclass tests
 50e93c2 qa/workunits: cephtool: adjust pool name where missing as it has changed
 cf94cf3 crush: include CRUSH_V3, v2/v3 rules checks in dump_tunables()
 daadff4 doc: minor format fix for radosgw admin docs
 b844ec9 rbdmap: per-device mount (Closes: #8538)
 02683ac rbd.cc: Check io-size avoid floating point exception.
+cdd4d04 fix typos
 6cd3457 qa/workunits: cephtool: cleanup after pool creation
 704b0a3 qa/workunits: cephtool: pool needs to be a tier to be used as such
 49db676 qa/workunits: cephtool: test erroneous 'tier remove'
@@ -14655,12 +16594,29 @@ df59449 qa/workunits: cephtool: split get/set on tier pools from get/set tests
 8e5a8da mon: OSDMonitor: be scary about inconsistent pool tier ids
 64bdf6c osd: pg_pool_t: clear tunables on clear_tier()
 f131dfb mon: OSDMonitor: limit tier-specific pool set/get on non-tier pools
+caa65c3 Added steps to cover for 'ceph osd crush tunables firefly'
+8b6e824 Tweak fetch_teuthology_branch()
+24b44cb Clone ceph-qa-suite and add it to PYTHONPATH
+9257b87 Use config.src_base_path
+49725e0 Add src_base_path to defaults.
+c105827 Store the suite_branch in the job config
+fb97e85 Use the new JobConfig object
+c886a93 Make teuthology.config *far* more robust
+70d87bd Update existing unit test, and add another.
+0f69e3d Use a dict for a job template instead of a string.
+9e5338f Add test for config substitution
+46b48c9 Remove unused import
+1a4bde2 Fix install_except_hook()
+277cc10 Added dumpling-firefly-x upgrade suite per http://tracker.ceph.com/issues/8796
 026b127 doc/changelog/v0.80.2: include tag
 59c00e5 os: add prototype KineticStore
 74f5e5e PG::op_must_wait_for_map: pass the epoch rather than the map
+3c3cdcb Don't execute network-using tests by default
 98f92d8 doc: Added CentOS/RHEL install for git.
+1d48f66 Don't use master as the default teuthology branch
 115c078 rgw: modelines
 c4afaf9 rgw: fix RGWObjManifestRule decoder
+cbc73f7 task/ceph: Make cephfs_setup cuttlefish-compatible
 0839e2a doc: Added keyring location note to resolve pull abandoned pull request #1946.
 4692257 nuke dlist
 a3e5c6d Add random_cache.hpp to Makefile.am
@@ -14672,6 +16628,7 @@ c930a1f Work around an apparent binding bug (GCC 4.8).
 228760c Fix the PG listing issue which could miss objects for EC pool (where there is object shard and generation). Backport: firefly Signed-off-by: Guang Yang (yguang at yahoo-inc.com)
 bd6ba10 doc: Clean up formatting, usage and removed duplicate section.
 15d5b51 doc/release-notes: v0.80.2
+6a231ec Revert "enable kernel debugging on kclient fsx runs"
 29c2bb2 Revert "qa: add an fsx run which turns on kernel debugging"
 c15e524 Revert "qa: add an fsx run which turns on kernel debugging"
 2217f2b doc: fix a typo in quickstart doc
@@ -14696,20 +16653,27 @@ c92feeb messages: MForward: fix compat version
 aefbac5 osd: fix session leak when waiting on map
 17ad083 osd: clear Sessions for loopback Connections on shutdown
 231fe1b Revert "OSD: move waiting_for_pg into Session"
+7262d29 watch-suite.sh: new syntax
 aa1be2e OSD: fix debug logging output
 b700963 ceph.spec.in: add bash completion file for radosgw-admin
 235e4c7 ceph.spec.in: rhel7-related changes:
+5d192ed Make the archive dir a positional argument
 7cf8132 Fix/add missing dependencies:
 ec8af52 ceph.spec.in: whitespace fixes
 e131b9d ceph.spec.in: split out ceph-common as in Debian
 08fa16b common: seq_read_bench argument order changed. The argument order for seq_read_bench in src/common/obj_bencher.h has been changed to match the argument order in obj_bencher.cc
+4d7c09e suite: default to 9h results_timeout
 cca5841 test: generalise default_pools in test_rados
 a7a631d tests: don't depend on 'data' pool in rbd test
 cf5f535 doc/release-notes: clarify CRUSH notes about tunables
 d84d720 decrement WBThrottle perfcounters in clear_object
+18e556b task/ceph: move set_max_mds into cephfs_setup
 16df4c3 mds: use client-provided time stamp for user-visible file metadata
 73b2928 Remove exclusive lock on GenericObjectMap
 d104979 Add Header cache to KeyValueStore
+f3145e9 ceph.conf: drop min pg per osd warning
+62ead22 remove stray dirs
+8920c4b remove stray workload dir
 c0806bb doc: mention kernel support for rbd format 2
 c7937ff doc: Fix a typo regarding requiretty for RHEL based platforms
 54af810 mon: check changes to the whole CRUSH map and to tunables against cluster features
@@ -14723,7 +16687,18 @@ fc8d198 osd: debug Session refs
 45991c0 common/RefCountedObject: assert nref == 0 in dtor
 ff1521f common/RefCountedObject: optionally take initial ref count to ctor
 0547417 common/RefCountedObject: optionally debug
+b7eecd4 Don't mess with ~/src/teuthology by default
+4881b2e Relocate teuthology checkouts to ~/src/
+6e19ba4 task/ceph: add option to avoid doing scrub
+f6ec958 nuke: fix unmounted multiple fuse mount points
+8738304 run_tasks: catch malformed task case
+6934bbe task/ceph_manager: generalize admin_socket calls
 2226b91 qa: support running under non privileged user
+3428b09 Remove default results_server
+dbadcd0 Allow setting results_email in teuthology.yaml
+34a7b31 Don't remove the teuthology repo if errors occur
+97bf86c Optionally, don't remove a repo when errors occur
+63fd33e Add another unit test
 f7086d3 Automagically setup submodules on first run.
 0c7c722 ceph_argparse_flag has no regular 3rd parameter.
 909850e [werror] Fix c++11-compat-reserved-user-defined-literal
@@ -14732,29 +16707,57 @@ ddc04c8 mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get i
 0392ddb ReplicatedPG: Removed the redundant register_snapset_context call
 2f089d8 OpTracker: The optracker enabled/disabled check is added
 63be0f2 OpTracker: use mark_event rather than _mark_event
+5f2aa56 fix create_pool_with_unique_name call to use profile name
+7cdfb05 document create_pool erasure_code_profile_name argument
 bb3e1c9 qa/workunits/rest/test.py: make osd create test idempotent
+71182fd Allow testing with online repos
+cb7fdfb Add a unit test simulating simultaneous checkouts
+43c4595 Add a couple unit tests for teuthology.parallel
 7e1deb6 mds: defer encoding/submitting log events to separate thread
 44199d6 mds: use mutex to protect log segment list
 6d8ccdd mds: add thread to encode/submit log events
 70c0723 osd: add dump_reservations asok command
 6483710 common/AsyncReserver: add dump()
+88cc7c0 marginal/multimds: fuse_default_permissions = 1 for pjd
+2168153 add rgw.create_ec_pool default profile
+5e88022 rados/thrash: set osd_max_backfill = 1 sometimes
+1d12dd7 ls: make --archive-dir default to .
 9ce5ff9 mon: clear osd request queue latency info on down or up
+ec95ee2 Rename suite_base to suite_dir
 f8c88a4 OSD: wake_pg_waiters after dropping pg lock
+434348a Add --suite-branch and --suite-base
 e2b151d mds: Update default FS name
 f62f7f5 qa: update cephtool EC pools test to respect IDs
 710561c mon/MDSMonitor: EC check in 'fs new' like newfs
 44eb259 qa: add a check for crash_replay_interval autoset
 c0ffa01 mon: Set crash_replay_interval automatically
 82d3fcc qa: Update data pool ID for vxattrs test
+42a7350 Use a test-specific name/email for the git calls
+d6f1752 Add more unit tests
+c604149 add erasure code workload to upgrade/firefly-x
+bbb2524 erasure-code: fix typo in create_pool argument name
+dd8f098 erasure-code: do not prepend 'ceph' to the erasure-code-profile command
+2e5d170 erasure-code: str() to avoid + conversion errors
 917ef15 test: use 0U with gtest to avoid spurious warnings
 522174b qa: support running under non privileged user
 8697d6a OSD: await_reserved_maps() prior to calling mark_down
 6f97206 osd: allow osd map cache size to be adjusted at runtime
+32d094a Fix linter errors
 bcc09f9 qa/workunits/cephtool/test.sh: sudo ceph daemon
+e431abd Add a basic validation of the branch value
+bfd8226 Run unit tests offline
+b9f26ed documentation typo
 959f2b2 PGLog: fix clear() to avoid the IndexLog::zero() asserts
+98e0617 erasure-code: separate profile from pool creation
+7e388c9 background_exec: run something in the background
+1cd8076 daemon-helper: optional kill an entire process group
+f8282df rados/singleton/all/thrash_cache_writeback_forward_none: thrash cache pool drain
 e0d3b78 rgw: fix uninit ofs in RGWObjManifect::obj_iterator
 73b929b osd: improve tests for configuration updates
+5a61f44 Split up repo helper into separate functions
 2dec8a8 qa/workunits/suites/fsx.sh: don't use zero range
+641a294 task: update 'newfs' users to 'fs new'
+8be756a task/ceph: Invoke newfs for CephFS as needed
 83f1906 mon/MDSMonitor: log warning while MDS up but no FS
 b7f09c2 mon/MDSMonitor: fix incorrect comment
 fc0f8bd mon/MDSMonitor: s/enabled/get_enabled()/
@@ -14788,11 +16791,15 @@ b066e16 common: move #include syscall into ifndef DARWIN
 656bc04 common/Thread: add missing #include for pid_t
 b2bcf52 ceph.in: handle DYLD_LIBRARY_PATH on OS X
 12079a7 doc: dev: cache-pool.rst: fix typo s/objets/objects/
+6b94807 Use corrected tube_name
+8583306 Return corrected tube_name
+491999b Use teuthology.beanstalk
 4689467 PG: pass OpRequestRef by ref to avoid refcounting overhead
 d48a737 osd/: in scrub, check for and remove obsolete rollback objs
 953c33e osd/: plumb rollback_trim_to through interfaces and messages
 ac11ca4 PGLog: add rollback_info_trimmed_to_riter to IndexedLog and use in PG
 62027ec doc/release-notes: v0.82
+d56b59d add upgrade/firefly-x based on dumpling-x
 7fae941 rgw: allocate enough space for bucket instance id
 2207ed1 PGLog: fix logging whitespace in trim
 af4970c OSDMap: avoid passing pg_t by value
@@ -14800,6 +16807,19 @@ d72eec0 PG: block outgoing peering messages until the last interval is durable
 f9f89be PG: add machinery to temporarily buffer outgoing recovery messages
 f749812 os/: add async flush_commit() method to Sequencer and implementations
 0debfe1 OSD: pass param by ref to avoid copying overhead
+55b65d6 Document checkout_repo()
+25a4067 Drop a "fix" for a bug that probably never existed
+a4c3a02 Add a note about teuthology scheduling
+f5bed55 Improve missing branch detection and logging
+abd3590 Add unit tests for repo_utils
+807d6fd Use 'ceph-qa-suite.git' for the repo name
+b16c48a Also handle teuthology repo checkouts
+88d26e4 Handle ceph-qa-suite checkouts
+6e3e669 Generalize error message
+484693c Fix linter errors
+93fd6b8 Move repo checkout code to new module repo_utils
+5e3c13d Use a more informative variable name for dest_path
+09baca9 document how to specify a facet in teuthology-suite
 1f3fbc9 mds: print sequence number of log segment
 55ed85b mds: introduce sequence number for log events
 a17462c mds: add get_metablob() to log events
@@ -14809,7 +16829,11 @@ a17462c mds: add get_metablob() to log events
 1c93c61 MOSDOp: The functions are returned by const ref and parameters passed by ref
 ad81a98 Revert "ceph-disk: Enable creating multiple osds per dev"
 e02957d test: use (unsigned)0 with gtest to avoid spurious warnings
+1c0dc1c smoke: add rbd fsx + thrashing
 f8df9bd scratchtool.c: cleanup do_rados_getxattrs()
+3fa6dd2 smoke: add a mon thrash test
+64ec775 smoke: add some rados osd thrashing test
+e99eba9 smoke: specify fs on each run
 4e9c2c1 osd: fix pg_stat_t int -> int32_t
 238b1a3 osd: fix pg_shard_t int -> int32_t
 709f0c4 osd: fix pg_interval_t int -> int32_t
@@ -14836,6 +16860,36 @@ db6cc13 qa/workunits: cephtool: split into functions
 be70c1f osdmaptool/test-map-pgs.t: fix escaping to fix run
 dc1a4df Revert "Give meaningful error when submodules are not checked out"
 9695535 Make <poolname> in "ceph osd tier --help" clearer.
+2e18fdd Only delete jobs that match the pattern
+e7323da Swap order of job_id and run_name when deleting
+f4321b0 Skip the kernel stanza if the branch passed is '-'
+ba1eae9 Add more docstrings
+e1597a0 No need to explicitly return None here
+667a223 Update unit tests to reflect renamed function
+7082a3d Add a few more docstrings and a debug statement
+743edd6 Fix a couple linter errors
+6df3b13 Remove schedule_suite.sh
+173c7b8 Fix bug where the base yaml wasn't being merged
+fd13a18 With dry_run, quote any individual args
+b6cb3f8 Add lots of unit tests for teuthology.suite
+91c7385 Raise ScheduleFailError even if not sending email
+356ab7e Do everything that schedule_suite.sh does
+1531708 Add default results_email
+89b38dd Don't schedule an email job if email isn't set
+26fdef0 Split prepare_and_schedule() out of main()
+1be15ed Use the return value of schedule_suite()
+3b69efa Organize arguments into logical sections
+b790c4d Tweak usage formatting
+19d7307 Don't reread the entire yaml file...
+d92f538 Remove unused functions
+0aabafd 'and' is a thing
+675f087 Take machine_type instead of worker
+d14c44f Add functions for querying gitbuilder repos
+4fed92c Silence connection pool logging
+ef9638a Remove 'template' feature
+43505b2 Only allow scheduling one suite per call.
+f5af797 Port to docopt
+1ccbff8 Refactor collection scheduling out of main()
 76361b8 mon: simplify output
 385fd6c do_autogen.sh: --disable-static
 14085f4 (tag: v0.82) 0.82
@@ -14847,6 +16901,8 @@ a5c704b RadosStriperImpl.cc: catch exception by reference
 6d79863 rgw/rgw_rados.h: use static_cast instead of c-style cast
 0b3a398 osd/OSD.cc: parse lsb release data via lsb_release
 d7350a3 rados.cc: fix pool alignment check
+77670ae thrashers/mapgap.yaml: ignore osd_map_cache_size warning
+3134a32 If archive_base is None, use config's
 2b007c2 mon: MDSMonitor: print pool name along with id during 'newfs' errors
 378b5ad qa/workunit: cephtool: test mds newfs and add_data_pool with ec pools
 d6f6813 mon: MDSMonitor: do not allow ec pools to be used for data or metadata
@@ -14870,7 +16926,11 @@ da03e9e MDCache.h: init 'umaster::safe' in constructor
 2210ee3 test/objectstore/store_test.cc: prefer ++operators for iterators
 3e93d4a osd: tests for osd bench
 74be320 Use sized integer.
+82d3a3d smoke-tests -> smoke
+28a2253 smoke: remove old smoke suite
+a312f9a Update setup.py
 66a5f3b doc: Fixed typo in pools documentation - replaced '-' with '_' in example set-quota commands.
+5cd50a4 Allow killing jobs by passing a 'jobspec'.
 e189a66 log the command that is being run with subprocess
 78cbac4 mailmap: Dmitry Smirnov name normalization
 efefbfd mailmap: koleosfuscus affiliation
@@ -14887,6 +16947,8 @@ a58fbf7 mailmap: Colin Mattson affliation
 22c028d mailmap: Red Hat acquires InkTank
 c270172 mailmap: Sebastien Ponce affiliation
 39a4b78 mon: test that pools used in tiers cannot be removed
+09d02ab Revert "rados: Fix cache_flush, cache_try_flush, cache_evict and add hit_set_list"
+fc772d7 Add a couple tests for teuthology.schedule
 1de9071 osd/osd_types.cc: dump correct pg_log_entry_t member variable
 363496a osd: use appropriate json types instead of stream when dumping info
 97772c2 mon: name instead of id in "has tiers" message
@@ -14926,6 +16988,7 @@ c5b5ed6 ceph_test_rados_api_tier: disable LibRadosTierECPP::HitSetWrite
 0bb0095 Revert "erasure-code: create default profile if necessary"
 f53bed1 mon/OSDMonitor: fix build error
 1c72465 osd: verify osd config sanity checks
+ec422a9 Make conf_file optional
 4bd1b5e PendingReleaseNotes: note about keyvaluestore-dev on-disk format change
 d93e74e common: Enforces the methods lru_pin() and lru_unpin()
 d48ed68 common: Fixes issue with lru_clear() + add new test
@@ -14936,6 +16999,7 @@ d48ed68 common: Fixes issue with lru_clear() + add new test
 18f5807 Make KeyValueStore not use expected_write_size
 360de6a erasure-code: create default profile if necessary
 f3ec7d0 osd: add sanity check/warning on a few key configs
+0cb6b71 Add .ropeproject to .gitignore
 4786a48 osd: remove non const get_erasure_code_profile
 a1c13c5 tests: prevent kill race condition
 5c1f9aa osd: improve osd pool create error message readability
@@ -14953,11 +17017,13 @@ e29beff erasure-code: remove jerasure internals dependencies
 e720314 doc: Updated the OS Recommendations for Firefly.
 2e3302c doc: Updated the example configuration.
 5a31df2 doc: Updated doc for more recent versions.
+09d3f91 Add [--] to usage statement
 2eab1c1 Update RBD doc for OpenStack
 a290d349 test_librbd_fsx: fix sign-compare gcc warning
 40c48bc qa: add script to test krbd setting ro/rw ioctl
 b2542f8 rgw: set a default data extra pool name
 94c8f70 doc: Made mention of "incomplete" status.
+c883e31 enable kernel debugging on kclient fsx runs
 29c33f0 qa: add an fsx run which turns on kernel debugging
 f978722 FileStore: remove the user_only param from _fgetattrs
 bb4e3a9 FileStore: remove user_only options from getattrs through the ObjectStore stack
@@ -14980,9 +17046,16 @@ b7f6147 doc: Added background discussion to clock settings.
 ecbb005 OSD: The thread pool variable names changed. The variable names are more meaningful now: op_tp -> osd_tp and op_sharded_tp -> osd_op_tp
 2a5d83d ShardedTP: The config option changed. The config option for the sharded threadpool is now osd_op_num_threads_per_shard instead of osd_op_num_sharded_pool_threads. Along with osd_op_num_shards, this is much more user friendly when configuring the number of op threads for the osd.
 a0e48b8 ShardedTP: Changes related to conforming to Ceph coding guidelines
+f69f2df Automatically watch 'multi' tube if tube_name
 2e3f4bc doc: Fixed typo.
 6733947 Fix for bug #6700
+78dbc36 Refactor teuthology.schedule...
+78610b0 Remove 'delete' and 'show' features
+b2f0dda Remove unused import
 a4923f5 fix compilation warnings
+3d1df3a Better variable names.
+dbf984e Disable "fix" for corrupt repo detection
+2e5c697 Use correct cwd
 63cc7f9 Add test for objectstore
 50c8fee Fix write operation on a deleted object in the same transaction
 737c13e Remove SequencerPosition from KeyValueStore
@@ -14992,19 +17065,27 @@ ef06515 doc: fix typo in erasure coding section
 1f99cda mon: gather perf stats on elections
 8f36d96 mon: gather perf stats on session adds, removes, counts
 ecda2fe OSD: move waiting_for_pg into Session
+b7c1e35 Update to reflect relocated get_jobs()
 1f40c35 Add set_alloc_hint test to ceph_test_objectstore
 5dd9b2a Make KeyValueStore support set_alloc_hint op
 b0c66a7 doc: Fixes spelling errors on README
 910d73c Added RPM and debian packaging for libradosstriper, creating a new package called libradosstriper1
 fa01ca6 Added unit test suite for the Rados striping API.
 d160ce2 Implementation of the radosstriper interface.
+875e2d2 rados: Add hit_set_list missing from possible op weights
+2a7de82 Move teuthology-ls's implementation to ls.py
 a6c34e4 osdc/Objecter: mark all ops as known-if-redirected
+87e7ff0 Fix corrupt repo detection
 bc3b30e XfsFileStoreBackend: call ioctl(XFS_IOC_FSSETXATTR) less often
 750b1db XfsFileStoreBackend: nuke redundant goto in set_extsize()
+479a67e creating smoke-tests suite to include basic tests from all modules
 524a155 rgw: reduce calls to rgw_obj.set_obj()
+3dae48c rados: Fix cache_flush, cache_try_flush, cache_evict and add hit_set_list
 e31d3fe doc: Descrption => Description Correct spelling error.
 0ca43d8 doc: Use write_full for C++ example Latest version of librados uses write_full when writing entire object.
 0bd6f67 OSD::calc_priors_during: handle CRUSH_ITEM_NONE correctly
+825db50 Added requested comment.
+242d9de Remove unused variables and functions.
 2081c99 include/atomic: make 32-bit atomic64_t unsigned
 64e99d8 ceph-objectstore-test: fix warning in collect_metadata test
 e1ad0bf Added a striper interface on top of rados called radosstriper.
@@ -15024,11 +17105,13 @@ e24213e MemStore.cc: silence gcc -Wunused-variable
 51abf20 Revert "Remove unused variables in MemStore.cc"
 a325e3e Revert "Remove unused variables in KeyValueStore.cc"
 cac902e os/KeyValueStore.cc: fix possible null pointer deref warning
+cadcbf3 Remove unused import
 3ee3e66 librbd/internal.cc: check earlier for null pointer
 f17a963 test/librbd/fsx.c: fix gcc warning
 f31e4c8 libs3: update to latest git master of ceph/libs3
 18c07ec common/addr_parsing.c: fix realloc memory leak
-5f86652 daemon_config.cc: add some more asserts
+5f866526 daemon_config.cc: add some more asserts
+549b52b Update --help output
 703d0eb rgw: set meta object in extra flag when initializing it
 23b657c Remove unused variables in KeyValueStore.cc
 307ba48 Remove unused variables in MemStore.cc
@@ -15037,15 +17120,19 @@ f31e4c8 libs3: update to latest git master of ceph/libs3
 c50f85e bloom_filter, remove unnecessary operators
 90cc6dd bloom_filter, add assertion to test validate element_count()
 c323c5b Fix keyvaluestore fiemap bug
+ed3ec39 Support installing ceph kernels from gitbuilder on rpm machines.
 3ec32a6 librados: simplify/fix rados_pool_list bounds checks
+0f04af0 Add a timeout for unmounting
 5569d40 documentation: add osd erasure-code-profile {set,rm,get,ls}
 8ff4edd documentation: update osd pool create erasure
 22bc886 doc: fix 'rbd map' example
 4f834fa doc/release-notes: v0.81
 f4e81d3 librbd: clarify license header
+af73e30 Remove some duplicate code.
 884a6b3 RadosClient: Avoid the DispatchQueue for OSD responses.
 0cc9ade doc: Updated monitor output and added usage calc explanations.
 9c32cb2 doc: Added usage for pool quotas.
+f6c4d00 krbd: add librbd_fsx -K job
 86754cc doc: Added more discussion of new CRUSH types.
 cabb8f0 doc: Added a section for ceph df.
 8de9501 (tag: v0.81) 0.81
@@ -15053,10 +17140,15 @@ cabb8f0 doc: Added a section for ceph df.
 c18cbef qa: add run_xfstests_krbd.sh wrapper
 cd65246 qa: catch up with xfstests changes
 703166c qa: cp run_xfstests.sh run_xfstests-obsolete.sh
+ae0d26a Try a different approach to zombie prevention
+3968f6f radosgw-admin: adjust bucket link interface
 601e25e erasure-code: Ceph distributed storage system
 9bac31b scripts/run-coverity: don't explicitly specify tool version
 e158ad9 erasure-code: make decode_concat virtual
+86c60f7 rbd: change test_script to run_xfstests_krbd.sh
 6aa45b1 common: s/stringstream/ostream/ in str_map
+2551c52 rbd: recognize 'randomize' parameter
+670735f krbd: catch up with run_xfstests.sh changes
 319cb50 Make KeyValueStore support "ceph osd perf" command
 06c0a42 Update INSTALL to mention the submodules/recursive
 2dbd85c WorkQueue: The variable name is corrected. Modified the variable name from shardedpol_cond->shardedpool_cond
@@ -15070,11 +17162,18 @@ c24ef00 ceph-common: The Sharded threadpool worker logic changed Now, the _proce
 70afaaa rgw: fetch object attrs on multipart completion
 669b605 PGLog: initialize complete_to
 b300318 rgw: if extra data pool name is empty, use data pool name instead
+205c455 Give daemons a five-minute timeout for stopping
+7035c17 Add RemoteProcess.__repr__()
 38405d3 qa/workunits/cephtool: test setting options using SI units
 5500437 common/config.cc: allow integer values to be parsed as SI units
 40587d4 test/strtol.cc: Test 'strict_strtosi()'
 67dc575 common/strtol.cc: strict_strtosi() converts str with SI units to uint64_t
+eac2c2a Update users of the teuthology.orchestra.run APIs
+b386f5e Refactor teuthology.orchestra.run
 9c56c86 rgw: calc md5 and compare if user provided appropriate header
+5ca3c72 multimds: fix fuse_default_permissions syntax
+59ee17d Avoid piles of zombies
+0fcfaee Add comment explaining 'preexec_fn=os.setpgrp'
 cae085b msg_types.h: Don't use strncmp for comparing sockaddr_in.
 00b9211 doc: Improve man page for rados bench
 2da2699 doc: update pools documentation
@@ -15088,7 +17187,10 @@ e52b9c6 doc: Added osd pool default size setting example.
 20a04c6 doc: Moved redundant text out of quick-common.
 6786d60 common: WRITE_{EQ,CMP}_OPERATORS_1
 8679cdb osd_types: add pg_log_t::rollback_info_trimmed_to
+b26ba60 Check for broken repos before updating
 f1b890e osd: fix bad is_active() assert in share_map()
+abc722c More verbose logging for daemon restarts
+2363a17 Bump log level to info
 f153bc1 doc: Made additional changes s/CRUSH/Ceph for pull request #1855
 c08f481 doc: alter doc CSS for the removal of Apex Sans
 7f46b7b doc: removed Apex Sans font, replace with font from GFE
@@ -15100,10 +17202,17 @@ e06c58c mon: set min_size to data chunk count for erasure pools
 2f63a30 mon: right justify df values
 2339d4a vstart.sh: -e to create an ec pool called 'ec'
 297f616 crush: add get_rule_weight_map
+c478354 Don't block on teuthology-results processes
 0b5a674 rest-api: key missing for per "rx" and "rwx"
+c427af0 osd_heartbeat_grace from 20 -> 40 when running valgrind
+a4a9a1f Correct logging when running teuthology-results
 634780a remove unused variables, gets all tox envs passing
 23b75b5 add backport of collections.Counter for python2.6
 59b7113 initial take on ceph-brag-client and tox. Python 2.7 passes
+0b97eba updated the get_system_type function
+6f80151 tasks/mds_journal_migration: create.
+554f8c8 task/ceph: separate ceph.conf into function
+79dfe2c tasks: Use '_' instead of '-' in names
 bc85b5d mailmap: Aristoteles Neto affiliation
 0b88659 mailmap: Chris Glass affiliation
 7582fa9 mailmap: Christian Theune affiliation
@@ -15140,11 +17249,18 @@ e741ea4 os/FileStore: include filestore backend, f_type in osd metadata
 2ceb13a rgw: check appropriate entity permission on put_metadata
 ab0db34 documentation: adapt PG formula for erasure coded pools
 d3af8fa Update architecture.rst:Calculating PG IDs
+d3c2300 Don't crash when an invalid branch is passed
+a6aebe2 Set results_server by default
 99b9682 documentation: update osd pool default size from 2 to 3.
 4d4b77e cephfs-java: build against older jni headers
+3b382b7 Process queued jobs synchronously
+50dc3ea Added dumpling v0.67.9 version, removed older versions v0.67.2, v0.67.4, v0.67.8. This will reduce the number of tests to run; some may fail on old versions, see http://tracker.ceph.com/issues/8409
 6069ff0 doc/release-notes: v0.67.9
+08ace54 Add exception hook to teuthology-worker
 f51e33b Avoid extra check for clean object
 9235dcb mon: set MMonGetVersionReply tid
+a4f00ab rados/singleton-nomsgr/all/objectstore: skip ceph cluster setup
+5be30f0 rados/singleton-nomsgr/all/objectstore: clean up
 ba53889 README: move package dependencies into separate files
 77066a2 README: clean up some whitespace
 c08adbc Fix set_alloc_hint op cause KeyValueStore crash problem
@@ -15166,6 +17282,8 @@ fd1f9bd mds: do rstat timestamps (rctime, fragstat mtime) in terms of op stamp
 3569e80 mds: set mds_stamp on lock acquisition
 e4c9c34 mds: add {mds,op}_stamp to Mutation
 401319a ceph.spec.in: remove BuildRoot
+e207e90 multimds: fuse_default_permissions = 0
+bf99313 multimds: factor install+ceph tasks out of other collections
 93a5b88 tools/cephfs: error handling in EventOutput
 3207c50 osdc/Journaler: fix obj count in Journaler:erase
 2621b5d tools/cephfs-journal-tool: handle --help cleanly
@@ -15202,20 +17320,27 @@ f7e9ff1 tools: Create cephfs-journal-tool
 107821f Fix formatting of header
 ad2e20f client: set timestamp in MDS requests
 a91e072 mds: include timestamp in MClientRequest
+c8881bb Fixed broken quotations
+6e893ad Fixed formatting and added a better variable for time out Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
 d71839a doc: clarify openstack cow sentence
 0f7f1e8 doc: note cinder multi-backend restriction
 e92f2d9 doc: link to ephemeral rbd patches
+216e5c9 task/: add populate_rbd_pool for quickly spinning up a pool with rbd stuff
 13d6c3c doc: quick-ceph-deploy cleanup. Improve documentation in quick-ceph-deploy.rst. Use admin-node consistently. ceph should be installed on admin-node for the following reasons: "ceph-deploy admin admin-node" assumes that /etc/ceph exists; "ceph health" requires the use of ceph
 d40ba05 doc: mention admin-node in common documentation
 29f615b ReplicatedPG::start_flush: fix clone deletion case
 5ff95db HashIndex: in cleanup, interpret missing dir as completed merge
 bc897b8 rados.py: clarify recent change on write return value
+8d9d724 ceph_manager: reset timeout if we are making progress in wait_for_recovery
 6372118 doc: Clean up pre-flight documentation. Mention recent Ceph releases. Move important message about sudo and ceph-deploy closer to the use of ceph-deploy. Mention files created by ceph-deploy comment. Separate apt-get from yum commands
 06d05fd doc: fix link to FAQ. The location of the ceph wiki FAQ has changed. Now, the link from ceph documentation matches the current FAQ location
 03e3ccd doc: Restored glance_api_version=2 setting.
 e8756be osdc/Objecter: flag ops that have been redirected
 cf2b172 osd: skip out of order op checks on tiered pools
 e47049b erasure-code: fix URL in developer notes
+33dbfff Avoid calling yaml.safe_load() twice on each job
+debf84e Split progress indicator out to separate functions
+a242184 increased sleep to 100 as 90 seemed insufficient Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
 23787ba mailmap: Sahid Orentino Ferdjaoui affiliation
 30ae96a Ensure autogen.sh to be executed at the top-level
 fb8f469 mds: add getter for ESession.client_inst
@@ -15247,12 +17372,17 @@ affce7a mds: journal rename source inode when rollback rename
 08b79ea mds: journal EImportFinish after adjusting subtree auth
 c18da04 osd: fix narrow session refcount race
 2c4391b osd: fix session leak in ms_handle_fast_connect
+6eac0ba Added more logging Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
+686c01b Added sleep log info line Signed-off-by: Yuri Weinstein <yuri.weinstein at inktank.com>
 15350de Add header cache to DBObjectMap
 ee92a39 MDS: add admin socket cleanup on shutdown
 a78b14e OSDMonitor: set next commit in mon primary-affinity reply
 6dfc544 sample.ceph.conf: minor update
 290ac81 OSD: fix an osdmap_subscribe interface misuse
 6ec3c46 osd: include osd_objectstore in metadata reported to mon
+1a503af increased wait time to 90 sec so tests do not fail
+d8c7439 Add Pulpito links to teuthology emails
+c3dccbf Downgrade paramiko to work around an SFTP bug
 405063b workunits: provide some output in the dirfrag.sh test
 aec5634 osd_types: remove the pool_id argument from (is|check)_new_interval
 f47c160 PG: replace is_split, acting_up_affected with should_restart_peering
@@ -15265,8 +17395,11 @@ ca833bd doc: Added clarifying text to CRUSH add command.
 48337e0 doc: Omitted glance_api_version=2 to fix creating images from volumes.
 17930a1 doc: Changed example to use virtio and put key usage into one line.
 8dd1190 Improve Bash completion for various tools
+5dfc570 Add missing docstrings to repair_test.py
 00225d7 test: fix some templates to match new output code
 20aad8f doc: update instructions for RPM distros
+010f83f Fix unit tests under Jenkins
+de32179 Use VersionNotFoundError when packages are missing
 26151ec mds: lower IO priority of storing backtrace
 019483f mds: reduce verbosity of handle_client_file_{readlock,setlock}
 ca313c2 mds: add a Server::submit_mdlog_entry() to provide event marking
@@ -15282,8 +17415,17 @@ ae80a1f MDS: add stubs for an AdminSocketHook
 0d89e5c MDCache: pass the causative message to request_start_slave()
 06d6d32 mds: remove a couple leftover declarations of MDRequest
 428319e doc/release-notes: v0.80.1
+971c065 Use config.archive_base if one isn't passed
+d945e56 Add retries to orchestra.connection.connect()
+dfb2352 Fix typo
+2b8232a Better logging
 19f8849 doc: Improvements to qemu installation.
 6e4455d doc: Added note on Default requiretty for CentOS and others.
+47f5d83 Use try_mark_run_dead()
+456a114 Add try_mark_run_dead()
+ad01246 Move list of exceptions to catch
+756a6bf Move "no results server" warning
+e0e0126 Fix name parsing
 8b682d1 prioritise use of `javac` executable (gcj provides it through alternatives).
 89fe035 pass '-classpath' option (gcj/javah ignores CLASSPATH environment variable).
 0f4120c look for "jni.h" in gcj-jdk path, needed to find "jni.h" with gcj-jdk_4.9.0
@@ -15314,7 +17456,7 @@ d69fd90 test_rgw_admin_opstate.cc: prefer ++operators for iterators
 0f899c8 test_rgw_admin_meta.cc: prefer ++operators for iterators
 f523d64 TestErasureCodePluginJerasure.cc: prefer ++operators for non-primitive types
 014f050 test/ObjectMap/KeyValueDBMemory.cc: use empty() instead of size()
-d9fff40 mon: restore previous weight when auto-marked out osd boots
+d9fff40d mon: restore previous weight when auto-marked out osd boots
 87722a4 mon: remember osd weight when auto-marking osds out
 45281d9 common/perf_counters: use atomics instead of a mutex
 bf3ba60 atomic_t: add atomic64_t
@@ -15336,6 +17478,23 @@ e8b4789 mds/flock.cc: remove dead initialization of 'new_lock_start'
 63d92ab mon/OSDMonitor: force op resend when pool overlay changes
 45e79a1 osd: discard client ops sent before last_force_op_resend
 dd700bd osdc/Objecter: resend ops in the last_force_op_resend epoch
+0465bdb Don't pass a custom logger anymore
+22b51be Use 'stderr' and 'stdout' as logger names
+470f824 Catch any Unicode errors that manage to sneak in
+60bba80 Express hostnames as child logger names
+a58174d Use Remote.user
+3e65d18 Add Remote.user attribute
+3352b58 Use Remote.shortname in logs
+30d1d51 Make Remote.shortname actually short
+36fe6a5 Remote.hostname doesn't have to be a property
+b2648b2 Fix PEP-8 issues
+8567352 Pass hostname to execute()
+4295530 Use 'true' instead of 'echo online'
+d0f7a47 Add Remote.ensure_online()
+3adb7d4 Use Remote.hostname in logs
+29d3299 Consolidate log file setup into shared function
+5dbce8b Use Unicode format strings
+085c508 Revert "Revert "Show hostname instead of IP in errors""
 b3203e5 rbd.cc: remove used parameter from set_pool_image_name()
 fe75075 test_librbd.cc: fix sizeof() in malloc call
 eb2def8 CrushWrapper.cc: fix sizeof() call in calloc
@@ -15344,17 +17503,22 @@ cdbe6cf client: use __func__ instead of incorrect function name in insert_readdi
 3eb2a77 client: make less noise when unlinking during readdir
 d1c872d client: invalidate dentry leases when unlinking
 d852a69 client: audit unlink() callers
+b7a7383 Allow .teuthology.yaml to set downburst path
 3b867d3 TrackedOp: create an "initiated" event on construction
 bdee119 msg: Fix inconsistent message sequence negotiation during connection reset
 b5e4cd1 osd: fix MOSDMarkMeDown name
 6b858be osd: handle race between osdmap and prepare_to_stop
 b640301 osd: fix state method whitespace
+8460c7a Force log lines to be interpreted as UTF-8
 ba01445 Fixed missing initializers issues
+cd7f268 Use binary flag for paramiko ChannelFiles
 60b1071 Removed extra semicolons
+c0ba105 Use master as default for debian upgrade.
 5986f74 doc: Ceph OSD is the standard name. This is a method of standardizing the usage of OSD so that "Ceph OSD" is the daemon, and OSD maintains its industry-standard usage of Object Storage Device.
 ddc2e1a rgw: calculate user manifest
 589b639 osd/ReplicatedPG: carry CopyOpRef in copy_from completion
 db4ccb0 ReplicatedPG: block scrub on blocked object contexts
+6fbf98b Further clarify 'too many values to unpack' error.
 3152faf osd/osd_types: add last_force_op_resend to pg_pool_t
 0f19626 ceph-disk: partprobe before settle when preparing dev
 5690232 rbd-fuse.c: remove ridiculous linebreak
@@ -15393,6 +17557,7 @@ d130763 vstart.sh: fix client admin socket path
 8059c9f rgw_rest_metadata.cc: fix -Wparentheses-equality
 8a0c016 ReplicatedPG.cc: fix -Wparentheses
 a0f59df test_rgw_manifest.cc: fix VLA of non-POD element type
+b105a07 rbd_fsx: expose krbd and related fsx options
 817985b test_librbd_fsx: align temporary buffers allocated in check_clone()
 ab9de9c test_librbd_fsx: wire up O_DIRECT mode
 c4a764c test_librbd_fsx: fix a bug in docloseopen()
@@ -15433,6 +17598,7 @@ f9a91f2 Update doc to reflect the bahavior change for filestore_merge_threshold
 cc65c39 client: add debugging around traceless reply failures
 545d8ad rgw: extend manifest to avoid old style manifest
 9968b93 rgw: fix stripe_size calculation
+4a3728d 2-workload testrgw needs to be sequential.
 6c2b173 mds: handle export freezen race
 a09070a mds: allow negetive rstat
 22abd7b mds: cancel fragmenting dirfrags when cluster is degraded
@@ -15451,18 +17617,29 @@ f386e16 mds: pre-allocate inode numbers less frequently
 7d1fd66 mds: maintain auth bits during replay
 09beebe ceph-disk: fix list for encrypted or corrupt volume
 bd8e026 rgw: don't allow multiple writers to same multiobject part
+a1838b2 Rewrite most file-retrieval functions
+01cf367 Fix linter errors
+8bed6ab Fix mktemp dir and redundant Paramiko connecting.
+36b07b8 Use SFTPClient get for long reads/writes
+091d1fe Revert "Revert "Clean up remote.py and misc.py changes.""
+0268487 Revert "Revert "Handle raw data I/O.""
+38578b8 Revert "Restrict paramiko to old versions for now"
+7b1eec9 Use longer varchar for locked_by in DB.
 03b0d1c rgw: cut short object read if a chunk returns error
 2d5d309 Pipe: wait for Pipes to finish running, instead of just stop()ing them
 6ec99f7 librbd: check return value during snap_unprotect
 6f2edda ObjectCacher: remove useless assignment
 3e387d6 osd/ReplicatedPG: fix whiteouts for other cache mode
+ea3bef1 rados.py: Add pool_snaps option for ceph_test_rados test command
 5cc5686 rgw: send user manifest header field
+4ad23dc rgw: fix indentation for cache_pools
 e65a9da Revert "Fix installation into user home directory, broken by d3f0c0b"
 b78644e (tag: v0.80) 0.80
 cdbbf86 doc: Fixed artifacts from merge.
 a31b9e9 doc: Added sudo to setenforce. Restored merge artifact.
 5158272 doc: Added erasure coding and cache tiering notes. Special thanks to Loic Dachary.
 08a4e88 Variable length array of std::strings (not legal in C++) changed to std::vector<std::string>
+33b4bfc ceph_manager: reset osd weights to 1 when waiting for clean
 ae434a3 client: check snap_caps in Incode::is_any_caps()
 4bf20af SimpleMessenger: Don't grab the lock when sending messages if we don't have to
 b038f0c OSD: rename share_map_incoming and share_map_outgoing
@@ -15522,14 +17699,22 @@ fc3318e doc: Fix hyperlink.
 a7e7219 doc: Index update and librados.
 fcbc5fa doc: Quotas for Admin Ops API.
 e97b56e doc: New Admin Guide for Ceph Object Storage.
+99e67ab rados/thrash: Add pool_snaps variants
+c86df77 Restrict paramiko to old versions for now
+0c8a3e2 Revert "Handle raw data I/O."
+02504c3 Revert "Clean up remote.py and misc.py changes."
 7539281 Fix installation into user home directory, broken by d3f0c0b
+8e1e4ba marginal/multimds: fuse_default_permissions = 0 for ceph-fuse
 24c5ea8 osd: check blacklisted clients in ReplicatedPG::do_op()
 f92677c osd: check blacklisted clients in ReplicatedPG::do_op()
 c64b67b ceph-object-corpus: rebase onto firefly corpus
 077e6f8 ceph-object-corpus: v0.80-rc1-35-g4812150
+794c946 ceph_manager: fix float stringification
 8bd4e58 Fix out of source builds
 3aee1e0 Fix clone problem
+a723ddf ceph_manager: fix typo
 fd970bb mon: OSDMonitor: disallow nonsensical cache-mode transitions
+d2d7b94 cache-snaps.yaml: set target_max_objects to test snap flush/evict
 72fdd55 osd/ReplicatedPG: fix trim of in-flight hit_sets
 8472805 Revert "ReplicatedPG: block scrub on blocked object contexts"
 f47f867 osd: Prevent divide by zero in agent_choose_mode()
@@ -15544,6 +17729,7 @@ be5a99d SimpleLock.h: remove unused private function clear_more()
 4fe31c1 linux_fiemap.h: remove twice included int_types.h
 b05e04e Dumper::dump_entries(): remove not needed variable
 9a716d8 rgw_bucket.cc: return error if update_containers_stats() fails
+ce7fa18 ceph_manager: randomly reweight in osds
 89044a6 mon/PGMonitor: set tid on no-op PGStatsAck
 4e0eaa9 mon/OSDMonitor: share latest map with osd on dup boot message
 5a6ae2a mon/PGMonitor: set tid on no-op PGStatsAck
@@ -15552,6 +17738,7 @@ b05e04e Dumper::dump_entries(): remove not needed variable
 d024594 mailmap: Florent Bautista affiliation
 61a2f06 mailmap: Warren Usui name normalization
 7b192f7 mailmap: Guang Yang name normalization
+5844c23 Bump paramiko to 1.12.0
 4662890 sample.ceph.conf update:
 9cf470c osd/ReplicatedPG: agent_work() fix next if finished early due to start_max
 9f1a916 osd/SnapMapper: pass snaps set by const ref
@@ -15564,11 +17751,20 @@ cf25bdf osd: prevent pgs from getting too far ahead of the min pg epoch
 c879e89 doc: Include links from hardware-recommendations to glossary. Included :term: in parts of hardware-recommendations so that glossary links appear. Signed-off-by: Kevin Dalley <kevin at kelphead.org>
 cc04322 mds: note MDiscoverReply encoding change in corpus
 e597068 mds: remove mdsdir in the final step of shutdown MDS
+18334ea rados/thrash: Fix workload of cache-agent-big
+27b276e rgw: test with ec + cache pool
+c5da7b2 rgw: option to create a cache pool
 1f4a3e1 mds: bump protocol
+b080355 Fix syntax of erasure coded pool creation
 1ac05fd doc/release-notes: changelog link
+d1b9353 Add branch name to job config
+ab9645f Add suite name to job config
+ba66c6b Add /build and /*.yaml to gitignore
 ffef20f doc/release-notes: final v0.67.8 notes and changelog
 0454962 Fixes a very minor typo in the release notes
 78b3c93 doc: documenting links to get-involved Create a link from documenting-ceph so that it is easy to find the github repository used for ceph.
+013a3b6 added new corrected tag 67.8
+4322ade added latest dumpling tag v0.67.8
 0f3235d ReplicatedPG: block scrub on blocked object contexts
 e66f2e3 ReplicatedPG: block scrub on blocked object contexts
 4bac8c7 rados.h,ReplicatedPG: add CEPH_OSD_FLAG_ENFORCE_SNAPC and use on flush
@@ -15592,11 +17788,26 @@ ef0de7a OSDMap.cc: prefer prefix ++operator for non-trivial iterator
 5562428 OSDMonitor.cc: prefer prefix ++operator for non-trivial iterator
 e4b3109 KeyValueStore: rename s/logger/perf_logger/
 a84fed6 crush/mapper.c: fix printf format for unsigned variable
+3fa6271 Calculate a timeout to use based on queue size
+041666b Add --runs, to print only run names
+a9d7aa3 Refactor teuthology.beanstalk
 21bbdf5 mds: avoid adding replicas of target dentry to rename witnesses
 3a7d668 mds: allow early reply when request's witness list is empty
 41d93aa mds: include authority of the source inode in rename witnesses
+adc51e1 Drop usage of safe_while
+ea9c034 Use the new ResultsReporter.session object
+ec72137 Add a requests.Session object to ResultsReporter
+61e469b Remove unused timeout arg to ResultsReporter init
+74eff43 Clean up remote.py and misc.py changes.
+257e145 Handle raw data I/O.
+3faeb08 When deleting all of a run's jobs, delete the run
+5339c1f Changes so these are not installed and still removed
 68b440d osd: automatically scrub PGs with invalid stats
 d01aa5b mon: OSDMonitor: return immediately if 'osd tier cache-mode' is a no-op
+ce77884 Changes invocation of serialize_remote_roles to internal task to avoid being run during nuke
+1532af4 Moves node: remote mapping to the internal task.
+d71a874 These will likely go somewhere better before merge
+063b6a2 Fixes #8050 Adds a cluster.yaml that is written by interactive task
 f689e5f mds: remove discover ino
 913a5dd mds: remove anchor table
 8217600 doc: Ensure fastcgi socket doesn't clash with gateway daemon socket.
@@ -15605,6 +17816,8 @@ ec11bf7 doc: Fixed inconsistent header.
 63b2964 doc: Added rhel-6-server-optional-rpms repo.
 f674f36 Copy range using fiemap not entire length
 3920f40 rbd-fuse: fix unlink
+f261687 valgrind: fix tcmalloc suppression for trusty
+bab84d4 Revert "valgrind.supp: be less picky about library versions"
 5d340d2 librbd: add an interface to invalidate cached data
 e08b8b6 librbd: check return code and error out if invalidate_cache fails
 b1df2c3 Changed the -i parameter to -r in order to avoid a conflict with a generic flag interpreted by the common code.
@@ -15614,7 +17827,9 @@ a027100 rgw: fix url escaping
 060105c ReplicatedPG: we can get EAGAIN on missing clone flush
 d83b8f5 ReplicatedPG: do not preserve op context during flush
 a60e15a doc/release-notes: v0.67.8 notes
+694827b Allow scrubbing while thrashing
 bcf92c4 rgw: fix url escaping
+2cbe1dc Only attempt to use sudo if necessary
 27ec495 Added Java Example
 8f64b5c Update librados-intro.rst
 3e41f92 client: cleanup unsafe requests if MDS session is reset
@@ -15624,10 +17839,15 @@ b8aa58a client: drop dirty/flushing caps if auth MDS' session is reset
 998b365 Changed the java code example
 5d49782 mds: terminate readlink result in respawn
 d0f1806 ceph_test_rados_api_tier: increase HitSetTrim timeouts
+f102e49 Post last_in_suite jobs, but delete when run
+5de353e Update unit test for Cluster.__repr__()
+e6e2874 Fix Cluster.__repr__()
 9ac264a Skipping '_netdev' Debian fstab option
 499adb1 rados.h,ReplicatedPG: add CEPH_OSD_FLAG_ENFORCE_SNAPC and use on flush
+ee69c7a rgw: update idle_timeout for rgw_s3tests_multiregion.yaml
 ddf37d9 Use new git mirror for qemu-iotests
 1885792 ECBackend::continue_recovery_op: handle a source shard going down
+af20985 Don't push last_in_suite jobs to paddles
 c0c2361 brag : implement --verbose on client
 7009211 brag : document the zero argument behavior
 2b16a81 brag : meaningful error messages
@@ -15644,13 +17864,18 @@ f631854 rbd: deprecate --no-settle option
 2651750 rbd: add libkrbd convenience library
 bad34e9 client: check cap ID when handling cap export message
 383d21d client: avoid releasing caps that are being used
+3a2c888 rados: add ec and rep lost_unfound_delete tests
+e64d831 task/: add tests for ec and rep mark_unfound_lost delete
 d726251 doc: Fix hyperlink to CRUSH maps.
 6902e22 doc: Added cache tiering settings to ceph osd pool set.
 0d964bc doc: Added new cache tiering doc to index/TOC.
 44e4e3d doc: Added new cache tiering doc to main docs.
+8350b6e Bump psutil version requirement
 2182815 ReplicatedPG: handle ec pools in mark_all_unfound_lost
 6769f4d (tag: v0.80-rc1) 0.80-rc1
 245923e ReplicatedPG: enable mark_unfound_lost delete for ec pools
+387110b rados/singleton/all/cephtool: whitelist scrub vs split vs agent issue
+9078513 Fix for #8115
 009e874 qa/workunits/rbd/copy.sh: skip some tests when tiering is enabled
 c0bff43 qa/workunits/rbd/copy.sh: fix test
 5daf538 ECBackend: use std::swap for boost::optional
@@ -15687,6 +17912,8 @@ f7e7588 ReplicatedPG::agent_load_hit_sets: take ondisk_read_lock
 e4a048c ECMsgTypes::ECSubWrite: fix at_version indentation
 ddf1e98 osd: track the number of hit_set archive objects in a pg
 1fb90c9 ReplicatedPG::hit_set_persist: clean up degraded check
+1448cdf Work around #8166
+b7394ef multimds: bump up timeout for misc.yaml
 95d0278 ReplicatedPG::mark_all_unfound_lost: delete local copy if necessary
 61b6564 Simple mechanical cleanups
 7a61cdb buffer: adjust #include order
@@ -15695,33 +17922,62 @@ f9e9365 Revert "ReplicatedPG::get_snapset_context: assert snap obj is not missin
 82edda2 test: handle the create-pg delay when testing cache split syntax
 b2112d5 mon: OSDMonitor: HEALTH_WARN on 'mon osd down out interval == 0'
 09985d2 mon: wait for PaxosService readable in handle_get_version
+d7967b4 rbd/thrash: factor out install + ceph
+e97b865 rbd: do most tests with a (small) cache pool in front
+03a8444 rbd/basic: factor out install + ceph
+007d975 Require requests >= 1.0
+c623b3d rados/thrash: whitelist 'must scrub before tier agent can activate'
 8fb2388 osd_types: pg_t: add get_ancestor() method
+7afc277 rados: include objectstore tests
 7e697b1 ReplicatedPG::recover_replicas: do not recover clones while snap obj is missing
+0e90c69 watch_tube() belongs to the beanstalk module
+e9a1c77 Update requests version
 3ad51c8 osd_types::object_stat_sum_t: fix add/sub for num_objects_omap
 3d0e80a osd/ReplicatedPG: check clones for degraded
+5dbc642 s/wait-for-package/wait_for_package/
 93c0515 osdc/Objecter: fix osd target for newly-homeless op
+03b8cda Refactor try_delete_jobs()
+d12e6f4 Be slightly less verbose about logging
+741c773 Look for archive_base in config
+ee33192 When deleting jobs, also delete them from paddles
+8fdea4d Submit queued jobs to paddles
+8a4de41 Rename teuthology.queue to teuthology.worker
+1449e75 Use shared methods to connect to beanstalkd
+165f5d5 When killing a run, delete paddles jobs
+66a2742 Add methods for querying and deleting jobs
 881680e mon: set leader commands prior to first election
+fc94879 safe_while: Don't sleep() on the first attempt
+e323392 Pass -D flag to teuthology report
 40e8dbb mon: EBUSY instead of EAGAIN when pgs creating
 f22e2e9 spelling corrections
 18caa1c OSD: split pg stats during pg split
 5e4a5dc osd_types::osd_stat_sum_t: fix floor for num_objects_omap
 a3d452a common/obj_bencher: Fix error return check from read that is negative on error
+4b9202b Update to use psutil 2.x API
 4db1984 osd/ReplicatedPG: add missing whitespace in debug output
+8b93c03 Generate subtasks instead of copy/pasting them
+761d769 Don't run apache functions if not using apache
+089dda1 Optionally use civetweb instead of apache
 924064f mds: dynamically adjust priority of committing dirfrags
 0640a08 mds: fix cap revoke confirmation
 8c7a5ab Use string instead of char* when saving arguments for rest-bench
 0d2177a ReplicatedPG::get_snapset_context: assert snap obj is not missing
 015df93 mon/OSDMonitor: require force argument to split a cache pool
+823219b Don't pass apache's config to radosgw
+12af2ab Rename some functions and variables
 c252345 osd: OSDMap: have osdmap json dump print valid boolean instead of string
+f82f663 Fix all but one of the PEP-8 issues
 aa6df59 mds: Fix respawn (add path resolution)
 f6db1bc mds: share max size to client who is allowed for WR cap
 358bde5 Add clone test on store_test
 308758b Make rados/rest bench work for multiple write instances without metadata conflict. Signed-off-by: Guang Yang <yguang at yahoo-inc.com>
+4c01513 Improve unlock error messages.
 9b7fa38 ReplicatedPG::process_copy_chunk: don't check snaps if we got head
 43b7c3a ReplicatedPG::finish_promote: soid.clone may have been trimmed, fix assert
 3f7861e ReplicatedPG::agent_work: skip if head is missing
 d39e003 ReplicatedPG::cancel_flush: requeue dup_ops even if !op
 edda6f7 ReplicatedPG::_rollback_to: fix comment, clone certainly could be missing
+eef2bf6 Fix kvm issues for Trusty
 37ed4b6 ceph_test_stress_watch: test over cache pool
 d0a7632 Use cpp_strerror() wherever possible, and use autoconf for portability
 502cc61 ReplicatedPG::agent_work: skip hitset objects before getting object context
@@ -15771,6 +18027,7 @@ da0d382 Revert "RWLock: don't assign the lockdep id more than once"
 632098f common_init: remove dup lockdep message
 3c54a49 Wordsmith the erasure-code doc a bit
 f6c2073 mds: finish table servers recovery after creating newfs
+9fa5c5f big: test xfs + btrfs
 3db7486 mds: issue new caps before starting log entry
 07e8ee2 test: Add EC testing to ceph_test_rados_api_aio
 69afc59 test: Add multiple write test cases to ceph_test_rados_api_aio
@@ -15781,8 +18038,10 @@ d211381 pybind: Check that "key" is a string
 9812720 librados, test: Have write, append and write_full return 0 on success
 008663a rgw, radosgw-admin: bucket link uses bucket instance id now
 6ce7116 civetweb: update subproject
+6dc2990 Change status to _status everywhere (fix regression)
 43d837d rgw: radosgw-admin object rewrite
 4c99e97 mon/OSDMonitor: ignore boot message from before last up_from
+364d0b4 ceph.conf: longer client_mount_timeout (which also applies to librados)
 a8f0953 osd/ReplicatedPG: adjust obc + snapset_obc locking strategy
 86b8594 mon: Monitor: suicide on start if mon has been removed from monmap
 02048dc mds: guarantee message ordering when importing non-auth caps
@@ -15802,6 +18061,11 @@ b297689 auth: make AuthClientHandler::validate_ticket() protected
 957ac3c RWLock: don't assign the lockdep id more than once
 4d3d89b auth: remove unused tick() method
 2cc76bc auth: add rwlock to AuthClientHandler to prevent races
+d4f2a8c Reduce runs in the dumpling-emperor upgrade suite.
+0550dd1 more statuses redefinition fixes
+30f3b01 rename variable to avoid collision
+b476ec2 remove unused import
+15a3acb fix redefinition of region
 2e8035f osd: Fix appending write to return any error back to caller
 3371a25 test: Fix Seg fault in ceph_test_rados
 edd542e tools: Improve ceph_scratchtoolpp
@@ -15814,6 +18078,7 @@ ae09361 mailmap: Yan, Zheng name normalization
 409999c rbd: Prevent Seg fault by checking read result in snap_read_header()
 9c6733e librados: Allow rados_pool_list() to get NULL buffer if len == 0
 1848a23 librados: Fix typo for read functions documentation
+b3218ee changed mon data avail warn to avoid monitor storage warnings
 a8330f5 librbd: fix zero length request handling
 22a0c1f osd: do not block when updating osdmap superblock features
 43f0519 doc: Made minor changes to quick start preflight for RHEL.
@@ -15825,6 +18090,7 @@ ab7a25c doc: Notes and minor modifications to gateway installation doc.
 79ac2f7 osd/PG: set CREATING pg state bit until we peer for the first time
 4de49e8 os/FileStore: reset journal state on umount
 1cdb738 vstart.sh: make crush location match up with what init-ceph does
+96e1774 changed idle_timeout to 300
 d2edd9c osd: drop unused same_for_*() helpers
 5d61161 osd: drop previous interval ops even if primary happens to be the same
 d3833dd osd: make misdirected checks explicit about replicas, flags
@@ -15846,6 +18112,7 @@ a5c7b27 MDCache: use a null_ref instead of NULL in a few places
 a6a0800 Server: use MutationRef instead of raw pointer
 4dedab6 MDS: switch cache object classes to use MutationRef instead of raw pointers
 9a4a429 ceph_test_rados_api_misc: print osd_max_attr_size
+a0c1952 ceph.conf: don't force lockdep on mds
 4b66868 doc: Removed --stable arg and replaced with --release arg for ceph-deploy.
 7273d9e osd/ReplicatedPG: warn if invalid stats prevent us from activating agent
 02d7e84 osd/ReplicatedPG: dump agent state on pg query
@@ -15899,21 +18166,28 @@ b219c8f ReplicatedPG: fix CEPH_OSD_OP_CREATE on cache pools
 be8b228 osd: Send REJECT to all previously acquired reservations
 18201ef doc/release-notes: v0.79 release notes
 4dc6266 Fix byte-order dependency in calculation of initial challenge
+b97c380 rados/thrash: reject backfill reservations sometimes while thrashing
+946a968 rados/singleton-nomsgr: add multi-backfill reservation rejection test
 6cb50d7 ReplicatedPG::_delete_oid: adjust num_object_clones
 0f2ab4d ReplicatedPG::agent_choose_mode: improve debugging
 0552ecb rgw: only look at next placement rule if we're not at the last rule
+39166b2 upgrade/dumpling-x: run dumpling version of s3tests
 eb23ac4 ReplicatedPG::agent_choose_mode: use num_user_objects for target_max_bytes calc
 cc9ca67 ReplicatedPG::agent_choose_mode: exclude omap objects for ec base pool
 a130a44 osd/: track num_objects_omap in pg stats
 9894a55 ReplicatedPG: handle FLAG_OMAP on promote and copyfrom
+a60b50d Fixed dumpling - emperor upgrade suites
 a11b3e8 ReplicatedPG::do_op: use get_object_context for list-snaps
 78e9813 ReplicatedPG: do not create snapdir on head eviction
 31df91e osd: add 'osd debug reject backfill probability' option
+267307e upgrade/dumpling-emperor-x: do not warn about tunables
+a5a5ba3 upgrade/dumpling-emperor-x: ignore 'scrub mismatch' from mon
 d323634 qa: test_alloc_hint: set ec ruleset-failure-domain to osd
 8e46fe0 stop.sh: unmap rbd images when stopping the whole cluster
 afc5dc5 stop.sh: do not trace commands
 0110a19 stop.sh: indent 4 spaces universally
 e4a8535 vstart: set a sensible default for ruleset-failure-domain
+552f9d8 multimds: test ceph-fuse and kclient
 c43822c lockdep: reset state on shutdown
 7a49f3d lockdep: do not initialize if already started
 6bf46e2 OSDMap: bump snap_epoch when adding a tier
@@ -15954,7 +18228,11 @@ dde1c91 osd/ReplicatedPG: continue scrub logic when snapset.head_exists doesn't
 c2e5a42 osd/ReplicatedPG: handle snapdir properly during scrub
 ed5a5e0 rgw: reverse logic to identify next part
 48fbce9 ReplicatedPG: improve get_object_context debugging
+2e997aa Report job updated time
+796af56 Handle jobs with no targets
+2e283ce multimds: fuse_default_permissions = 0 for kernel build test
 5c9b8a2 osd/PG: debug cached_removed_snaps changes
+addfed2 When ignoring a raised exception, at least log it
 824da20 librbd: skip zeroes when copying an image
 e44f85c qa/workunits/cephtool/test.sh: test 'osd pg-temp ...'
 2d4ec6a mon/OSDMonitor: clear primary_temp on osd pg_temp updates
@@ -15965,7 +18243,9 @@ d3183e3 java/test: ceph.file.layout xattr is still not there now
 cd1a9c4 Add ceph-client-debug and jerasure shared objects to RPM spec file.
 81853c6 mon/PGMap: clear pool sum when last pg is deleted
 8c761c4 mon: make 'ceph osd erasure-code-profile set ...' idempotent
+a536fd1 krbd, kcephfs: thrash osd primary-affinity
 7d321d8 qa/workunits/rados/test_alloc_hint: fix erasure syntax
+4efb57d Provide real error messages for unfound (sub)tasks
 2826fda doc: fix typos in glossary
 7fa025e .gitignore: add examples/librados files
 14418a2 autotools: s/ssse3/sse3/ typo
@@ -15982,6 +18262,7 @@ c3292e4 ceph_test_rados_api_tier: improve cache tier + scrub test
 cfd6f23 osd/ReplicatedPG: tolerate trailing missing clones on cache tiers
 b8ea656 java/test: ceph.file.layout xattr is not there now
 4f9f7f8 qa/workunits/fs/misc/layout_vxattrs: ceph.file.layout is not listed
+f895d16 valgrind.supp: be less picky about library versions
 b71e64d mds: find approximal bounds when adjusting subtree auth
 fd28ad5 doc: erasure-code development complete
 399de24 erasure-code: do not attempt to compile SSE4 on i386
@@ -15991,6 +18272,7 @@ bd6e35c rbd.cc: yes, cover formatted output as well.  sigh.
 b5a6320 Revert "ceph-conf: no admin_socket"
 44afc23 init: fix OSD startup issue
 fd76fec rbd.cc: tolerate lack of NUL-termination on block_name_prefix
+cb699f6 rados/monthrash: shorten mon pings so that freezing triggers reconnects
 056151a mon/MonClient: use keepalive2 to verify the mon session is live
 d747d79 msgr: add KEEPALIVE2 feature
 1aa1d93 ReplicatedPG: hit_set_setup, agent_setup, skip if !active
@@ -16020,8 +18302,11 @@ e811b07 mds: properly journal fragment rollback
 e535f7f mds: avoid journaling non-auth opened inode
 ffcbcdd mds: handle race between cache rejoin and fragmenting
 6963a8f mds: handle interaction between slave rollback and fragmenting
+9f3a664 rgw_pool_type: remove accidentally added empty file
 72eaa5e doc: fix typos in tiering dev doc
 1b5e8f4 mds: properly propagate dirty dirstat to auth inode
+5651ee8 upgrade/dumpling-x/parallel: restart after cuttlefish->dumpling upgrade
+dfdeb5c radosgw-agent: coerce max-entries config to a string
 38d4c71 Pipe: rename keepalive->send_keepalive
 c64d03d mon/OSDMonitor: require OSD_CACHEPOOL feature before using tiering features
 69321bf mon/OSDMonitor: prevent setting hit_set unless all OSDs support it
@@ -16032,10 +18317,14 @@ eb71924 osd/ReplicatedPG: tolerate missing clones in cache pools
 7cb1d3a qa/workunits/mon/pool_ops.sh: fix test
 233801c qa/workunits/mon/pool_ops.sh: use expect_false
 72715b2 ceph-conf: no admin_socket
+68343ee rados/thrash/workloads: make cache-agent-big use an ec base pool
 fb20823 jerasure: fix up .gitignore
 acc31e7 ceph-conf: do not log
 ffd69ab rgw: use s->content_length instead of s->length
+b6ad5c6 rgw: idle timeout config moves to the external server line
+fd6056b schedule_suite: ugly hack to set priority when scheduling
 501e31d logrotate: do not rotate empty logs (2nd logrotate file)
+158f9ba Revert "Lines formerly of the form '(remote,) = ctx.cluster.only(role).remotes.keys()'"
 91176f1 erasure-code: test encode/decode of SSE optimized jerasure plugins
 b76ad97 erasure-code: test jerasure SSE optimized plugins selection
 30e7140 osd: increase osd verbosity during functional tests
@@ -16044,6 +18333,8 @@ e9878db arch: add SSE3, SSSE3, SSSE41 and PCLMUL intel features
 c07aedb autotools: intel cpu features detection
 cc0cc15 erasure-code: gf-complete / jerasure modules updates
 12d4f38 erasure-code: allow loading a plugin from factory()
+b454bd6 rgw: add erasure coded data pool variant
+d693b3f Lines formerly of the form '(remote,) = ctx.cluster.only(role).remotes.keys()' and '(remote,) = ctx.cluster.only(role).remotes.iterkeys()' would fail with ValueError and no message if there were fewer than 1 or more than 1 key. Now a new function, get_single_remote_value() is called which prints out more understandable messages.
 506d2bb logrotate improvement: do not rotate empty logs
 dc3ce58 osd: do not make pg_pool_t incompat when hit_sets are enabled
 92859ed ReplicatedPG: include pending_attrs when resetting attrs in WRITEFULL
@@ -16055,20 +18346,38 @@ b6a431b ReplicatedPG: disable clone subsets for cache pools
 555ae12 ReplicatedPG::do_osd_ops: only return ENOTSUP on OMAP write ops
 6cb8595 ReplicatedPG::make_writeable: fill in ssc on clone
 21fc535 osd: trim copy-get backend read to object size
+5aa5566 Don't explode when finding an empty job
+67844e0 suite: allow priority to be specified when scheduling
 18c3e9e osd: fix tests due to no default erasure-code ruleset
 29f7420 Revert "osd: create the erasure-code ruleset in OSDMap::build_simple"
+b2cf052 rgw: allow overriding options
 4cf9a73 fix bug in 'defaultweight' calculation on OSD start.
 2779e2a Make sure s3_utilities are found.
+812e48a radosgw-admin: skip data sync tests when only metadata is configured
+f89c0d5 rgw: fix ec-data-pool config
+f250da8 Add log statement to find_run_info()
+b3d0c19 Exit gracefully on KeyboardInterrupt
+03974bf Add --description flag
+a6658fd s/JOB/PATTERN/
+cfe4c8b Tweak usage
+4bedc54 Correct help text
+381ecea Fix lots of linter errors
 38bcd3c osd: start_flush() should check for missing clones and return if requested
 bf87562 osd: Error from start_flush() not checked in agent_maybe_flush()
 ed43aa0 osd: Add logging of missed l_osd_agent_skip cases
 d1d99df osd: Improve logging output including pg_shard_t as osd#(shard)
+70ab6e0 Add --preserve-queue to teuthology-kill
+c404be6 Postpone creation of logger object
 4ac7808 minor corrections to package descriptions
 012bb5f minor init.d scripts lintianisation:
+47ba171 rgw: add option to use erasure coding instead of replication
+bf90414 rgw: set max-entries to 10 for data sync test
 14b743b rgw: don't modify rgw_bucket for data extra pool
 7989cbd rgw: multipart meta object uses extra data pool
 f023f90 rgw: zone placement info includes extra data pool
 3677076 rgw: add data_extra pool to bucket
+0cb00b1 radosgw-agent: default to 1000 max entries
+3e16830 internal: cleaner fix for binary gibberish in logs
 58ef0da mailmap: Inktank jenkins bot
 286131e mailmap: Huang Jun name normalization
 0160d19 mailmap: Tyler Brekke name normalization
@@ -16077,6 +18386,8 @@ a310ea2 mailmap: Guang Yang affiliation
 2faf271 mailmap: Mohammad Salehe affiliation
 27c28ad mailmap: Sharif Olorin affiliation
 9fd61c7 mailmap: Stephan Renatus affiliation
+b677bdd internal: ignore binary junk in kernel logs
+190d818 ceph: ignore daemon types that aren't configured
 01b9966 qa: Add ceph_multi_stress_watch for rep and ec
 6ec28fd ensure pybind is in the PYTHONPATH
 37899fa be nitpicky about missing references
@@ -16095,13 +18406,17 @@ e4f2d9f doc/release-notes: 0.78 final notes
 1817c23 rgw: get rid of a memory allocation
 1e7cd10 rgw: remove memory allocation
 f6c746c (tag: v0.78) 0.78
+752a76f radosgw-agent: use our mirror instead of github
 28d8e7f Revert "ReplicatedPG: disallow trans which atomically create and remove an object"
 49a0190 doc/release-notes: 0.78 notes
+4e2f36f rgw: handle empty conf case
 dbcf447 erasure-code: gf-complete detects SSE at runtime
 8c7f6c1 autotools: AX_SSE detects the compiler SSE capabilities
 5a3f6c7 test: Add erasure coding to stress watch test
 6fb6588 test: Reorg multi_stress_watch to prepare for ec option
 b110275 test: Fix ceph_filestore_dump.sh test for new EC pool creation
+798daf5 upgrade/dumpling-x/stress-split: set fastcgi idle timeout to 2 min
+3a2b77c rgw: allow fastcgi idle timeout to be adjusted
 dad0faf tests: use ^ instead of ! as invalid char
 d4d77d7 doc/release-notes: stop confusing sphinx
 78ede90 objecter: waive OSDMAP_FULL check for MDS
@@ -16112,6 +18427,7 @@ ddbb2f7 erasure-code: add gf-complete / jerasure submodules
 5c34a0f erasure-code: remove copy of gf-complete / jerasure
 0d167d2 mds: fix NULL pointer dereference in Server::handle_client_rename
 272b53b mds: avoid infinite loop in MDLog::submit_entry()
+6fd5ca9 upgrade/dumpling-x/parallel: test cuttlefish->dumpling starting point
 3cadbfb mds: fix potential invalid pointer dereference
 91c88c1 mds: rdlock dirfragtree lock when renaming directory
 a867166 mds: don't mark scatter locks dirty when dirfrag is dirty
@@ -16134,9 +18450,16 @@ b227426 Add NO_VERSION to avoid rebuilding ceph_ver.h and relinking
 cfb04b2 Makefiles: remove libkeyutils from every binary except two
 e9eb641 remove gf-complete / jerasure sub modules
 fdcf3eb ReplicatedPG::do_op: delay if snapdir is unreadable
+7088885 internal: ignore ext4 recovery msg
+0bfc365 distros: add rhel 6.4
+6252d28 distros: add wheezy
+8e748c0 ceph.conf: tolerate 500ms of clock drift (up from 250)
 7f7a998 mds/Locker: fix null deref on cap import
 4221e0d build: add gf-complete/jerasure to gitmodule_mirrors
+a27ead6 distros: add 14.04 to supported list
+8d4c467 distros: test rhel/centos 6.5 instead of 6.4
 25d04fb osd: dump priority queue state on dequeue at level 30
+99f4e9d symlink all distros facets to a common set of 'supported' targets
 ff11965 osd: fix OpWQ dump locking
 4a3464c common/PrioritizedQueue: include first item's cost in dump
 de576d5 common/PrioritizedQueue: constify a few things
@@ -16149,8 +18472,11 @@ b4420ff PG::find_best_info: fix log_tail component
 074c880 mon: Monitor: handle invalid 'quorum' command argument
 652056e mon: Properly handle errors from 'cmd_getval()' when needed
 543c642 erasure-code: disable SSE extensions
+d7f3eb3 knfs: make it a list
 1c5411a erasure-code: update gf-complete v1 submodule
 d5e38d4 erasure-code: update jerasure v2 submodule
+2997660 knfs: async export
+6acfa6d ceph_manager: update ec_pool creation parameters
 eac224a doc: update the erasure-code dev documentation
 b273011 osd,mon: use profile instead of properties
 8cf85f0 mon: add osd crush create-erasure functional tests
@@ -16163,18 +18489,27 @@ f8aa1ed mon: set the profile and ruleset defaults early
 063de51 osd: obsolete pg_pool_t properties with erasure_code_profile
 04d2fd1 mon: add the erasure-code-profile {set,get,rm,ls} MonCommand
 fa1d957 mon/Paxos: commit only after entire quorum acks
+a42e197 rados/monthrash: add pool create/delete tests
+2b90be2 ceph_manager: default to 16 initial pgs per pool
+847462b ceph_manager::wait_for_clean: reset timeout if we make progress
 aed0744 os/FileJournal: return errors on make_writeable() if reopen fails
 c31f38c ReplicatedPG: if !obc->obs->exists, lock snapdir obc if exists
 9ee1084 ReplicatedPG: disallow trans which atomically create and remove an object
 f094400 Add file to store mirror location of module's.
+ef2edcd Pass '--object-sync-timeout 30' to radosgw-agent
 a9f8a9e ceph.in: Better error on bad arg to 'tell'
+98e27d7 Fix max_job_time timeout
 1a451f2 mon: functional tests teardown must be run on error
 514b5e3 mon: add helper to selection functions implementing tests
 e4b4b1f osd: OSDMap::erasure_code_profile accessors
 c4f8f26 osd: add OSDMap::erasure_code_profile
 2b9bd26 mds: avoid spurious TMAP2OMAP warning
+57c8fd9 prevent undefined values in finally block
 e39c213 ceph.in: do not allow using 'tell' with interactive mode
+8f542db schedule_suite.sh: turn up journal log too
+d3c294d Make distro kernels work on Trusty.
 b2af217 mds: Fix remotebit::dump for less common types
+eb6cd3a knfs: 6h timeout of kernel build
 9d77ce1 tools/rados: Allow binary file output of omap data
 be31998 erasure-code: make libcommon include liberasure-code
 e6d9066 erasure-code: add ostream to factory()
@@ -16194,13 +18529,16 @@ cf25946 mon: create-erasure uses crush_ruleset_create_erasure
 6a16eac mon: create crush_ruleset_create_erasure helper
 1ae3314 client: force getattr when inline data is missing
 04de781 OSD::handle_pg_query: on dne pg, send lb=hobject_t() if deleting
+2c87559 multimds: fix misc -> fs/misc paths
 9e21840 mds: include inline data in lock messages
 5b3422a mds: fix corner case of pushing inline data
+7597172 upgrade/dumpling-x/stress-split: tolerate legacy crush tunables
 b2fcc6e Remove code duplication from s3 tests.
 979e8b4 PG::build_might_have_unfound: check pg_whoami, not osd whoami
 0f75c54 osd/ReplicatedPG: fix enqueue_front race
 ef1d7c9 rados.py: Fixed docstring syntax warnings.
 02b746d doc: Fixed release notes syntax warnings.
+752380d added print task
 9cd67bb doc: Fixed hyperlink.
 599a8d7 test: Add ceph_filestore_dump.sh to test ceph_filestore_dump
 31a6679 tools: Fix ceph_filestore_dump to fully operate on EC pools
@@ -16221,12 +18559,19 @@ fba88de ceph-mon: be a bit more verbose on error
 70d87df ceph_mon: output error message if unable to bind.
 5ad9c16 ceph_mon: all output after initial fork go to dout/derr
 c95234a ceph_mon: split postfork() in two and finish postfork just before daemonize
+22c461b upgrade/dumpling-x: more mon scrub whitelist
 ceac36b doc/release-notes: 0.78 draft notes; firefly draft notes
+2e487d8 knfs: test v3 and v4
 87c911c osd/ReplicatedPG: release op locks on on commit+applied
+4773a27 knfs: add fsstress and kernel build
+e1694b4 knfs: restructure a bit
+7b0eda0 nfs -> knfs
 c5b557e qa/workunits: misc -> fs/misc
+adca646 workunits/misc -> fs/misc
 8c8b3e9 PGLog: remove unused variable
 282497e osd: add tunables for cache_min_{flush,evict}_age
 fa6887b osd: set default cache_target_{dirty,full}_ratios based on configurable
+8b4c8cb upgrade/dumpling-x: whitelist mon scrub mismatch
 a72b636 mds: fix empty fs rstat
 f2124c5 ceph_test_rados: wait for commit, not ack
 dd946e0 MOSDOp: include reassert_version in print
@@ -16254,7 +18599,9 @@ fb4ca94 mailmap: Danny Al-Gaaf name normalization
 fb8ff44 doc/release-notes: note that WATCH can get ENOENT now
 2cbad1b test/librados/watch_notify: create foo before watching
 9d549eb test/system/st_rados_watch: expect ENOENT for watch on non-existent object
+efdee0d upgrade/dumpling-x: upgrade final client node for final test
 b23a141 RGWListBucketMultiparts: init max_uploads/default_max with 0
+7fb5e14 upgrade/dumpling-x: full librados tests after full upgrade
 4057a30 AbstractWrite: initialize m_snap_seq with 0
 90a2654 ReplicatedPG::already_(complete|ack) should skip temp object ops
 72bc1ef AdminSocket: initialize m_getdescs_hook in the constructor
@@ -16267,12 +18614,21 @@ a576eb3 PG: do not serve requests until replicas have activated
 980d2b5 ECBackend: when removing the temp obj, use the right shard
 dc00661 osd_types: print lb if incomplete even if empty
 8e76e4e build-doc: fix checks for required commands for non-debian
+1ccabd8 lock: allow -a with --brief
 dc82cd7 debian: make ceph depend on ceph-common >= 0.67
+d5442a5 multimds: new (separate) suite for multi-mds tests
+88efa65 Revert "fs/basic: multimds"
+cfbbcf7 upgrade/dumpling-x/stress-split: use dumpling workunits
+b455846 Handle newer btrfstools.
 d573710 rgw: don't overwrite bucket entry data when syncing user stats
+a5a94ae Handle newer btrfstools.
 2fbd772 qa/workunits/cephtool/test.sh: fix thrash (more)
 64a6b26 doc/release-notes: fill in some firefly history
 f4196cc doc/release-notes: firefly draft release notes
+72094a8 fixed final_load problem
+1c61133 radosbench: cleanup will probably take longer than writing the objects
 24774a8 osd/ReplicatedPG: fix typo
+728bd3c upgrade/dumpling-x: upgrade mon before osd
 3d5a4b5 ReplicatedPG: CEPH_OSD_OP_WATCH return -ENOENT if !obs.exists
 00bf3b5 osd/ReplicatedPG: do not include hit_set objects in full calculation
 1836b6c osd: hit_set_persist(): Verify all objects aren't degraded
@@ -16286,7 +18642,9 @@ fa30eb5 rados.py: fix typo in Ioctx::read() docstring
 745f72c Fixed get_status() to find client.radosgw fields inside of ps output.
 880bc3a Fix get_status() to find client.rados text inside of ps command results.
 fbd9c15 osd: Remove unused checkpoint code
+69ed31c lock.py: allow --brief to stand on its own (without --list)
 d3e3df7 mds: fix owner check of file lock
+ed6a200 Add print task.
 8a72de3 ReplicatedPG: adjust pending_attrs correctly in copy_from
 6669e4d ReplicatedPG: _delete_head should adjust pending_attrs
 60c1b9a ReplicatedPG: use pending_attrs in rollback
@@ -16317,22 +18675,37 @@ d61fcfa ceph-filestore-dump.cc: pass OSDSuperblock by reference
 246564b pg_t::get_split_bits: add assert to silence coverity
 aba5b7c rbdmap: bugfix upstart script
 2e342d6 FileStore: support compiling without libxfs
+7cb750c radosgw-admin: test data sync with more than max-entries objects
+dcad92e radosgw-admin: check that data deletions work as well
+1143539 radosgw-admin: fix typo in data sync test
+a74cf33 radosgw-admin: ignore 301 test against the same src and dest regions
+55fad94 rgw: create clients in all zones when regions are used
+1d8dcc4 rgw utils: add function to get data log window configuration
+8ba02bf radosgw-agent: add logging to radosgw-admin task tests
+871fe02 rgw: set placement targets for zones
+93f5d60 radosgw-admin: don't compare pools for different zones
+4d3c1a1 Adds radosgw-agent small file sync test
 2626604 erasure-code: LARGEST_VECTOR_WORDSIZE is always 16
 2beb2a5 erasure-code: upgrade to jerasure version 2
 b74115a autotools: set SIMD_FLAGS with SSE flags
 4105ab8 erasure-code: use jerasure as submodules
 eb6ffdb erasure-code: remove jerasure version 1
+f92d3a5 rgw: add data sync test
+e5a43b7 rgw: use different keys for different system users
 18506ad Removed all regular expression parsing and used '-f json' instead
 4cb6b2a Modified num_bytes attribute in components_count to be a raw integer
 18bdee6 MDSMonitor::prepare_command: fix "mds set"
 7884780 osd_types.cc: add missing break in operator<< case handling
+2cf0908 upgrade/dumpling-x: fix order
 83731a7 ReplicatedPG::finish_ctx: clear object_info if !obs.exists
 a7afa14 config.cc: add debug_ prefix to subsys logging levels
+a4dfbc8 workunit: change timeout 1h -> 3h
 55c23a1 qa: add script for testing rados allocation hint
 54ffdcc get-involved.rst: update information
 d1a888e swift/containerops.rst: fix some typos
 93b95a2 radosgw/troubleshooting.rst: s/ceph-osd/OSD/
 2223a37 radosgw/config-ref.rst: fix typo
+0b9d893 Add missing space in error message
 87618d4 session_authentication.rst: fix some typos
 682c695 release-process.rst: fix some typos
 72ee338 doc: s/osd/OSD/ if not part of a command
@@ -16341,6 +18714,13 @@ bbd1c4b filestore-filesystem-compat.rst: fix typo
 ae123a6 corpus.rst: fix typo
 cf9f017 config.rst: fix typo
 5aaecc7 cephx_protocol.rst: fix typo
+e471f40 Make try_push_job_info() retry using safe_while
+73849c1 Update safe_while's suggested usage pattern
+c980984 Add optional _raise parameter
+eb66767 Pass timeout to _spawn_on_all_clients()
+4e01884 Log correct action in CephManager.remove_pool()
+72c63f1 Log timeout value
+b4205ca Iterate more sensibly over processes
 2cbb0a4 architecture.rst: fix typos
 f581bda rados: add set-alloc-hint command
 a4cbb19 rados/operations/control.rst: fix typo
@@ -16350,6 +18730,7 @@ a4cbb19 rados/operations/control.rst: fix typo
 7c77ff6 TestPGLog: add a test case verifying case where we have the prior_version
 e830f9f TestPGLog: check on last_update in run_test_case
 4d6a74d TestPGLog::proc_replica_log: call doesn't adjust olog
+9db6656 upgrade/dumpling-x: do not thrash primary-affinity
 71b4474 client: fix Client::getcwd()
 617ce67 mds: introduce LOOKUPNAME MDS request
 1c8c618 qa/workunits/cephtool/test.sh: fix 'osd thrash' test
@@ -16358,18 +18739,45 @@ b62f9f0 mon/OSDMonitor: feature feature check bit arithmetic
 c8b34f1 mon/PGMonitor: improve debugging on PGMap updates slightly
 819cce2 mon/OSDMonitor: make osdmap feature checks non-racy
 b9bcc15 mon/OSDMonitor: prevent set primary-affinity unless all OSDs support it
+204b3ac Change default workunit timeout to 1h
+9109008 stress-split: use dumpling version of rbd.py tests
 5f7efec tools/rados/rados.cc: use write_full for sync_write for ec pools
+ec38bd3 Use safe_while's action arg
+73f5af2 Add optional 'action' parameter to safe_while
 38fd666 qa: workunits/mon/rbd_snaps_ops.sh: ENOTSUP on snap rm from copied pool
 c13e1b7 mon: OSDMonitor: don't remove unmanaged snaps from not-unmanaged pools
 135c27e osd: Add hit_set_flushing to track current flushes and prevent races
+7604a1b Update safe_while users to reflect new defaults
+8258c84 Change safe_while defaults to 6s 10x no increment
+081a5c4 Look for ready() in the right place
+1778d35 Use a timeout of config.get('time') * 2
+0be5f1f Introduce a timeout to radosbench's join phase
+aea501b fs/basic: multimds
+0815e4f rados/singleton-nomsgr: add allocation hint test
+fbb05b7 Remove the mds-mon-osd upgrade sequence
 3dd09e3 Removed nw_info from sysinfo
 09a317f Made crush_types a map of type to count, so we can tell how many racks/rows/hosts/etc are there
 e53aed2 SubmittingPatches: clarify "github fork" in preference to "clone"
+4b11d07 Mark this 'while True' loop with 'finite' comment
 c9eaa65 Changed Availability section of ceph-mds.rst to reference ceph-mds, not ceph-mon. Signed-off-by: James Ryan Cresawn <jrcresawn at gmail.com>
+20bfc97 Give up on wait_until_healthy() after 15min
+94d73bd suite: fix build_matrix for + case
+136775b Don't warn on legacy crush tunables
+57259b5 rados: use backwards compatible args
+70de7d5 Revert "Do not spawn a parallel task if dictionary entry does not exist."
 09668a4 osd: fix agent thread shutdown
+bda2fd3 upgrade: fix typo in subsection name
+487be43 upgrade: fix indentation for final-workload
+022a6a3 Revert "The parallel task expects dicts here..."
+79e3483 The parallel task expects dicts here...
 7411c3c logrotate: copy/paste daemon list from *-all-starter.conf
 b6872b2 ReplicatedPG::trim_object: use old_snaps for rollback
 b5b67d1 ReplicatedPG: use hobject_t for snapset_contexts map
+7b497e1 get test-upgrade-firefly.sh from the dumpling branch
+7d1e97f Revert "Update rados_api_tests.yaml"
+5146143 upgrade/dumpling-x: more indentation
+0bf78e6 upgrade/dumpling-x: fix indentation
+182667a Fix parallel workunit issues.
 b436930 qa/workunits/rest/test.py: do not test 'osd thrash'
 237f0fb os/ObjectStore: dump COLL_MOVE_RENAME
 f888ab4 ReplicatedPG: consistently use ctx->at_version.version for stashed object
@@ -16391,10 +18799,17 @@ a71ddb0 mon: make quorum list (by name) be in quorum order
 8b3934f PGBackend::rollback_stash: remove the correct shard
 1ddec86 FileStore::_collection_move_rename: propagate EEXIST
 ca12e0d qa/workunits/mon/crush_ops: use expect_false
+9a8bf66 ceph.conf.template: add in sensible erasure coding defaults
+c3766c5 upgrade/dumpling-x: skip rados api tests that fail against firefly OSDs
+e6698af ceph_manager: fix erasure coding m, k values
 e016e83 test: Fix tiering test cases to use ---force-nonempty
+e69da0a Log job PID
 0592368 mon: warn when pool nears target max objects/bytes
 f6edcee mon/PGMap: return empty stats if pool is not in sum
+f0ac0f6 upgrade/dumpling-x/parallel: do all final workloads
+bce605d upgrade/dumpling-x: do not upgrade client host
 640ff98 test: Use non-default out/ dev/ paths in vstart
+c3c0b08 Add a 6h timeout to workunits
 1685c6f crush: revise JSON format for 'item' type
 d4950a1 mailmap: Danny Al-Gaaf affiliation
 0eac1ba mailmap: Bjørnar Ness affiliation
@@ -16406,6 +18821,7 @@ a85d0ef mailmap: Steve Stock affiliation
 8fdfece ReplicatedPG::fill_in_copy_get: fix early return bug
 364fed8 packaging: use wildcard for test files in Debian
 65f3354 Make symlink of librbd to qemu's folder so it can detect it.
+1929196 Fix dumpling-x upgrade suite.
 d0b1094 ECBackend,ReplicatedPG: delete temp if we didn't get the transaction
 f2a4eec PGBackend/ECBackend: handle temp objects correctly
 308ea1b ECMsgTypes: fix constructor temp_added/temp_removed ordering to match users
@@ -16434,6 +18850,8 @@ c029c2f mon/OSDMonitor: add 'osd tier add-cache <pool> <size>' command
 eddf7b6 osd/ReplicatedPG: fix agent division by zero
 08efb45 OSDMonitor: do not add non-empty tier pool unless forced
 12909bb mds: check projected xattr when handling setxattr
+adb04f8 Removed branch ref to fix http://tracker.ceph.com/issues/7584
+8e88922 Update rados_api_tests.yaml
 20fe162 TestPGLog: tests for proc_replica_log/merge_log equivalence
 9a64947 TestPGLog::proc_replica_log: adjust wonky test
 6b6065a TestPGLog::proc_replica_log: adjust to corrected proc_replica_log behavior
@@ -16458,6 +18876,7 @@ a234053 OSD,config_opts: log osd state changes at level 0 instead
 28c29c1 Revert "ObjectCacher: remove unused target/max setters"
 d00a927 Revert "librbd: remove limit on number of objects in the cache"
 195d53a rgw: off-by-one in rgw_trim_whitespace()
+2bf2702 increase verbosity for OSDs for ceph-deploy tests
 37e7817 In database delete, Session.flush() has to be called appropriately to avoid foreign key conflicts in delete() request to the database
 f3d6491 Following changes are made 1. Increased the String length for distro, version and os_desc columns in osds_info table 2. Corrected version information extraction in client/ceph-brag 3. Removed the version_id json entry when version list returned for UUID 4. Updated the README to reflect point 3
 3cc8b27 Modified the String variables in db.py to be of fixed length to support databases which don't have VARCHAR support
@@ -16475,6 +18894,7 @@ e782051 mailmap: Andrey Kuznetsov affiliation
 7b6d417 mailmap: Wang, Yaguang affiliation
 855edc6 Fix typo ceph-disk
 43b7b0b mailmap: The Linux Box affiliations
+7889acb ceph-manager: fix ec_pool parameters
 62fd382 osd_types,PG: trim mod_desc for log entries to min size
 d4118e1 MOSDECSubOpWrite: drop transaction, log_entries in clear_buffers
 718cda6 TrackedOp: clear_payload as well in unregister_inflight_op
@@ -16506,6 +18926,7 @@ a7b7c31 client: use ceph_seq_cmp() to compare cap seq/mseq
 0bf5f86 store_test.cc: fix unchecked return value
 7eefe85 histogram.h: fix potential div by zero
 500206d ReplicatedPG.cc: fix resource leak, delete cb
+fd507ed Allow setting kdb to fail.
 fbb1ec8 ECBackend: don't leak transactions
 b0d4264 OSD::handle_misdirected_op: handle ops to the wrong shard
 123ff9e osd: stray pg ref on shutdown
@@ -16514,6 +18935,10 @@ b0d4264 OSD::handle_misdirected_op: handle ops to the wrong shard
 bfad17b librados: fix ObjectIterator::operator= for the end iterator
 a850a38 doc/dev/cache-pool: fix notes
 f0241c8 mon/OSDMonitor: make default false-positive-probability 5%
+bd9748d Added --limit option to teuthology-suite.
+8dfcfa4 mds_thrash: Fix a potential getitem on None
+22825c2 mds_thrash: Refactor gevent usage + get traceback
+f12426c mds_thrash: PEP8-ize whitespace
 30aa2d6 client: clear migrate seq when MDS restarts
 c1e40c6 client: fix race between cap issue and revoke
 5c55eb1 client: check mds_wanted for imported cap
@@ -16521,23 +18946,44 @@ c1e40c6 client: fix race between cap issue and revoke
 6797d30 client: call handle_cap_grant() for cap import message
 154efb1 client: don't update i_max_size when handle reply from non-auth MDS
 9a0ef6a Fix python-requests package dependencies.
+3c87b84 Worker logging tweaks
+0dcf3f4 --dead implies --refresh
+d42f31e Symlink worker log after child starts
+3447812 In find_job_info(), also look for orig.config.yaml
 c07a758 mds: Add dump-journal-entries
 7a985df mds: Create MDSUtility as base for Dumper/Resetter
 410c507 mds: Fix Dumper shutdown
+a3e052b suites/fs/basic: add mds_creation_retry
+f8a2a53 Push complete info when reporting jobs as dead
 7ba3200 mds: fix nested_anchors update during journal replay
+6ba8985 fix docstring typo
+7cc9375 task: Add mds_creation_failure
 1040d1b osd/OSDMap: respect temp primary without temp acting
+26f00fc Make help a bit more obvious. Misc tweaks.
 8020dcf Fixed get_status() to find client.radosgw fields inside of ps output. Fixes: 7375 Signed-off-by: Warren Usui <warren.usui at inktank.com>
 8200b8a Fix get_status() to find client.rados text inside of ps command results. Added port (fixed value for right now in teuthology) to hostname. Fixes: 7374 Signed-off-by: Warren Usui <warren.usui at inktank.com>
 be2748c OSDMap::_pg_to_up_acting_osds: use _acting_primary unless acting is empty
 dc079eb OSDMonitor: when thrashing, only generate valid temp pg mappings
+9e55f6f Add missing fedora yamls.
+67ed25f Run ceph-deploy on fedora19 not old fedora18.
+d741cd9 Add missing fedora yamls.
+9fb8231 Run ceph-deploy on fedora19 not old fedora18.
+e04f8fd Add teuthology-queue command for beanstalk Management.
 891343a rados.py: add aio_remove
 9f7f4ed Revert "osd/PG: fix assert when deep repair finds no errors"
 728e391 osd: Don't include primary's shard in repair result message
+2591935 use itertools for seconds sum
+60892ca tests for the new while helper
+38cead6 Flip logic of checking whether a branch can report
+171a5e1 add a helper for while loops
 3ee71a5 doc: troubleshooting-mons.rst: better document how to troubleshoot mons
 69082a6 mds: add mds_kill_create_at for testing
 27968a7 ceph_test_objectstore: fix i386 build (again)
+87ebe46 upgrade/dumpling: add recent dumpling point releases
+bad8e60 upgrade/dumpling: upgrade to latest dumpling, not emperor
 14ea815 mon/OSDMonitor: fix osdmap encode feature logic
 7357b6e PG: skip pg_whoami.osd, not pg_whoami.shard in scrub feature check
+c6a22b3 rados/thrash/workloads: enable copy_from for the ec workloads
 696fa36 OSD::project_pg_history needs to account for acting_primary/up_primary
 0442b45 Objecter/OSDMap: factor out primary_changed() into static OSDMap method
 d0359f7 PG: clarify same_primary_since updates regarding primary rank
@@ -16597,12 +19043,72 @@ a9677e1 Revert "ReplicatedPG::recover_backfill: adjust last_backfill to HEAD if
 133ddb7 packaging: match all test files
 1c129df packaging: add missing file
 bd59db2 ceph-object-corpus: prune some old releases
+6e28480 schedule_suite: debug filestore = 20 too
 76046cf dencoder: check for radosgw build option
 5373ee2 osd: use ceph scoped shared_ptr
+f9d8c10 removed leftover dirs again
+fee57a4 removed one upgrade-sequence step and re-named the rest of steps
+70e217f schedule_suite: debug osd = 20 for the time being
+a53454b jenkins: fail fast during job execution
+fd6f61d removed unfinished stress-split suite from
+8ba6a2b removed obsolete suite from upgrades dir and added dumpling-emperor-x parallel suite
+ca600a2 Add unit test for task.devstack.parse_os_table()
 0ebb1f8 mon/MDSMonitor: Cope with failures during creation
+5411613 Add debug output for reconnect() failures
+05842e9 Add explanations to asserts
+cfb7b94 Guard against ctx.config being unset or None
+dadc9f7 Do not spawn a parallel task if dictionary entry does not exist.
 c6e6ced PG::build_might_have_unfound: skip CRUSH_ITEM_NONE
+b92578e Remove unused import
+b638dc4 Don't run the exercise subtask by default
+741b4de Add a retry to the rbd volume verification
+aecd98d Fix incorrect exception name in docstring
+830748a Move external docs URLs
+f7b1d92 Wait 30s for Keystone to start
+11b60fe Change usages of StringIO.read() to getvalue()
+fdaaee5 Source openrc before creating volume
+f53028f Add a new subtask: smoke()
+7c57127 Forget about saving the exercise.sh log, for now
+85e63dd Allow using different devstack branches
+5c12d36 Add devstack.exercise subtask
+e3f336d Make reconnect() use Remote.reconnect()
+61e9ac4 Move reboot() to teuthology.misc
+1228ac8 Add documentation to reboot()
+3dcde65 Add note about memory requirements
+021f84a Simplify create_if_vm's downburst config logic
+27bad44 Move restart_apache() to after the reboot
+e73df9c Make rejoin-stack.sh start screen in detached mode
+a4bf1f8 Actually call Remote.reconnect()
+1a0be5a Tweaks to is_online and reconnect()
+a1ed3d4 Add logging calls to each helper function
+fe0aedf Set apache ServerName, reboot, restart devstack
+297096b Add Remote.reconnect()
+2289496 Use new init parameters for Remote
+8dfac43 Make Remote.is_online more accurate
+71b4bfe Remote instances can now establish SSH connections
+1c71558 Add Remote.is_online property
+a2a5aed Add Remote.hostname property
+69beebc get_file() returns a str, not a StringIO...
+375d7dd Use ConfigParser.set() instead of update()
+a55c22a Work around a bug in ConfigParser
+119b438 Call seek(0) on StringIO objects before reading
+b474d28 Fix typoed virsh call
+f500634 Don't pass owner to copy_key()
+b33686f Use get_file() and sudo_write_file()
+cedd128 Split out key generation
+76588e9 Add update_devstack_config_files()
+b40f5f7 Add set_libvirt_secret() and split out other steps
+c6b5c01 Split out devstack-ceph configuration
+b674048 Copy ceph auth keys over to devstack node
+74355e5 Add optional 'owner' arg to sudo_write_file()
+ed51256 Fix linter errors.
+10e12c2 Add skeleton devstack task
+dfe6052 Add copy_file() to scp files between remotes
 1f30d1a ECBackend: deal with temp collection details in handle_sub_write
+b4ce61c Add back in print in ceph_manager that the last checkin accidentally removed
+30f8938 Fix formatting and add docstrings to ceph_manager.py
 c703a89 ReplicatedPG::on_global_recover: requeue degraded, then unreadable
+474d86c upgrade: dumpling-next -> dumpling-x
 caf2edf rgw: minor cleanup
 1307ba9 mds: Table read failure should log at error level
 81bcf43 mds: Don't create the /.ceph directory
@@ -16622,10 +19128,14 @@ b3bb9ef doc/release-notes: v0.77
 fa96de9 doc/release-notes: v0.67.7
 1bca9c5 (tag: v0.77) v0.77
 40bdcb8 osd/,mon/: add (up|acting)_primary to pg_stat_t
+d78b9b0 jenkins: add debugging output to shell script
+f3ac787 Fix syntax error in first docstring added.
+10fee0e Revert "Show hostname instead of IP in errors"
 0427f61 rgw: fix swift range response
 2b3e3c8 rgw: fix etag in multipart complete
 859ed33 rgw: reset objv tracker on bucket recreation
 53de641 radosgw-admin: don't generate access key if user exists
+f0bfff1 add jenkins-pull-requests-build script
 9e8882e BackedUpObject::get_xattr() pass function parameter by reference
 53b3689 TestRados.cc: use !empty() instead of size()
 86b0879 ErasureCodeBench: prefer prefix ++operator for non-trivial iterator
@@ -16662,7 +19172,11 @@ a40cd50 osd/ReplicatedPG: load older HitSets into memory
 afbd58e rgw: don't try to read bucket's policy if it doesn't exist
 3ed68eb rgw: return error if accessing object in non-existent bucket
 b0dcc79 radosgw-admin: gc list --include-all
+b9e1341 rados/thrash: add ec-radosbench workload
+386650d rados/thrash: add ec workloads
 609f4c5 Throw a Timeout exception on timeout.
+2718dbc radosbench: simplify pool creation and add ec_pool support
+e371565 task/: add ec_pool and append to rados.py
 1975441 dencoder: fix for new rgw manifest code
 b3ce188 cls/rgw: fix debug output
 3fb6e25 test/rgw: manifest unittest
@@ -16700,6 +19214,8 @@ d3c6f17 test/librados: use connect_cluster_pp() instead of duplicating it
 9630f2f test/librados: create general test case classes
 6273ba4 test/librados: move test.cc into its own library
 abca34a Objecter: keep ObjectOperation rval pointers for writes
+4c930a0 Revert "Capture stderr and include in CommandFailedError"
+c93f14f filestore_idempotent: filestore -> objectstore path change
 4bee6ff osd/ReplicatedPG: clean up agent skip debug output
 d1a185b osd: l_osd_agent_{wake,skip,flush,evict}
 dbec109 osd: l_osd_tier_[dirty|clean]
@@ -16752,6 +19268,7 @@ f2826fa src/osd: pass in version to recover to recover_object
 1c93e03 osd/: Add a shard parameter to SnapMapper to handle multiple pg shards
 f678aef TestPGLog: remove test with DELETE op with prior_version = eversion_t()
 09d611d PGLog::merge_old_entry: simplify the oe.version > ne.version case
+5f8eedf rados/thrash: test rados against cache pool with agents
 e6a1122 osd: l_osd_tier_promote
 d116e55 PG: just ignore rollbacks on objects we have already failed to rollback
 0e7b10a PG::remove_snap_mapped_object: use pg_whoami.shard
@@ -16771,6 +19288,9 @@ d8b6d4c ReplicatedPG: release_op_ctx_locks in all-applied, not all-committed
 e0b0508 ReplicatedPG: reject unaligned writes on ec pools
 211fc4e librados: add calls to expose required write alignment
 4c1338f SimpleMessenger: init_local_connection whenever my_inst changes
+749bf40 iozone: kill empty suite
+66ffaa6 kcephfs, krbd: do not thrash primary-affinity
+3d0ce69 thrashosds: allow primary-affinity thrashing to be disabled
 bd8fcd2 osd: improve whiteout debug prints
 63f5a79 osd/ReplicatedPG: make agent skip blocked obcs
 7997646 ReplicatedBackend: print char ack_type as int
@@ -16853,6 +19373,8 @@ e65c280 osd: add pg_pool_t::get_pg_num_divisor
 fb4152a histogram: move to common, add unit tests
 85a8272 histogram: rename set -> set_bin
 8b68ad0 histogram: calculate bin position of a value in the histogram
+63ad9aa thrashosds: randomly adjust primary_affinity
+dc95aef ceph.conf: mon osd allow primary affinity = true
 d921d9b qa: do not create erasure pools yet
 4560078 common: ping existing admin socket before unlink
 c673f40 osd/OSDMap: include primary affinity in OSDMap::print
@@ -16861,24 +19383,40 @@ ba3eef8 mon/OSDMonitor: add 'mon osd allow primary affinity' bool option
 c360c60 ceph_psim: some futzing to test primary_affinity
 f825624 osd/OSDMap: add primary_affinity feature bit
 8ecec02 osd/OSDMap: apply primary_affinity to mapping
+1d6aceb upgrade: reorg
 871a5f0 ceph.spec: add ceph-brag
 4ea0a25 debian: add ceph-brag
 57d7018 ceph-brag: add Makefile
 cf4f702 mon/Elector: bootstrap on timeout
 4595c44 mon: tell MonmapMonitor first about winning an election
 7bd2104 mon: only learn peer addresses when monmap == 0
+64ff4ab Fixed task/args.py docstrings.
+c631190 Add docstrings. Fix formatting issues for rgw.py. Fix s3tests calls for rgw-logsocket.py.
 3c76b81 OSD: use the osdmap_subscribe helper
 6db3ae8 OSD: create a helper for handling OSDMap subscriptions, and clean them up
 5b9c187 monc: new fsub_want_increment() function to make handling subscriptions easier
+6df622b Fix docstrings and minor formatting in task/ssh_keys.py
+0af0807 Fix docstrings for task/blktrace.py, peering_speed_test.py, proc_thrasher.py and recovery_bench.py.
 7d398c2 doc/release-notes: v0.67.6
+ef97f0c Fix docstrings in radosgw-admin-rest.py
+e0ed09c Add docstrings to the orchestra code.  Also fix minor formatting issues (mostly column start locations).
+a6eee01 Review: switch to selecting remotes by roles in the 'normal' way
+006428c Review: refactor packaging routines to common file
 0ed6a81 osdmaptool: add tests for --pool option
 f98435a osdmaptool: add --pool option for --test-map-pgs mode to usage()
 eedbf50 osdmaptool: fix --pool option for --test-map-object mode
+6810610 Add docstrings to internal.py
 e44122f test: fix signed/unsigned warnings in TestCrushWrapper.cc
+5c36a5c Add doc strings to Swift tests
+567961b Readjust the indentation of mon_clock_skew_check.py and mon_thrash.py. Added docstrings.
+12b7904 Review: Rename calamari_testdir/ to calamari/
+bee587c Added some docstrings.
+54e9e3e fixed yaml typos
 64cedf6 OSD: disable the PGStatsAck timeout when we are reconnecting to a monitor
 794c86f monc: backoff the timeout period when reconnecting
 60da8ab monc: set "hunting" to true when we reopen the mon session
 1a8c434 monc: let users specify a callback when they reopen their monitor session
+de46859 added distros
 589e2fa mon: ceph hashpspool false clears the flag
 7834535 mon: remove format argument from osd crush dump
 020e543 mon: do not goto reply if a ruleset exists in pending
@@ -16905,23 +19443,34 @@ d012119 mds: remove xattr when null value is given to setxattr()
 9035227 doc/release-notes: do not downgrade from v0.67.6
 7533b3b doc/release-notes: note about dumpling xattr backport
 cc1e844 PendingReleaseNotes: note about cephfs backtrace updates
+2a6ec2d added parallel dumpling-emperor-next upgrade tests
 74951ca osd/OSDMap: pass pps value out from _pg_to_osds
 e107938 osd/OSDMap: fix _raw_to_up_osds for EC pools
 1cc8c25 mon/OSDMonitor: add 'osd primary-affinity ...' command
 cee9142 osd/OSDMap: add osd_primary_affinity fields, accessors, encoding
+f065510 Fix flake8 failures
 af4c142 mon/OSDMonitor: fix legacy tunables warning
 a0b2c74 osd/OSDCap: handle any whitespace (not just space)
 824dd52 mon/MonCap: handle any whitespace (not just space)
 5a6c950 packaging: do not package libdir/ceph recursively
+d53a5b6 Spelling
+d39dd8c Log correct name of context during errors
+ab2a01f Catch a case where we weren't getting tracebacks
+ca83b7b Timestamp stdout log output
+896412f Remove spurious print
+d0a4349 Capture stderr and include in CommandFailedError
+eafeece Show hostname instead of IP in errors
 840e918 tests: fix packaging for s/filestore/objectstore/
 b64f1e3 tests: fix objectstore tests
 f34eb1b mds: force update backtraces for previously created FS
+cee713f Add docstrings to s3 related tasks.
 b5c10bf Fix bad deallocator
 a4b3b78 correct one command line at building packages section
 33692a2 osdmaptool: fix cli test
 fed8396 test_bufferlist: fix signed/unsigned comparison
 8ca3d95 rgw: multi object delete should be idempotent
 bf38bfb rgw: set bucket permissions, cors on swift creation
+e26bb35 Add calamari setup/test tasks
 2682b64 doc: Incorporated feed back from Loic and Dan.
 0da9621 doc: Adds additional terms for use with librados.
 e1a49e5 ObjectCacher: remove unused target/max setters
@@ -16932,6 +19481,7 @@ d136eb4 mon: allow firefly crush tunables to be selected
 e3309bc doc/rados/operations/crush: describe new vary_r tunable
 525b2d2 crush: add firefly tunables baseline test
 37f840b crushtool: new cli tests for the vary-r tunable
+453d72b don't use notcmalloc flavor for valgrind
 e88f843 crush: add infrastructure around SET_CHOOSELEAF_VARY_R rule step/command
 f944ccc crush: add SET_CHOOSELEAF_VARY_R step
 e20a55d crush: add infrastructure around new chooseleaf_vary_r tunable
@@ -16943,6 +19493,7 @@ e98e344 client ownership info validation corrected
 f2f4eb5 Updated client code to complete PUT and DELETE requests
 69682ad1 memstore: fix touch double-allocation
 9292cc2 doc: highlight that "raw" is the only useful RBD format for QEMU
+ad6d412 Fix #7369: "sed expression must be raw string"
 9e62beb qa: add script for testing rados client timeout options
 79c1874 rados: check return values for commands that can now fail
 8e9459e librados: check and return on error so timeouts work
@@ -16951,9 +19502,12 @@ d389e61 msg/Pipe: add option to restrict delay injection to specific msg type
 3e1f7bb Objecter: implement mon and osd operation timeouts
 9bcc42a alert the user about error messages from partx
 42900ff use partx for red hat or centos instead of partprobe
+97617c9 Fix broken symlinks.
 6926272 common/buffer: fix build breakage for CEPH_HAVE_SETPIPE_SZ
 a5f479c configure: fix F_SETPIPE_SZ detection
 450163e configure: don't check for arpa/nameser_compat.h twice
+ceffee4 Make sure osd_scrub_pgs gets called if healthy call fails.
+b2d3431 fs: Fix bad "mds set inline_data" syntax
 c1d2a99 libcephfs: fix documentation for ceph_readlink
 dbaf71a mailmap: Moritz Möller is with Bigpoint.com
 4cf2c72 Server changes to deploy in a production env: 1. Added the wsgi entry point app.wsgi. 2. Updated client code to mandate that update-metadata has a url to publish and unpublish. 3. Updated the README to describe a bit about the server operations as well.
@@ -16968,12 +19522,16 @@ a2dae0d mailmap: Somnath Roy is with SanDisk
 5d20c8d mailmap: Ray Lv is with Yahoo!
 1a588f1 Rename test/filestore to test/objectstore
 3d65660 script to test rgw multi part uploads using s3 interface
+9506377 rgw task for multipart upload script
 5d59dd9 script to test rgw multi part uploads using s3 interface
 0bac064 added script to test rgw user quota
+b4a2a2b s/rgw_user_quota.pl/rgw_user_quota.yaml
 8cb3dad doc: Added Python doc.
 22afd6c doc: Added inline literal tag.
 aaa2799 doc: Adds Python to index and sets maxdepth to 2.
+24d89e2 task for rgw user quota testing
 0f2386a script to test rgw user quota functionality
+69f29d1 Move osd_scrub_pgs call to finally block of ceph main task.
 16ae4f8 fuse: fix for missing ll_ref on create
 c1b0714 client: For rebase need to fix ll_readlink() call in Synthetic client
 349b816 client, libcephfs: make readlink sane
@@ -17010,17 +19568,24 @@ e1e6c45 Completed model and controller code 1. GET, PUT and DELETE request are a
 30fd0c5 Initial version of the server code. 1. Database modelling is done. 2. PUT request is completed.
 fc963ac erasure-code: move test files to a dedicated directory
 7baa62f erasure-code: move source files to a dedicated directory
+f0bc87a created upgrades:dumpling-next suite to test parallel upgrades and partial upgrades.
+5422c41 s/rgw_tests.sh/s3_bucket_quota.pl
+330612d schedule_suite: be slightly more verbose
 9ecf346 rgw: initialize variable before call
 ce0e3bd qa/workunits/snaps: New allow_new_snaps syntax
 22b0057 mon: test osd pool create pg_pool_t::stripe_width behavior
+7759473 Enable killing runs that haven't even started yet
 dfc90cf mon: osd pool create sets pg_pool_t::stripe_width
 33b8ad8 common: add osd_pool_erasure_code_stripe_width
 798b56a unittests: update osdmaptools with stripe_width
 11c11ba mon: add erasure-code pg_pool_t::stripe_width
+c2bd846 valgrind: fix msync suppression
 922e5cf osd: fix type mismatch warning
 6fda45b os/kvstore: remove used var
 994bdea os/kvstore: trivial portability fixes
 377a845 common: simpler erasure code technique
+a07db3c increase mon verbosity for all ceph-deploy tasks
+decfe7c increase mon verbosity in logs
 04b1ae4 rgw: fix rgw_read_user_buckets() use of max param
 fdeb18e mon: MDSMonitor: Forbid removal of first data pool
 c7d265a mon: OSDMonitor: Refuse to delete CephFS pools
@@ -17054,9 +19619,11 @@ cc66c6a erasure-code: test ErasureCodeJerasure::create_ruleset
 f019c90 erasure-code: implement example create_ruleset
 6ca9b24 erasure-code: add crush ruleset creation API
 2dca737 erasure-code: the plugin is in a convenience library
+2bcc600 Implement the rest of the checks for automatic pg scrubbing. osd_scrub_pgs now: 1.) Ensures that all pgs are clean and active. 2.) Performs the scrub of all OSDs. 3.) Waits until all pgs have been scrubbed or no progress in scrubbing has been made for two minutes.
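For context, the scrub-on-teardown behaviour described in that entry boils down to a wait-for-clean loop followed by per-OSD scrub commands. A minimal sketch, assuming a hypothetical run_ceph() helper that shells out to the ceph CLI (this is not the actual teuthology code, and the JSON field names are assumptions):

    import json
    import subprocess
    import time

    def run_ceph(*args):
        # Hypothetical helper: run a ceph CLI command and return its stdout.
        return subprocess.check_output(('ceph',) + args).decode()

    def scrub_all_pgs(timeout=120):
        # Wait (up to a timeout) for every PG to report active+clean, then
        # ask each OSD to scrub.  The real task also tracks scrub progress;
        # this sketch omits that part.
        deadline = time.time() + timeout
        while time.time() < deadline:
            pgs = json.loads(run_ceph('pg', 'dump', '--format=json'))['pg_stats']
            if all('active+clean' in pg['state'] for pg in pgs):
                break
            time.sleep(5)
        for osd_id in json.loads(run_ceph('osd', 'ls', '--format=json')):
            run_ceph('osd', 'scrub', str(osd_id))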
 e47e49d doc/release-notes: v0.77 draft notes
 2c504ea doc/release-notes: v0.76
 60ca6f6 client: fix warnings
+9b2b2c2 misc.py: helper roles_to_remotes for generic use
 1829d2c librados: add timeout to wait_for_osdmap()
 0dcceff conf: add options for librados timeouts
 a23a2c8 os/KeyValueStore: fix warning
@@ -17066,6 +19633,8 @@ eb9ffd5 mon: use 'mds set inline_data ...' for enable/disable of inline data
 e5ed1b2 mon: do not force proposal when no osds
 3b99013 (tag: v0.76) v0.76
 7ff2b54 client: use 64-bit value in sync read eof logic
+021d94c fs/basic: test with inline data
+be3ea71 valgrind: another tcmalloc suppression
 2f85b8c doc: Incorporated feedback.
 684e5c4 Pipe, cephx: Message signing under config option
 5fde828 cmp.h: boost tuple comparison is replaced by regular comparison
@@ -17074,7 +19643,13 @@ e5ed1b2 mon: do not force proposal when no osds
 03f7f77 Throttler: optionally disable use of perfcounters
 ee48c87 common/shared_cache.hpp: compact to a single lookup where possible
 27b5f2b common/shared_cache.hpp: avoid list::size()
+e871716 Scrub osds when ceph task finishes. Fixes: 7198. Signed-off-by: Warren Usui <warren.usui at inktank.com>
 ee4cfda doc: rgw: el6 documentation fixes
+a321b6e README.rst: describe new pp object and unlock_on_failure option.
+1845a5a interactive.py: add handy "pp" for prettyprinting at the prompt
+77ec431 internal.py: add global config key 'unlock_on_failure'
+b80e510 Make libvirt import optional (OS X)
+aaf9b51 Fix MDS wait when get_mds_status returns None
 48fbcce osd: Change some be_compare_scrub_objects() args to const
 ce1ea61 osd: Change be_scan_list() arg to const
 e1bfed5 common: buffer::ptr::cmp() is a const function
@@ -17085,9 +19660,11 @@ f9128e8 osd: Move PG::_scan_list() to backend as ReplicatedBackend::be_scan_list
 d508079 OSDMonitor: use deepish_copy_from for remove_down_pg_temp
 61914d8 OSDMap: deepish_copy_from()
 802692e os/KeyValueStore: fix warning
+c489e44 install.py: look for 'local' in config directly, don't warn if not found
 0389f1d mon/OSDMonitor: encode full OSDMap with same feature bits as the Incremental
 b9208b4 OSDMap: note encoding features in Incremental encoding
 e4dd1be pybind: improve EnvironmentError output
+de771dd Update README.rst to reflect changes in libvirt
 754ddb1 rgw: fix build on centos
 1628423 mailmap: Rutger ter Borg affiliation
 3a1a8c3 mailmap: Laurent Barbe affiliation
@@ -17133,6 +19710,8 @@ fd260c9 cls_user: init cls_user_stats fields in ctor
 d0f13f5 OSDMap: fix deepish_copy_from
 d7b0c7f ceph-disk: run the right executables from udev
 318e208 OSD: don't assume we have the pool in handle_pg_create
+cc3956a fix indentation level for task
+bdafbc0 make sure we are using a dict
 2a737d8 leveldb: add leveldb_* options
 11cf9bb rgw: fix multipart min part size
 12ba8a3 Add a virtual interface init, open, create_and_open to KeyValueDB
@@ -17143,11 +19722,15 @@ ff5abfb buffer: make 0-length splice() a no-op
 26ace1e test_rgw_admin_opstate: get it compiled
 28c7388 osdc/Striper: test zero-length add_partial_result
 f513f66 osd: OSDMonitor: ignore pgtemps from removed pool
+81d1405 Attempt to fix #7241
+427bf93 Fix PEP-8 linter complaints
 3c77c4c OSDMap: use deepish_copy_from in remove_redundant_temporaries
+1408a69 correct user name
 368852f OSDMap: fix damaging input osdmap from remove_down_temps
 bd54b98 OSDMap: deepish_copy_from()
 9e52398 packaging: apply udev hack rule to RHEL
 64a0b4f packaging: apply udev hack rule to RHEL
+e424d78 Be more verbose about log file locations
 9265d76 client: Avoid uninline empty inline data
 b99e9d3 mds: Handle client compatibility
 ddbaa5c mds: Shutdown old mds when inline enable
@@ -17215,6 +19798,8 @@ edcf9fe cls/user: a new op to retrieve user header
 c7b4d00 rgw: move bucket add / remove to new user objclass
 23aa65f rgw: replace user bucket listing with objclass call
 248c4ae rgw: new user objclass
+a200fa9 removed roles from task file
+50722a7 Symlink worker logs into job archive dir
 b90570f Fix 404 broken links to logging and debug configuration
 4553e6a Fix trailing space
 7bed2d6 called sysinfo.append(meta) in get_sysinfo within the while loop
@@ -17226,6 +19811,7 @@ ea026c6 doc: Added domain pool, and changed zone configs to use domain tool so t
 6f6b1ee ReplicatedBackend: fix uninitialized use warning/bug
 df4df46 Monitor: use a single static accessor for getting CompatSet features off disk
 6915053 doc: Adding more information on style and usage for documenting Ceph.
+cf250a8 use create-initial for deploying monitors
 2216afd doc: Fixed omission of a comma in zone configuration.
 55ab35b FileStore: perform LFNIndex lookup without holding fdcache lock
 1560cc0 mongoose: git rm src/mongoose
@@ -17237,6 +19823,7 @@ d26e766 civetweb: fix module uri
 f2f7475 rgw: switch mongoose to civetweb
 08fa34d osd/OSDMap: do not create erasure rule by default
 6f8541c osd: use ceph:: scoped hash_map
+47dcdb3 rados/thrash: add cache + snaps workload
 289a400 Corrected ownership info which was presented as string, added stub code for unpublish
 ecbdeb1 PGBackend: clarify rollback_stash name and method comments
 4fc4573 PG: drop messages from down peers
@@ -17298,12 +19885,15 @@ b7d100b FileStore::_collection_move_rename: remove source before closing guard
 06f7a98 ReplicatedBackend.h: don't need to be active for pushes
 518774d ObjectStore: improve name of the queue_transaction which cleans up the transaction
 c03d027 hobject: admit that gen_t is actually version_t
+022e531 added roles for bucket quota tests
 8060afd MOSDMap: reencode maps if target doesn't have OSDMAP_ENC
 9792500 doc: add-or-rm-mons.rst: better explanation on number of monitors
 9eac5e3 Added update_metadata, clear_metadata, and usage description code
+e4abe40 added roles to rest yaml
 dcca413 added perl script for rgw bucket quota tests
 28c75f8 removing rgw_tests.sh
 54caa01 removing rgw_tests.sh
+98c7873 modified rgw suite for bucket quota tests
 8b8ede7 modified the port to 7280 in the script instead of the default 80
 850b4f4 script for rgw bucket quota testing
 339bed1 mon/MonCommands: 'invalidate+forward' -> 'forward'
@@ -17329,6 +19919,7 @@ aae4700 libc++: use ceph::shared_ptr in installed header
 4c4e1d0 libc++: use ceph:: namespaced data types
 8e86720 libc++: create portable smart ptr / hash_map/set
 7e7eda4 OSDMap: Populate primary_temp values a little more carefully
+35e5b12 suite for rest
 47bc71a fixed the syntax for test maxmds=2
 a13ebd3 fix for the test maxmds=2
 ad203d5 doc: Fixed ruleset typo.
@@ -17341,6 +19932,7 @@ d1de32c doc: Added default-placement to list of placement targets. Added SSL com
 85267cf rgw: sign loadgen requests
 e8a4b30 rgw: loadgen frontend read uid, init access key
 57137cb rgw: add a load generation frontend
+e8bb165 call wait() on the teuthology-results Popen object
 f01202d Fixes: #7172
 b1a853e rbd: expose mount_timeout map option
 8ec7fa8 PendingReleaseNotes: note ceph -s fix
@@ -17358,9 +19950,13 @@ add59b8 doc: Added additional monitor icon.
 86c1548 rgw: handle racing object puts when object doesn't exist
 5c24a7e rgw: don't return -ENOENT in put_obj_meta()
 a84cf15 rgw: use rwlock for cache
+381d4aa lock.py: request only rsa keys from ssh-keyscan
 790dda9 osd: OSDMap: fix output from ceph status --format=json for num_in_osds. num_up_osds returns as an int value, while num_in_osds returns as a string. Since only an int can be returned from get_num_in_osds(), num_in_osds should also be an int to remain consistent with num_up_osds.
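The type mismatch matters to anything consuming the JSON programmatically; a rough illustration of the consumer side (the exact field path in the "ceph status --format=json" output is an assumption here and may differ between releases):

    import json
    import subprocess

    # Hypothetical consumer of `ceph status --format=json`.
    status = json.loads(subprocess.check_output(
        ['ceph', 'status', '--format=json']).decode())
    osdmap = status['osdmap']
    # Prior to the fix, num_in_osds came back as a string, so a consumer had
    # to coerce it before comparing against the integer num_up_osds:
    num_up = osdmap['num_up_osds']
    num_in = int(osdmap['num_in_osds'])   # int() no longer needed post-fix
    if num_up < num_in:
        print('%d of %d in OSDs are down' % (num_in - num_up, num_in))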
 3194d66 doc: Fixed keyring command and updated for current conventions.
+66312f7 ceph_manager: in test_map_discontinuity, delay killing osd for 20s
 ec5f7a5 client: ceph-fuse use fuse_session_loop_mt to allow multithreaded operation if "fuse multithreaded = 1". Signed-off-by: Moritz Moeller mm at mxs.de
+53fc2d9 Log a warning when killing long-running jobs.
+769ef8a Kill jobs that run for over 3 days (configurable)
 e1fd0e8 first commit
 ac5a9fe mon: larger timeout for mon mkfs.sh test
 50808af ceph-disk: larger timeout in the test script
@@ -17496,6 +20092,8 @@ b8dfcc1 mds: use OMAP to store dirfrags
 d429ab5 osd/OSDMonitor: fix 'osd tier add ...' pool mangling
 f49d9cd osd: fix propagation of removed snaps to other tiers
 3b3511c mon: debug propagate_snaps_to_tiers
+9a9ee61 valgrind: ignore tcmalloc uninitialized memory
+495f216 thrashosds: change min_in from 2 -> 3
 631d0c7 erasure-code: erasure code decode interface helper
 d2b2f5d erasure-code: jerasure implementation of chunk size helpers
 eb2374e erasure-code: refactor the example to use chunk size helpers
@@ -17503,6 +20101,7 @@ eb2374e erasure-code: refactor the example to use chunk size helpers
 fa43d9c organizationmap: joe.buck at inktank.com is Inktank
 8604e76 organizationmap: match authors with organizations
 ce95cef mongoose: fix warning
+bf481b9 kernel: use utsrelease string for need_to_install() purposes
 af0269d mailmap: make roald at roaldvanloon.nl primary
 f18b310 mailmap: remove company name from Pascal de Bruijn name
 39db90f mailmap: add Andrew Leung, Carlos Maltzahn and Esteban Molina-Estolano
@@ -17530,6 +20129,7 @@ a5f8cc7 rgw: convert bucket info if needed
 a3eb935 mailmap: add Yehuda Sadeh fixes for Reviewed-by:
 f6b52fd mailmap: add Sage Weil fixes for Reviewed-by:
 ca9acb9 mon: implement 'mds set max_mds|max_file_size'
+1b4368b schedule_suite: 2x replication for ceph-deploy
 0940d8f osd: Change waiting_on_backfill to a set<int>
 695255e osd: Interim working version with backfill reserve state changes
 a07d682 osd: Recovery reservations need to include backfill peers
@@ -17541,16 +20141,21 @@ ba19006 osd: Config OSD when CEPH_FEATURE_OSD_ERASURE_CODES feature first seen
 09d021b doc: Updated for hostname -s and resolving to non-loopback IP address.
 ed605f0 doc: Fixed hyperlink.
 b780f4b osd: Remove redundant incompat feature
+18b158e add another host so that replicas meet the minimum required
 294cdfb erasure-code: relax zero copy requirements in tests
 656de1b erasure-code: ensure that coding chunks are page aligned
 9ba6599 EC-JERASURE: rewrite region-xor function using vector operations to get ~ x1.5 speedups for erasure code and guarantee proper 64-bit/128-bit buffer alignment
 31f3745 mailmap: Adds Christophe Courtaut
 7d5674c doc: Added librados introduction doc. Still wip.
 33e78a4 doc: Updated terminology. Added librados intro to index.
+f32adf0 Fix a bug where ctx.config['targets'] was looped through again in connect().  The bug caused vm behavior to happen for a target if any of the machines in the cluster was a vm.  The code was also changed to set the key to none only if rsa or dsa keys were used on a vm.
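The gist of that fix is to make the VM decision per target instead of once for the whole cluster. A minimal, self-contained sketch under assumed names (is_vm(), clean_key() and the key-type rule are placeholders, not the real teuthology connect() code):

    def is_vm(hostname):
        # Placeholder predicate; teuthology determines this differently.
        return hostname.startswith('vpm')

    def clean_key(host, key):
        # VMs get reimaged, so a recorded rsa/dsa host key may be stale and
        # is dropped; physical hosts keep their key for strict checking.
        if is_vm(host) and key and key.split()[0] in ('ssh-rsa', 'ssh-dss'):
            return None
        return key

    def connect_all(targets):
        # targets: {hostname: ssh_host_key}.  The bug looped over the whole
        # dict again per target, so one VM made every host take the VM path;
        # deciding per host, as here, avoids that.
        return {host: clean_key(host, key) for host, key in targets.items()}

    print(connect_all({'vpm001': 'ssh-rsa AAAA...', 'plana01': 'ssh-rsa BBBB...'}))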
 7acb0a1 on_exit: remove side effects from asserts
 e3d0b0a common: fix large output in unittest_daemon_config
 822ad58 configure: support Automake 1.12
+0d5763e add missing/required OSDs to the rest of ceph-deploy tasks
 b1976dd radosgw-admin: fix object policy read op
+0c9129b error after 15 minutes of waiting for gatherkeys
+5c04966 Enable reporting of entire runs as dead
 3f34dc7 common: unit tests for config::expand_meta
 9485409 common: recursive implementation of config::expand_meta
 87db534 common: cosmetic inversion of tests arguments
@@ -17569,8 +20174,15 @@ bb8b750 add autotools-generated files to .gitignore
 f2e33e8 mon: get rid of --keyring /dev/null hack
 d110c91 doc: Removed dash from --dmcrypt option.
 c772b6d ceph-disk: fix false positive for gitbuilder
+3cffea4 Re-raise exceptions caught in the watchdog
+027929e Use response.text if response.json is None
+f92174f Strip stdout lines
+68b259f Catch and log unhandled exceptions in the watchdog
 e2ee528 osdc/ObjectCacher: back off less during flush
+c6a9de0 Add 'emperor' to list of branches with reporting
+eaa3e12 Work around a change in pip 1.5 regarding wheels
 daefe81 Be more explicit how to discover available API calls
+d3afebe Be safer when calling ./bootstrap
 de8522f ceph-disk: tests for the --data-dir code path
 a71025d ceph-disk: implement --sysconfdir as /etc/ceph
 ca713f4 ceph-disk: implement --statedir as /var/lib/ceph
@@ -17588,12 +20200,16 @@ ad6b4b4 ceph-disk: add --prepend-to-path to control execution
 324804a ceph-disk: fix activate() indent
 de00505 ceph-disk: remove noop try:
 b82ccfb ceph-disk: fix Error() messages formatting
+131b218 Use CentOS Gitbuilder sha1 instead of Fedora for non-ubuntu.
 830583f osd: Correction to #6690 change
 6b8d418 init-ceph: pass config file path when adjust crush position
 2ba6930 ceph-disk: cannot run unit tests
 24417f9 test/cli-integration/rbd: silence stderr
+d88ec89 break out of the while loop after 15 minutes
+4c0086e upgrade/parallel/stress-split: api tests needs to run on dumpling host
 8220549 inttypes: detect and define missing integer types
 8f91cac endian: check byte order on OSX
+39d6f28 upgrade/parallel/stress-split: use dumpling tests against mixed d+e
 46a5674 doc/release-notes: fix bobtail version
 f6bbcf4 mon: tests for ceph-mon --mkfs
 41987db doc/release-notes: v0.74
@@ -17610,6 +20226,8 @@ cae663a osd/ReplicatedPG: improve debug output from check_local
 ac547a5 rbd: return 0 and an empty list when pool is entirely empty
 e91fb91 librbd: better error when unprotect fails on unprotected snap
 42e98ac Be more explicit how to discover available API calls
+b4f524e Sleep once outside of the watchdog loop
+04fe727 rados: add rados tool test
 eeba294 mon: remove fixture directory between runs
 1eafe8d mon: make ceph-mon --mkfs idempotent
 f0ae4ab mon: create mon-data directory on --mkfs
@@ -17619,7 +20237,11 @@ a194513 mon: do not daemonize if CINIT_FLAG_NO_DAEMON_ACTIONS
 10aa220 mon: set CINIT_FLAG_NO_DAEMON_ACTIONS when appropriate
 e0bae95 Fix typos in erasure code documents
 2b0a435 osd_types: add missing osd op flags
+3d895a0 format bullets in README
 f8e413f msgr: fix rebind() race stop the accepter and mark all pipes down before rebind to avoid race
+56c70e1 Set the content-type in report_job()
+ba8f999 Split out ResultsSerializer.job_info()
+e985ca2 Port from httplib2 to requests module
 8fcfc91 qa: test rados listomapvals with >512 keys
 be5afa2 rados: allow listomapvals to list all k/v pairs
 c165483 (tag: v0.74) v0.74
@@ -17632,8 +20254,10 @@ ce8b26e erasure: add dummy symbol to avoid warnings
 da5a082 make: add top-level libcommon dependency
 f7a66d6 make: restrict use of --as-needed to Linux
 87db89e librados: read into user's bufferlist for aio_read
+6b87147 valgrind.supp: ignore libnss3 leaks
 0b40bbd common: evaluate --show-config* after CEPH_ARGS
 b5c17f6 vstart: set fsid in [global]
+ee3d227 upgrade/parallel/stress-split: fix test
 d7d7ca8 Fix qa/workunits/rados/test_cache_pool.sh typos
 cc67b7b Fix test/filestore/store_test.cc error
 e8e174e rados: include struct timeval definition
@@ -17649,6 +20273,7 @@ a48d038 test: fix VLA of non-POD type
 c7e1c4b c++11: fix std::lock naming conflicts
 bbcb022 kvstore: only build on linux
 1fec818 spinlock: add generic spinlock implementation
+a448f81 rename upgrade/upgrade-* -> upgrade/*
 12f4631 qa/workunits/rest/test.py: rbd pool ruleset is now 0
 b286e4f ceph_test_rados_api_tier: retry EBUSY race checks
 b88af07 libcephfs: get osd location on -1 should return EINVAL
@@ -17692,6 +20317,7 @@ e7bf5b2 librados: lockless get_instance_id()
 ac14d4f osdc/Objecter: maintain crush_location multimap
 746069e crush/CrushWrapper: simplify get_full_location_ordered()
 dcc5e35 crush/CrushWrapper: add get_common_ancestor_distance()
+2247a3d upgrade/upgrade-parallel: don't test copy-from on dumpling
 0903f3f mon/OSDMonitor: use generic CrushWrapper::parse_loc_map helper
 8f48906 crush/CrushWrapper: add parse_loc_[multi]map helpers
 8fc66a4 osd/ReplicatedPG: fix copy-get iteration of omap keys
@@ -17701,6 +20327,7 @@ dcc5e35 crush/CrushWrapper: add get_common_ancestor_distance()
 19213e6 doc: Fix caps documentation for Admin API
 ac10aa5 mon: fix forwarded request features when requests are resent
 2e4c61b osd/ReplicatedPG: include omap header in copy-get
+d781348 rbd: bump the default scratch size for xfstests to 10G
 537a7c3 crush: misc formatting and whitespace fixes
 fa6a99a crush: use kernel-doc consistently
 6e36794 crush/mapper: unsigned -> unsigned int
@@ -17716,15 +20343,18 @@ df1704e osd: pool properties are not an array
 df0d038 mon: osd create pool must fail on incompatible type
 af22b0a packaging: erasure-code plugins go in /usr/lib/ceph
 203c5d6 mon: s/rep/replicated/ in pool create prototype
+c70e086 Revert "valgrind.supp: ignore libnss3 leaks"
 d192062 ceph_test_rados: update in-memory user_version on RemoveAttrsOp
 750da11 osd/ReplicatedPG: clear whiteout on successful copy-from
 37eac2b ceph_test_rados: check existence on is_dirty completion
 173b060 mon/OSDMonitor: propagate snap updates to tier pools on update
 baa74d5 osd/OSDMap: implement propagate_snaps_to_tiers()
 cf34af8 rgw: add -ldl for mongoose
+a47d16e rados/trash: add cache workload
 9b182c7 ceph_test_rados_api_tier: more grace for HitSetTrim
 2bb90b3 ceph_test_rados: update in-memory user_version on RemoveAttrsOp
 a0d1521 replace pool type REP with REPLICATED
+572dc88 valgrind.supp: ignore libnss3 leaks
 45449b8 doc/release-notes: missed a name
 b0e42d8 doc/release-notes: v0.72.2
 c10ba91 pipe: add compat for TEMP_FAILURE_RETRY symbol
@@ -17739,8 +20369,10 @@ cf53d8a rgw: abstract RGWProcess
 cdc178f Revert "Enable libs3 support for debian packages"
 1e238e6 mon: pool create will not fail if the type differs
 5f1957d doc/release-notes: v0.67.5
+1810781 Fix spelling error in teuthology/task/locktest.py comment
 98a1525 unittests: fail if one test fail
 9ab947c buffer: use int64_t instead of loff_t
+f8ce69c Add ability to mark jobs as 'dead'
 03693ac osd: git ignore erasure code benchmark binary
 42b4fe1 osd: erasure code benchmark is installed is part of ceph-test
 81dee1b osd: erasure code benchmark workunit
@@ -17748,6 +20380,7 @@ a619fe9 osd: erasure code benchmark tool
 a36bc5f osd: set erasure code packet size default to 2048
 c7d8ba7 osd: better performances for the erasure code example
 ff9455b osd: conditionally disable dlclose of erasure code plugins
+018164a rados/thrash: add small objects ceph_test_rados workload
 8879e43 osd: Fix assert which doesn't apply when compat_mode on
 0bd5cb6 Add backward compatible acting set until all OSDs updated
 8d31f71 osd/ReplicatedPG: fix promote cancellation
@@ -17774,8 +20407,14 @@ ffdaa5f vstart.sh: --cache <pool> to set up pool cache(s) on startup
 ea519b4 qa/workunits/rados: test cache-{flush,evict,flush-evict-all}
 71cd4a2 rados: add cache-flush, cache-evict, cache-flush-evict-all commands
 ad3b466 osd/ReplicatedPG: implement cache-flush, cache-try-flush
+e312048 Allow passing multiple job_ids
 edaec9a osd: Fix assert which doesn't apply when compat_mode on
+220779c Implement single-job killing
+eeeb626 For teuthology-kill, s/suite/run/
+37815b7 Do not run local handling fix if local parameter is not found. Fixes: 7042 Signed-off-by: Warren Usui <warren.usui at inktank.com>
 ac16a9d osd: remove remaining instances of raid4 pool types (never implemented)
+9a29c3e Log calls to teuthology-report more verbosely
+b014c71 Catch every exception here, for now.
 40a48de mds: fix Resetter locking
 087fe57 packaging: revert adding argparse and uuidgen
 8272538 packaging: make check needs argparse and uuidgen
@@ -17797,10 +20436,15 @@ bdeaa84 osd: OSDMap: add 'get_up_osds()' function
 73992d2 osd: OSDMap: check for erasure pools when getting features
 178f684 osd: OSD: add binary compat feature for Erasure Codes
 bfc86a8 include/ceph_features: add CEPH_FEATURES_OSD_ERASURE_CODES
+20b7998 Increase timeout by 50% in thrashers/mapgap.yaml
+5d8795d moved samba suite a level up [suites/fs/samba to suites/samba]
 fe13684 added execute permission to the script
 d9e33ea rgw workunit to test bucket quota
+085ad88 task to test rgw bucket quota
 c8890ab rgw: fix use-after-free when releasing completion handle
 d6a4f6a rgw: don't return data within the librados cb
+031be56 Use saucy gitbuilder for arm package checking.
+5320db5 rados: add in more (optional) op types
 e6ad4d4 osd: make obc copyfrom blocking generic
 8dec2b2 librados, osd: add flags to COPY_FROM
 e624e16 crush: silence error messages in unit tests
@@ -17816,6 +20460,7 @@ e4537d3 ARCH: add variable for sse2 register
 7e4a800 osd/ReplicatedPG: fix hit_set_setup() on_activate()
 19cff89 Add backward compatible acting set until all OSDs updated
 b153067 erasure-code: tests must use aligned buffers
+0680a13 bump the OSDs to three per mon/host
 f5d32a3 mds: drop unused find_ino_dir
 c60a364 Fix typo in #undef in ceph-dencoder
 9e45655 qa: add ../qa/workunits/cephtool/test.sh to unittests
@@ -17830,7 +20475,10 @@ b082c09 crushtool: reorg test-map-* cli tests
 c5bccfe ceph_test_rados_api_tier: fix HitSetRead test race with split
 7e618c9 mon: move supported_commands fields, methods into Monitor, and fix leak
 deded44 mongoose: update submodule
+a0eb1a8 Use shell=True to call teuthology-report
+c22ee52 Catch OSError if script isn't in $PATH
 ef10a5c rgw: fix memory leak
+420fff6 Revert "Use path when calling teuthology-report. …"
 824b3d8 FileJournal: use pclose() to close a popen() stream
 6696ab6 FileJournal: switch to get_linux_version()
 fcf6e98 common: introduce get_linux_version()
@@ -17894,6 +20542,7 @@ aa365e4 mon: typo s/degrated/degraded/
 e57239e common: fix rare race condition in Throttle unit tests
 938f22c common: format Throttle test to 80 columns
 ba55723 common: fix perf_counters unittests for trailing newline in m_pretty
+e4b5ab8 Use path when calling teuthology-report. …
 0dc59af osd/ReplicatedPG: fix promote: set oi.size
 697151e osd/osd_types: fix operator<< on copy-get operation
 f50389d ceph_test_rados_api_tier: test undirty on non-existent object
@@ -17951,8 +20600,10 @@ b7946ff doc: Added additional comments on placement targets and default placemen
 3d768d2 osd/ReplicatedPG: maintain stats for the hit_set_* objects
 9814b93 osd/ReplicatedPG: set object_info_t, SnapSet on hit_set objects
 dabd5d6 vstart.sh: --hitset <pool> <type>
+d4edaec use 3 OSDs in fs tests
 5bb0476 test/libcephfs: release resources before umount
 897dfc1 use the new get_command helper in check_call
+2e2b8fe Skip the 'dead' report on old branches
 eae8531 rbd: modprobe with single_major=Y on newer kernels
 8a473bc rbd: add support for single-major device number allocation scheme
 784cc89 rbd: match against both major and minor on unmap on newer kernels
@@ -17961,35 +20612,59 @@ a421305 rbd: switch to strict_strtol for major parsing
 24a048b Document librados's rados_write's behaviour with regard to its return value.
 a865fec osd/ReplicatedPG: debug: improve hit_set func banners
 b6871cf osd/ReplicatedPG: do not update current_last_update on activate
+36c0344 Use saucy gitbuilder when grabbing sha1 for arm.
+966dad5 Make sure to report all results.
+7f135ec Enable reporting of single jobs
+3d23b9b Remove the child's stderr completely
+9ff4d4a Fix FSID not being set in ceph.conf
+625f479 When starting a job, tell paddles it's running
 bcde200 vstart.sh: add --memstore option
 a9334a1 use the absolute path for executables if found
 43561f7 remove trailing semicolon
 a33c95f radosgw: increase nofiles ulimit on sysvinit machines
+a7f87f3 Longer timeout after sync/reboot.
 71cefc2 doc/release-notes: sort
 ee3173d doc/release-notes: fix indentation; sigh
 3abc189 doc/release-notes: v0.73
 03429d1 PendingReleaseNotes: note CRUSH and hashpspool default changes
 bb50276 Revert "Partial revert "mon: osd pool set syntax relaxed, modify unit tests""
 0cd36e0 mon/OSDMonitor: take 'osd pool set ...' value as a string again
+f3ce07c Respect .ssh/config when opening SSH connections
 e19e380 replace sgdisk subprocess calls with a helper
 4b6d721 osd: enable HASHPSPOOL by default
 fb47d54 mon: if we're the leader, don't validate command matching
 2bfd34a mon: by default, warn if some members of the quorum are "classic"
+0eb784b Added handling of a 'local' option inside install.py which specifies a local directory containing deb or rpm files to be installed.
 e620057 add apt-get install pkg-config for ubuntu server
 b8884e0 MemStore: update for the new ObjectStore interface
+b3acff1 Use continue, not break
+4a6e47c Tweak logic for pid lookup
+77145f1 Fix indentation
 8ac1da8 crush: remove crushtool test leftover
+57574fe Don't show child's stderr, but show archive path
 ec609ca Elector: use monitor's encoded command sets instead of our own
 e223e53 Monitor: encode and expose mon command sets
 420a2f1 man: update man/ from doc/man/8
 8d60cd1 man: Ceph is also an object store
 faaf546 os/MemStore: do on_apply_sync callback synchronously
+339b7c4 Add debug statements
 d8ad51e (tag: v0.73) v0.73
 990b2b5 ceph_test_rados_api_tier: make HitSetWrite handle pg splits
+b86aefc restructured upgrade suites to be under one folder called upgrade.
+6c856a2 rados: allow existing pool(s) to be used
+2266eeb ceph.conf: put 2x command in [global]
 a6f4d71 Elector: keep a list of classic mons instead of each mon's commands
+48b8ba4 Create a DateTime object from the timestamp
+5ea5018 Make -a optional
+025ab36 Add missing req: psutil
+d7289f7 Auto-restart
 a888a57 crush: implement --show-bad-mappings for indep
 20263dd crush: add unitest for crushtool --show-bad-mappings
 fbc4f99 crush: remove scary message string
 472f495 crush: document the --test mode of operations
+1b80f4a nuke: ignore exceptions while issuing reboot command
+478ecc3 Remove unused variable.
+ce8ff0a Added additional comments.
 ea86444 Monitor: Elector: share the classic command set if we have a classic mon
 f1ccdb4 Elector: share local command set when deferring
 ba673be Monitor: import MonCommands.h from original Dumpling and expose it
@@ -18011,12 +20686,14 @@ c928f07 crush: output --show-bad-mappings on err
 ef4061f librbd: remove unused private variable
 ad3825c TrackedOp: remove unused private variable
 3b39a8a librbd: rename howmany to avoid conflict
+a276606 ceph.conf: default to 2x
 539fe26 wbthrottle: use feature check for fdatasync
 663da61 rados_sync: fix mismatched tag warning
 60a2509 rados_sync: remove unused private variable
 43c1676 mon: check for sys/vfs.h existence
 c99cf26 make: increase maximum template recursion depth
 e2be099 compat: define replacement TEMP_FAILURE_RETRY
+c0a4327 nuke: fix sync before reboot timeout
 3b3cbf5 crush/CrushCompiler: make current set of tunables 'safe'
 8535ced crushtool: remove scary tunables messages
 4eb8891 crush/CrushCompiler: start with legacy tunables when compiling
@@ -18112,6 +20789,7 @@ aa63d67 os/MemStore: implement reference 'memstore' backend
 c98c104 tools: ceph-kvstore-tool: output value contents to file on 'get'
 00048fe mon: Have 'ceph report' print last committed versions
 cc64382 mon: MDSMonitor: let PaxosService decide on whether to propose
+856f834 Implement a watchdog for queued jobs
 5823146 os/ObjectStore: make getattrs() pure virtual
 11e26ee s/true/1 and s/false/0
 cf09941 mon: MDSMonitor: implement 'get_trim_to()' to let the mon trim mdsmaps
@@ -18121,12 +20799,17 @@ cf09941 mon: MDSMonitor: implement 'get_trim_to()' to let the mon trim mdsmaps
 ccc6014 crush: CrushWrapper unit tests
 b9bff8e crush: remove redundant test in insert_item
 8af7596 crush: insert_item returns on error if bucket name is invalid
+4211926 A create_if_vm call was made more than once when a lock-many style lock was performed.  This caused downburst to run twice, and the second downburst run failed because the first one had already run.
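The duplicate-provisioning problem described there is the usual "make the call idempotent" fix; a rough sketch with hypothetical names (not the actual teuthology/downburst code):

    created = set()

    def create_if_vm(machine):
        # Guard so that a lock-many code path that visits the same machine
        # twice only provisions it once.
        if machine in created:
            return False
        created.add(machine)
        print('downburst create %s' % machine)  # stand-in for the real call
        return True

    for m in ['vpm001', 'vpm001', 'vpm002']:  # lock-many may revisit a machine
        create_if_vm(m)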
+94f7dd1 Implement --downburst-conf parameter for teuthology-lock. Load the appropriate yaml information when found (this formerly did not work).  Make sure teuthology --lock works with a downburst entry in the yaml files.  Document how this works in README.rst.
+f8b5965 added fs:xfs to upgrade suites
 3b8371a os/ObjectStore: prevent copying
 a70200e os/ObjectStore: pass cct to ctor
 35011e0 Call --mbrtogpt on journal run of sgdisk should the drive require a GPT table.
 cae1083 ObjBencher: add rand_read_bench functions to support rand test in rados-bench
 e829859 doc/rados/operations/crush: fix more
 7709a10 doc/rados/operations/crush: fix rst
+8543d0e Added docstrings.  Cleaned up code (broke up long lines, removed unused variable references, pep8 formatted most of the code (one set of long lines remains), and changed some variable and method names to conform to pylint standards).
+5cc6099 rbd: make default size larger for xfstests
 68fdcfa FileSTore: do not time out threads while they're waiting for op throttle
 7ff7cf2 doc: Partially incorporated comments form Loic Dachary and Aaron Ten Clay.
 5e34beb init, upstart: prevent daemons being started by both
@@ -18168,6 +20851,7 @@ f11b380 osd: read into correct variable for magic string
 f57dad6 OSDMonitor: prevent extreme multipliers on PG splits
 26c00c5 OSDMonitor: return EEXIST if users try to do a PG merge
 4fdc5d9 workunits: use integers instead of true/false for hashpspool command
+7b392a8 whitelisted "wrongly marked me down" to avoid false alarms in the nightlies
 c77ce90 doc: Fixed hyperlink to the manual installation section.
 648f3bc doc: Added a link to get packages.
 16b7576 osd/OSDMap: fix typo and crush types helper
@@ -18198,6 +20882,7 @@ bafb5c3 doc: clarify crush rule create-simple and fix typos
 b0dce8a mds: Add assertion to catch object mutation error
 09a4c1b mds: remove superfluous warning of releasing lease
 c409e36 mon: osd dump should dump pool snaps as array, not object
+4c7dd50 tgt and iscsi code needed some minor fixes.  Moved the settle call during simple read testing.  In iscsi.py, generic_mkfs and generic_mount need to be called from the main body of the task.  An extraneous iscsiadm command was removed.  The tgt size is no longer hard-coded.  It is extracted from the property and defaults to 10240.
 03d63c4 buffer: turn off zero-copy reads for now
 784d188 mds: Release resource before return
 524f666 rbd: Release resource before return
@@ -18206,6 +20891,7 @@ c409e36 mon: osd dump should dump pool snaps as array, not object
 ab05580 Add missing stuff to clean target
 ae46c38 Correctly mark library modules
 94ca1cc ceph-object-corpus: revert accidental revert
+c0297b4 Changes suggested per review.
 75d4a72 buffer: enable tracking of calls to c_str()
 445fb18 buffer: try to do zero copy in read_fd
 be29b34 buffer: attempt to size raw_pipe buffers
@@ -18220,24 +20906,34 @@ ebb261f buffer: abstract raw data related methods
 fc5789d doc: Added commentary to configure pg defaults. Clarified size commentary.
 dceaef4 doc: PG splitting added to docs.
 d39676b doc: Took out "future" reference to namespaces.
+deec86c Also catch httplib2.ServerNotFoundError
 9a55d89 doc: Clarification of terms.
 b35fc1b rgw: lower some debug message
 561e7b0 rgw: initialize RGWUserAdminOpState::system_specified
+f6b5acc internal.py: nitty little spelling error
 b2ee935 PendingReleaseNotes: mention 6796 and 'ceph osd pool set' behavior change
 7c6d43c doc: rados: operations: pools: document 'osd pool set foo hashpspool'
 49d2fb7 mon: OSDMonitor: don't crash if formatter is invalid during osd crush dump
 337195f mon: OSDMonitor: receive CephInt on 'osd pool set' instead on CephString
 7191bb2 mon: OSDMonitor: drop cmdval_get() for unused variable
 50868a5 qa: workunits: mon: ping.py: test 'ceph ping'
+f7af3e7 Schedule-suite Use 'multi' tube for multiple types. Scheduling.
+c38eeec Allow ability to use multi machine type deliminated by ,- \t.
 6b5aaf3 doc: Minor updates to manual deployment document.
+d04f3a6 Skip cluster() if use_existing_cluster is True
 3502d4f init: fix typo s/{$update_crush/${update_crush/
 29178d8 doc: Cleanup of Add/Remove OSDs.
 634295d doc: Cleanup on Add/Remove monitor.
 de2bcd5 doc: Added a manual deployment doc.
 10b4bf6 doc: Added manual deployment section to index.
 f753d56 test: use older names for module setup/teardown
+bac3708 Don't run qemu-iotests on arm as well
+c5a26b3 Use shortened version in order to avoid revision/arch mishaps.
 72bba1f doc: Added fixes to osd reporting section.
+f8150d4 Add optional 'use_existing_cluster' flag
+2ae29a1 Don't run QEMU tests on ARM arch.
 86e4fd4 osd: Backfill peers should not be included in the acting set
+7119ef3 segregating the upgrade suite based on modules
 19dbf7b osd: Simple dout() fix
 82e1e7e PG: remove unused Peering::flushed
 9ff0150 PG: don't requeue waiting_for_active unless flushed and active
@@ -18250,15 +20946,23 @@ da77553 ReplicatedPG,PG: move duplicate FlushedEvt logic info on_flushed()
 0de0efa RBD Documentation and Example fixes for --image-format
 40a76ef osd: fix bench block size
 703f9a0 Revert "JounralingObjectStore: journal->committed_thru after replay"
+8292bc1 upgrade: whitelist log bound mismatch
 f0c8931 release-notes: clarify that the osd data directory needs to be mounted
 ba67b9f doc/release-notes.rst: v0.72.1 release notes
 96d7a2e doc: Fix wrong package names on Debian Wheezy for building Ceph
+5779b1c Add tests for tgt. Fixes: #6724
+39830c6 Fix ceph.repo so it uses URI value.
 878f354 ceph-filestore-tool: add tool for fixing lost objects
 bf7c09a osd_types: fix object_info_t backwards compatibility
+04322d9 ceph_manager: provide unique pool names to avoid collision
 1212a21 CephContext: unregister lockdep after stopping service thread
 dd9d8b0 ReplicatedPG: test for missing head before find_object_context
 d8d27f1 JounralingObjectStore: journal->committed_thru after replay
+3f62b92 add git clone to installation instructions
+07db94e syslog: ignore perf nmi handler timeout
 a7063a1 Use clearer "local monitor storage" in log messages
+88792d6 Make report_job() always return an int
+96cfb11 Add some debug logging.
 dcef9fb automake: replaced hardcoded '-lboost_program_options' with a macro
 125582e autoconf: add check for the boost_program_options library
 cfb82a1 filejournal: add journal pre-allocate for osx
@@ -18266,11 +20970,16 @@ d39ff4c mon/OSDMonitor: 'osd metadata N' command
 ea16435 mon/OSDMonitor: record osd metadata key/value info
 6d40e94 osd: send host/kernel metadata to mon on boot
 aef3402 doc/release-notes: fix dup
+1fd25f1 For saya (arm) use arm gitbuilder for ceph sha1.
+5bcab43 removed force-branch option
+f0e01ad Distro kernel bug-fixes.
+03f31c6 Consolidate two excepts into one.
 21637cc doc: Updated Emperor reference to 0.72.
 8b5719f doc: Added Emperor upgrade.
 7f45e72 doc: Added dumpling to the sequence.
 efe55b1 doc: Remove redundant command for quick start preflight
 fbdfe61 trace: remove unused header
+b3e730e Also catch socket.error in try_push_job_info
 762acec mon: Monitor: make 'quorum enter/exit' available through the admin socket
 01f7b46 client: use platform-specific stat time members
 2f76ac3 mon/MDSMonitor.cc: remove some unused variables
@@ -18299,6 +21008,7 @@ c7a30b8 ReplicatedPG: don't skip missing if sentries is empty on pgls
 afb3566 conf: use better clang detection
 ac04481 assert: choose function-var name on non-gnu
 1d030d1 test: Only build death tests on platforms that support them
+d8f9820 Don't re-call logging.basicConfig()
 c6826c1 PG: fix operator<<,log_wierdness log bound warning
 f4648bc PGLog::rewind_divergent_log: log may not contain newhead
 25b7349 osd/ErasureCodePlugin: close library before return on error
@@ -18316,6 +21026,7 @@ e9880cf crush: add mising header for count
 a10345a auth: add missing header for list
 01a5a83 mon: add missing header for std::find
 e71a2f0 auth: add missing header file for std::replace
+3fd3bd9 Fix hilariously long sentry_event para
 0209568 utime: use to_timespec for conversion
 4c3b6d6 rgw: allow multiple frontends of the same framework
 e25d32c rgw: clean up shutdown signaling
@@ -18343,7 +21054,9 @@ ed3caf7 mongoose: submodule, v4.1
 d03924c galois.c: fix compiler warning
 6821a6e assert: use feature test for static_cast
 330a6a7 wbthrottle: use posix_fadvise if available
+ed81960 Don't use create_run() unless necessary
 2bf8ff4 doc: Added DNS and SSL dialog.
+3bd490f Transition from sentry_events to sentry_event
 cd0d612 OSD: allow project_pg_history to handle a missing map
 9ab5133 OSD: don't clear peering_wait_for_split in advance_map()
 545135f ReplicatedPG::recover_backfill: adjust last_backfill to HEAD if snapdir
@@ -18356,6 +21069,8 @@ c0bcdc3 osd/erasurecode: correct one variable name in jerasure_matrix_to_bitmatr
 09e1597 mon/PGMap: use const ref, not pass-by-value
 fb0f198 rbd: omit 'rw' option during map
 2db20d9 qa: don't run racy xfstest 008
+92c57f1 install.upgrade: deepcopy() overrides before we modify it
+693fee2 rgw: fix multi-region test in verify suite
 1bb5aad upstart: fix ceph-crush-location default
 c3c962e doc: radosgw workaround for OpenStack Horizon bug
 cbc15bf doc: fix typo in openstack radosgw integration
@@ -18383,6 +21098,8 @@ df3af6c docs: Fix a typo in RGW documentation
 ca4c166 test/osd/RadosModel.h: select and reserve roll_back_to atomically
 99c5319 test/rados/list.cc: we might get some objects more than once
 4a41d3d os/chain_listxattr: fix leak fix
+3e72dcb Added two new tasks.  tgt starts up the tgt service.  iscsi starts up the iscsi service and logs in to an rbd image using the tgt service (either locally or remotely).  The iscsi service runs some simple tests, and then sets up the isci-image to be usable by rbd test scripts.  Later workunits can perform further testing on the isci-image interface.
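Teuthology tasks of this kind are generator functions that set a service up, yield to the nested tasks, and tear the service down afterwards. A generic skeleton of that shape (the service names and commands are placeholders, not the actual tgt/iscsi task code):

    import contextlib
    import logging

    log = logging.getLogger(__name__)

    @contextlib.contextmanager
    def task(ctx, config):
        # Generic service task: start the daemon, hand control to whatever
        # workunits are nested under it, then clean up on exit.
        log.info('starting hypothetical tgt/iscsi service')
        try:
            # e.g. remote.run(args=['sudo', 'service', 'tgt', 'start'])  (placeholder)
            yield
        finally:
            log.info('stopping hypothetical tgt/iscsi service')
            # e.g. remote.run(args=['sudo', 'service', 'tgt', 'stop'])  (placeholder)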
+d13c29c install.upgrade: fix overrides of sha1|tag|branch
 a8c3373 doc: Updated rewrite rule.
 ba153ea doc: Incorporated feedback.
 c66beb6 doc: Created new zone sync image.
@@ -18394,6 +21111,8 @@ d6a6cf5 doc: Modified image.
 a2090ba doc: Removed the Folsom reference.
 c9aa708 doc: Restored show_image_direct and added a link to older versions.
 8193bad doc: Removed nova-volume, early Ceph references and Folsom references.
+6afe653 expanding upgrade suites to include emperor.
+84d8b49 Initial ugly commit.
 dd14013 doc: Removed install yum priorities. Duplicated where relevant.
 c254da5 doc: Added quick ceph object store with links to manual install and config.
 16e25cd doc: Restored Object Store Quick Start entry for parallelism.
@@ -18411,6 +21130,14 @@ fba056a doc: Removed install libvirt. Consolidated to install-vm-cloud.
 04710b5 doc/release-notes: fix formatting
 b605c73 doc/release-notes: fix indentation
 1de46d6 os/chain_listxattr: fix leak fix
+ed1d11b ceph_manager: remove 6116 workaround
+9ae3fce radosgw-agent: add metadata-only option to task config
+96835e9 radosgw-agent: move positional argument last
+7f13657 internal: fix log for python 2.6
+5f01bcb rgw: fix whitespace
+db98afe rgw: remove unused variables
+d65a0b9 Reflect radosgw-agent option changes in teuthology
+bdca2e4 rgw: multi region sync is only for metadata
 6efd82c ceph: Release resource before return in BackedObject::download()
 e22347d ceph: Fix memory leak in chain_listxattr
 905243b Fix memory leak in Backtrace::print()
@@ -18466,6 +21193,7 @@ e5efc29 test: Use a portable syntax for seq(1)
 fbabd42 test: Change interpreter from /bin/bash to /bin/sh
 0a1579d test: Use portable arguments to /usr/bin/env
 b926930 pybind: use find_library to look for librados
+90161a3 Fixed errors. Tests pass.
 19d0160 doc/release-notes: v0.72 draft release notes
 bf198e6 fix the bug where setting pgp_num=-1 using "ceph osd pool set data|metadata|rbd -1" sets pgp_num to a huge number.
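The "huge number" comes from -1 being stored into an unsigned 32-bit field; the wraparound is easy to reproduce (illustration only, not the monitor code):

    # pgp_num lives in an unsigned 32-bit field, so -1 wraps around:
    value = -1
    print(value & 0xFFFFFFFF)   # 4294967295 -- the huge number the fix rejects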
 5eb836f ReplicatedPG: take and drop read locks when doing backfill
@@ -18476,10 +21204,14 @@ f0f6750 common: add an hobject_t::is_min() function
 fe30ac6 rgw: Use JSONFormatter to use keystone API
 5733f9c rgw: Use keystone password to validate token too
 bd04a77 rgw: Adds passwd alternative to keystone admin token
+d4a632d Support --os-version as argument.
 8282e24 mon/OSDMonitor: make racing dup pool rename behave
 66a9fbe common: rebuild_page_aligned sometimes rebuilds unaligned
 c14c98d mon: OSDMonitor: Make 'osd pool rename' idempotent
 284b73b packages: ceph.spec.in is missing make as a build dependency
+bcc58eb Fix some PEP-8 issues
+ad836b1 Fix param docstring format to be more Sphinx-y
+2cac388 Add docstrings to every function. And a few other comments.
 aea985c Objecter: expose the copy-get()'ed object's category
 06b5bf6 osd: add category to object_copy_data_t
 61f2e5d OSD: add back CEPH_OSD_OP_COPY_GET, and use it in the Objecter
@@ -18492,6 +21224,8 @@ ade8f19 ReplicatedPG: add a Context *ondone to RepGathers
 b403ca8 ReplicatedPG: copy: rename CopyOp::version -> user_version
 4e139fc ReplicatedPG: copy: do not let start_copy() return error codes
 178f9a2 ObjectStore: add a bufferlist-based getattrs() function
+6dbb11d Use worker httpd instead of prefork (like ubuntu) on rpm distros.
+c977489 Use worker httpd instead of prefork (like ubuntu) on rpm distros.
 e17ff19 osd/osd_types: init SnapSet::seq in ctor
 d2b661d os/FileStore: fix getattr return value when using omap
 3a469bb os/ObjectStore: fix RMATTRS encoding
@@ -18510,7 +21244,9 @@ c275912 rgw: include marker and truncated flag in data log list api
 e74776f cls_log: always return final marker from log_list
 ea816c1 rgw: skip read_policy checks for system_users
 1d7c204 Add a configurable to allow bucket perms to be checked before key perms through rgw_defer_to_bucket_acls config option.  This configurable defaults to an empty string.  Option values include:
+705a77f rbd_fsx: do not exceed 250GB for fsx image
 0e8182e mds: MDSMap: adjust buffer size for uint64 values with more than 5 chars
+625d9b2 ceph_manager: workaround for 6116
 af1dee5 doc: clarify that mons must have qurorum during deploy
 4c8be79 rename test_arch.c --> test_arch.cc to avoid undefined reference to `__gxx_personality_v0' error. Signed-off-by: huangjun <hjwsm1989 at gmail.com>
 7ba4bc4 cli: ceph: add support to ping monitors
@@ -18524,6 +21260,7 @@ c521ba7 mon: MonClient: adjust whitespaces
 b8d54cd doc: Fixed typo, clarified example.
 53486af doc: Updated docs for OSD Daemon RAM requirements.
 828537c doc: Added ARM. Added Calxeda hardware example.
+6a2dd66 rgw: revert to -X for apache2
 105fb61 doc: Added install for libvirt.
 6c88d40 doc: Added install for QEMU.
 fe6520b doc: Added install for ceph-deploy.
@@ -18561,11 +21298,14 @@ bd2eeb7 ceph-mon: add debug to ip selection
 a107030 librbd: wire up flush counter
 715d2ab common/BackTrace: fix memory leak
 687ecd8 common/cmdparse: fix memory leak
+0928e04 suites/fs: snap -> snaps
+dffa296 fs/snap: fix workunit paths
 9fa357d mds: update backtrace when old format inode is touched
 34d0941 client: fix invalid iterator dereference in Client::trim_caps()
 4f299ca autoconf: fix typo on AM_COMMON_CFLAGS
 94080de common: get_command_descriptions use cout instead of dout
 8586c75 ReplicatedPG: copy: conditionally requeue copy ops when cancelled
+c8ec9fe valgrind: fix libleveldb suppression on dynamically linked leveldb
 6dff926 PG: add a requeue_op() function to complement requeue_ops().
 45d1846 doc: Removed references to Chef.
 89995ef doc/release-notes: missed mds snaps
@@ -18581,16 +21321,30 @@ a107030 librbd: wire up flush counter
 11fc80d doc/release-notes: link ot the changelog
 eb0a3b7 doc/release-notes: v0.61.9
 d3f0c0b Makefile: fix /sbin vs /usr/sbin behavior
+26930c9 More robust machine_type and pid detection
 15ec533 OSD: check for splitting when processing recover/backfill reservations
+704b72e nuke: remove old log arg to nuke_one call
+1bf3064 fs/snap: add some snap tests
+a163336 *: add copy_from to next (or later) tests
 08177f2 ceph: Remove unavailable option with clang
+26803da Rewrite portions of teuthology.kill
 e509cb1 (tag: v0.71) v0.71
+01ce92c replacing -X option with -DNO_DETACH to ensure the apache process is detached at the end of run.
 10b466e radosgw: create /var/log/radosgw in package, not init script
+5f5eb2b upgrade*: do not hard-code next as the final upgrade hop
 5c280a2 .gitignore: ceph-kvstore-tool
+ded5c21 Remove needless arg from list_locks()
+59d14b8 Make nuke use its own logger
+40e5d1e Add teuthology-kill
 14e91bf debian, specfile: fix ceph-kvstore-tool packaging
+d79552e nuke: fix import
 fd6e2b8 ceph-kvstore-tool: copy one leveldb store to some other place
 85914b2 ceph-kvstore-tool: calc store crc
+280f783 install: allow use of overrides for upgrade version
 da69fa0 tools: move 'test_store_tool' to 'tools/ceph-kvstore-tool'
+c31d747 Fix broken logic in running_jobs_for_run()
 eafdc92 common/buffer: behave when cached crc stats don't start at 0
+b236785 Fix typo
 c5cdf4e crc32c: expand unit test
 0620eea sctp_crc32c: fix crc calculation in the NULL buffer case
 b96ee5c crc32c: add a special case crc32c NULL buffer handling
@@ -18617,6 +21371,7 @@ c69e76c ReplicatedPG: remove the other backfill related flushes
 3469dd8 RadosModel: send racing read on write
 0246d47 ReplicatedPG: block reads on an object until the write is committed
 c658258 OSD: ping tphandle during pg removal
+80d9ac5 Revert "Revert "fs/samba: reenable smbtorture lock test""
 4f403c2 common: don't do special things for unprivileged daemons
 5aa237e mon, osd: send leveldb log to /dev/null by default
 ab8f9b1 doc: Update from user feedback. Needed to enable S3/Keystone.
@@ -18625,6 +21380,8 @@ bd7a7dd os/FileStore: fix fiemap double-free(s)
 26228ed ceph-dencoder: select_generated() should properly validate its input
 8b43d72 vstart.sh: create dev/ automatically
 8d7dbf8 rgw: change default log level
+39c4674 test copy_from when running ceph_test_rados
+0e00d8c turn on mds & client debugging
 70cc681 mon/PGMonitor: set floor below which we do not warn about objects/pg
 bebbd6c rgw: fix authenticated users acl group check
 08327fe mon: osd pool set syntax relaxed, modify unit tests
@@ -18636,6 +21393,8 @@ e3ba8e8 mon: PGMap: reuse existing summary functions to output pool stats
 82e3317 mon: PGMap: keep track of per-pool stats deltas
 e2602c5 mon: make 'mon {add,remove}_data_pool ...' take pool name or id
 d6146b0 common/Formatter: add newline to flushed output if m_pretty
+8b89e83 Make check for running jobs part of the loop
+bac2965 ResultsSerializer.running_jobs_for_run() and test
 f2645e1 rgw: swift update obj metadata also add generic attrs
 6641273 SignalHandler: fix infinite loop on BSD systems
 2cc5805 doc: Removed underscore for consistency.
@@ -18644,11 +21403,14 @@ f568501 mds: flock: fix F_GETLK
 3c6710b qa/workunits/misc/dirfrag: make it work on ubuntu
 b0f49e0 ReplicatedPG.h: while there cannot be a read in progress, there may be a read blocked
 bf82ba9 doc: disable cephx requires auth_supported = none
+a1d8225 Added docstrings, and improved some of the comments on several tasks.
 0f73f0a rgw: switch out param to a pointer instead of reference
 8aa7f65 test: update cli test for radosgw-admin
 91f0c82 radosgw-admin: clarify --max-size param
 ad409f8 formatter: dump_bool dumps unquoted strings
 c1acf9a rgw_quoa.{h,cc}: add copyright notice
+3191e32 Put machine_type in config.yaml
+494c3b1 Make verbosity propagate correctly to modules
 8d8ae58 doc: Created new index to incorporate side-by-side deb/rpm installs.
 bc50dbb doc: Created installation doc for Yum priorities. Ceph-specific packages need it.
 05b2e44 doc: Created new QEMU install for RPMs with provision for Ceph-specific packages.
@@ -18660,10 +21422,21 @@ cdd851b doc: Moved installation portion to the installation section.
 1dff92b doc: Moved installation portion to the installation section.
 a182535 librados: add some clarifying comments
 7ef5eb0 librados: drop reference to completion in container destructor
+3905ab9 Debug output for all http requests
+3ca9092 Fix broken lambda
+61dec6f Be more verbose when waiting for free machines
+7d99de7 Give reporting a verbose flag
 f13cc68 doc: Fixed hyperlinks. Cleanup of old references to Chef.
+2e3e127 Lack of a job_id should be a warning
+bb93b1a Exempt teuthology-updatekeys from test_invalid
+d341b58 Ignore the exit code of ssh-keyscan
+f572905 Fix a circular import
+f28a7eb Move imports to top-level
 70250e8 osd: osd_types: Output pool's flag names during dump
 7113186 osdc/Objecter: clean up completion handlers that set *prval=0
 82e9330 osdc/Objecter: only make handlers set *prval if EIO
+636dc86 Move part of report_all_runs() into report_runs()
+61606fe Set a default timeout of 20s for HTTP connections
 1c28869 mon: OSDMonitor: allow (un)setting 'hashpspool' flag via 'osd pool set'
 2fe0d0d mon: OSDMonitor: split 'osd pool set' out of 'prepare_command'
 6bbb772 test/filestore/run_seed_to_range.sh: fix -d syntax
@@ -18671,6 +21444,31 @@ f13cc68 doc: Fixed hyperlinks. Cleanup of old references to Chef.
 eb381ff mds: don't decrease file size when recovering file
 1803f3b radosgw-admin: limit user bucket-level quota
 18a271d mds: optimize map element dereference
+01b81b7 Move monkey patching to __init__.py
+bd9cbdb Don't attempt to report if there's no job_id
+cbe7d8e Add basic tests for scripts
+098389d Properly express conflicting options
+f34f3b3 Move teuthology's arg parsing to scripts/
+8351a3a PEP-8
+e475321 PEP-8
+45d1b0e Move teuthology-coverage's arg parsing to scripts/
+f4d655b PEP-8
+193b311 Move teuthology-results' arg parsing to scripts/
+477e4ae PEP-8
+7ab9726 Move teuthology-report's arg parsing to scripts/
+755ac2f Move teuthology-updatekeys' arg parsing to scripts/
+84b2e55 Move schedule() to new schedule module
+c784c79 Move teuthology-schedule's arg parsing to scripts/
+a6b57cc PEP-8
+9748985 Move teuthology-lock's arg parsing to scripts/
+6b2f278 PEP-8
+1bf3a3d Move teuthology-worker's arg parsing to scripts/
+5fd1cd0 Move teuthology-ls's arg parsing to scripts/
+7805913 More PEP-8 cleanup
+e847b10 Move teuthology-suite's arg parsing to scripts/
+7ce4dfd Move teuthology-nuke's arg parsing to scripts/
+9177513 Add scripts/ module
+4902bfa PEP-8 cleanup
 d8faa82 ReplicatedPG: remove unused RWTracker::ObjState::clear
 89d3f47 radosgw-admin: can set user's bucket quota
 cb9ebd6 doc: Minor correction.
@@ -18681,6 +21479,7 @@ fc35807 rgw: protect against concurrent async quota updates
 5bc6327 doc: Merge cleanup.
 c0c332c doc: minor clean up.
 007f06e mds: fix infinite loop of MDCache::populate_mydir().
+7b88979 ignoring -X option in apache as it was causing s3test failures on the rpms.
 1f50750 ReplicatedPG: remove the other backfill related flushes
 db6623f RadosModel: send racing read on write
 2b216c3 ReplicatedPG: block reads on an object until the write is committed
@@ -18692,6 +21491,7 @@ e21e573 os: stronger assert on FileStore::lfn_open
 0f323bc common: unintended use of the wrong bloom_filter prototype
 4b911cf ReplicatedPG: copy: use aggregate return code instead of individual Op return
 6da4b91 os/FileStore: fix ENOENT error code for getattrs()
+1c32b1b Reflect paddles API change: jobs in jobs/
 71ee6d7 mon: allow MMonGetMap without authentication
 f279641 mon: do not put() unhandle message
 0cae3a1 common/bloom_filter: add a few assertions and checks for bit_table_ == NULL
@@ -18738,24 +21538,42 @@ ff17e45 PG,ReplicatedPG: expose PGBackend to PG
 e73ec48 common/hobject: add is_degenerate method
 c8a4411 PGMap: calc_min_last_epoch_clean() will now also use osd_epochs
 091809b PGMap,PGMonitor: maintain mapping of osd to recent stat epoch
+f78f316 Add more tests for ReportSerializer
 e3bb065 (tag: v0.70, tag: mark-v0.70-wip) v0.70
+972ab1c Tweak logic of try_push_job_info()
 806725a ReplicatedPG: copy: add op progression output
 639ff9f ReplicatedPG: copy: don't leak a ctx on failed copy ops
 469d471 ReplicatedPG: assert that we have succeeded in do_osd_ops on copyfrom repeats
 f3733a2 ReplicatedPG: copy: switch CopyCallback to use a GenContext
 dc0dfb9 common,os: Remove filestore_xattr_use_omap option
+f18c8f7 Make most of lock_machines() PEP-8 clean.
+5482ba8 Make teuthology-schedule also print the job's name
+08efeb7 Store the job_id as a str, not an int.
 10a1e9b rgw: update cache atomically
 6ce762f lru_map: add find_and_update()
+f160ab6 Move teuthology.report import; add large warning
+2270db7 Add (and use) try_push_job_info() for easy pushin'
+760fe6b Move new imports to inside main()
 4634d3c mds: delete orphan dirfrags after fragmenting directory
 cd4cd3f mds: start internal MDS request for fragmenting directory
+e258555 kcephfs/mixed-clients: add missing %
+13d4884 kcephfs/mixed-clients: do not specify install version
 55d279b ceph_test_rados: do not let rollback race with snap delete
 b09a1ef ceph_test_rados: stop on read error!
 03ba740 osd/ReplicatedPG: fix null deref on rollback_to whiteout check
+6ca7495 Revert "fs/samba: reenable smbtorture lock test"
 aacd67e PendingReleaseNotes: fix typo
 fea1e0e PendingReleaseNotes: make a note about K vs k
+84e8651 schedule_suite.sh: include default machine type in job name
 d6a1799 TrackedOp: specify queue sizes and warnings on a per-tracker basis
 ea831e0 TrackedOp: give people an _event_marked() notifier instead of a virtual mark_event()
 1033588 qa: fix rbd cli tests checking size
+e786e16 Add test for test_all_runs
+93b6fa1 Clarify job vs. run
+f060449 Create a FakeArchive class to forge test results
+ff120d7 Escape reserved characters in URIs
+77b65c7 Report results synchronously.
+f0b35fc Add more documentation.
 c19935c doc: Fixed typo.
 0ce1046 doc: Added clarifying comments.
 afb4d83 librados: drop #include of int_types.h from installed headers
@@ -18774,6 +21592,17 @@ dce3d26 mon: MonmapMonitor: make 'ceph mon add' idempotent
 41a1345 TrackedOp: rework dump interface a little
 80659cc rgw: init RGWBucketStats
 29009b2 doc: Updated diagram for added clarity.
+058a27b Add create_run() and push_job_info() with docs.
+fe64887 s/submit_/report_/g
+555e55f Rename ResultsPoster to ResultsReporter
+79e27ab Add create_run; allow passing json to submit_job
+52282fb Overhaul posting logic to avoid some corner cases
+4bcae4b Add flag for refreshing already-pushed runs
+da4a7b8 Remove old method used for testing
+8b9a691 Add --server flag
+cf68a3a Reuse connection objects.
+f77ee6e Add argument parsing to make this a proper script.
+e3e0775 A first pass at integration with paddles
 2c88910 rgw: handle negative quota as non-assigned values
 07bb72a radosgw-admin: quota control api
 69180d5 rgw: dump quota params even if disabled
@@ -18781,12 +21610,15 @@ a8761a5 TrackedOp: just make CephContext member public
 2723a09 TrackedOp: template OpTracker on only T[::Ref] (ie, OpRequest[::Ref])
 ebae077 rgw: bucket stats also dump quota info
 7973d44 OpTracker: give TrackedOp a default dump() function
+149b98e rest-api.py: test that ceph-rest-api is up and servicing requests
 baf1d40 rgw: init quota
 721f170 client: remove requests from closed MetaSession
 63f5814 ceph: Update FUSE_USE_VERSION from 26 to 30.
 f8a947d client: trim deleted inode
 563517d rgw: update quota stats when needed
+3665eb7 fs/samba: reenable smbtorture lock test
 65ae9b8 COPYING: fix URL
+0ce72d7 rest-api.py: fix up client identification
 11461cb debian/copyright: sync up with COPYING
 1a56fe9 COPYING: add Packaging: section
 e70ea84 COPYING: add debian-style headers
@@ -18795,6 +21627,14 @@ a2e175b COPYING: make note of common/bloom_filer.hpp (boost) license
 f31d691 common/bloom_filter: fix whitespace
 fdb8b0d common/bloom_filter: test behavior of sequences of bloom filters
 f1584fb common/bloom_filter: unit tests
+65d93de xfs: enable sloppy crc
+0bb55a5 btrfs: enable sloppy crc
+3615c14 make xfs.yaml a symlink
+886c4fd make ext4.yaml a symlink
+92cc980 make btrfs.yaml a symlink
+47f4ccc fs: add generic osd fs selector stubs
+759bbb4 Revert "ceph.conf: filestore sloppy crc = true"
+964a929 kcephfs: add fsx back
 8835ef8 common, os, osd: Use common functions for safe file reading and writing
 c0cbd9a osd: In read_meta() leave an extra byte in buffer to nul terminate
 238a303 ErasureCode: update PGBackend description
@@ -18806,6 +21646,7 @@ d29be45 ReplicatedPG: rename finish_copy -> finish_copyfrom
 a96b12f ReplicatedPG: copy: use CopyCallback instead of CopyOp in OpContext
 d2cb2bf mds: return -EAGAIN if standby replay falls behind
 fbeabcc os/FileStore: report errors from _crc_load_... and _crc_save
+e17928d nuke: s/run_name/name/
 dfea81e ceph_test_store_tool: add 'set prefix key' feature
 398249a test: test_store_tool: optionally output value crc when listing keys
 18fcd91 test: test_store_tool: add 'crc <prefix> <key>' command
@@ -18824,6 +21665,9 @@ dcd475d osdc/Objecter: fix return value for copy_get
 7e3084e osd/ReplicatedPG: mark objects dirty in make_writeable()
 d42d2b9 osd/osd_types: object_info_t::get_flag_string()
 a0ed9c2 osd/osd_types: add object_info_t::FLAG_DIRTY
+6b248e8 Check description of machines before nuking when -a is passed
+431d264 suite: don't schedule follow-on summary job for an empty run
+504195a suite: fix num_jobs count
 9b7a2ae crush: invalidate rmap on create (and thus decode)
 482938a doc: Fixed a few typos.
 011bff3 osd/osd_types: bump encoding from 11 -> 12
@@ -18850,12 +21694,20 @@ a9df335 msgr: debug delay_thread join
 bc98013 rgw: higher level quota check functionality
 b43bc1a Use 'k' when printing 'kilo'; accept either 'K' or 'k' as input
 3d062c2 rbd: fix cli test
+3e31c49 nuke: make half-hearted attempt to sync before reboot
 cce990e osdc/ObjectCacher: limit writeback IOs generated while holding lock
 055e313 rgw: quiet down warning message
 75b94ba osd/ReplicatedPG: fix iterator corruption in cancel_copy_ops()
+4135960 Fix test
+69ea839 Add test for skipping host key verification
+589560e Port from nosetests to py.test
+f91f792 Add another config test
+757b298 Don't use properties
 3452aad ceph_argparse.py: clean up error reporting when required param missing
 409aba6 rbd.cc: add readonly option for "rbd map"
+5795777 adding mixed-clients test that uses fuse client and kernel client in parallel.
 b032931 PendingReleaseNotes: update regarding librados change
+631476b ceph.conf: filestore sloppy crc = true
 b245ca1 os/FileStore: add sloppy crc tracking
 8912462 rgw: drop async pool create completion reference
 4605792 librados: pool async create / delete does not delete completion handle
@@ -18873,6 +21725,7 @@ c8054ac ReplicatedPG: remove unused CopyOp::waiting member, rename function for
 94478e0 OSD: unset_honor_cache_redirects() on Objecter
 35cdd89 Objecter: add "honor_cache_redirects" flag covering cache settings
 1fc24ff rbd.cc: propagate some errors to user-space when they're available
+d18d5a8 Added mixed-clients suite that exercises parallel workloads from kernel client and user space client.
 b6f278c ceph.in: fix missing exception variable in failure to open -o file
 1e9e34b rgw: don't append NULL char before json parsing
 793d9d2 ceph_json: use different string constructor for parser buffer
@@ -18881,6 +21734,8 @@ c821da9 rgw: more quota implementation
 4fe01b1 doc: changed journal aio default to true.
 a3aa005 qa: workunits: mon: test snaps ops using rbd.
 28949d5 Formatter: add dump_bool()
+2860e9d schedule: show priority on -s
+acdaf3d schedule: priority option
 efc6b5e ceph_test_rados: remove useless snapc setup
 37d7220 ceph_test_rados: update for copy_from in begin, not finish
 b06c189 common/crc32c_intel_fast: avoid reading partial trailing word
@@ -18895,6 +21750,16 @@ eec315f ceph_test_rados: fix update_object_full
 cbf0ba2 qa/run_xfstests.sh: use old xfstests until we adapt to new org
 bab72ed os: Simplify collection_list* funcs by removing dynamic_cast
 db5bbdd rgw: quota utility class
+2acceef valgrind: suppress inet_ntop noise
+21765ce Move 'import os' to inside main()
+962a9ae Add '# noqa' to suppress a linter error
+622ce11 Don't redefine variable (cherry picked from commit 083717c1b4022289806030faf23ff7c4aa371f19)
+8a11e4d Remove unused variables (cherry picked from commit 581b6b3e977b99fc58fe25e66c933c65e38dc87c)
+8497437 Remove unused imports (cherry picked from commit 586817481119c4fc4a39f8804e7871a43491e01f)
+a2c9bdc Fix undefined name errors (cherry picked from commit f59497ef2214f29d5995435d83766c7994e8f2cd)
+b301a74 Make teuthology.locker a module
+cb3b563 Add missing requirement: web.py (cherry picked from commit 1b3349525c361d4253f76729f8416ac6d9029289)
+fb5c368 add flake8 checks to teuthology
 9750965 os: Make write_version_stamp() private
 55f4aa1 osd: Remove code for reading ancient unsupported logs
 4a757eb os/ObjectStore: Interim collection_list* functions in ObjectStore
@@ -18956,23 +21821,31 @@ b144170 mds: properly return log replay error
 a65a867 autoconf: fix build out of tree
 fde0f86 Context: add GenContext templated on the callback argument
 3cffff2 mds/Server: fix LOOKUPSNAP
+fedbf35 added v0.67.2 to upgrade-dumpling suite
 12aa53c common/bloom_filter: move header from include/
 9df9155 common/bloom_filter: make bloom_filter encodable
 8dcdeb2 common/bloom_filter: make optimal parameter calculation static
 eda807e common/bloom_filter: make mode match formatting
 4d8f78b mon/PGMonitor: fix segfault when osdmap and pgmap stats are out of sync
+8de3143 If we're scheduling for non-ubuntu, look for rpms
+25bc62d nuke: add missing import os
 4799368 Makefile: don't use srcdir in TESTS
 7bbadf1 mailmap: add Matthew Roy, Matthew Wodrich
 c8cae87 qa/workunits/mon/crush_ops.sh: fix test
 812234c ErasureCode: get rid of extra copy when encoding
+80dcab7 only use host_key if it is not None
 2422b2a autoconf: remove get_command_descriptions  dependency to gtest
 04c7207 git: consolidate mails in commit logs
+2870ec5 rbd: move xfstest TESTDIR=... bit to front of command
 ac0faaa doc: Removed service ops language and added reference to operations.
 3de3256 mon/OSDMonitor: fix 'ceph osd crush reweight ...'
 073732c doc: Refactored diagram and text.
 99c10bc doc: Updated diagrams, refactored text.
 1c12eef osd/ReplicatedPG: fix leak of RepGather on watch timeout
 1d67e15 osd/ReplicatedPG: fix leak of RepGather on large-object COPY_FROM
+218776b Fix namespace collision
+f1aac66 ceph.conf: fix typo
+3955666 Add config option 'verify_host_keys'
 afa2468 ceph_test_rados: order racing read wrt to the COPY_FROM
 3faf08f librados: add OPERATION_ORDER_READS_WRITES flag
 9322305 osd/ReplicatedPG: respect RWORDERED rados flag
@@ -18980,8 +21853,12 @@ f6510c7 osd: change warn_interval_multiplier to uint32_t
 84e1f09 arch/intel: fix old comment
 366b608 arch/intel: use intel probe instructions for x86_64 only
 1bce1f0 osd: change warn_interval_multiplier to uint32_t
+3e75cf3 fix an undefined name by importing config
+ed6c2c0 ceph.conf: reduce min pg per osd
+4043c7d Split test into two
 4bc1818 ErasureCode: fix plugin loading threaded test
 e50343e rgw: fix rgw test to reflect usage change
+058b1fa When scheduling fails, send an email.
 abb88d7 osd: revert 'osd max xattr size' limit
 73289b3 mds: be more careful about decoding LogEvents
 c32c51a ErasureCode: optimize padding calculation
@@ -19033,6 +21910,16 @@ cbf1f3c mds: evaluate stray when releasing inode/dentry's reference
 59ee51a osd/ReplicatedPG: handle COPY_FROM self
 5cb7b29 mon: fix wrong arg to "instructed to" status message
 9e98620 rgw: destroy get_obj handle in copy_obj()
+0ce6278 Don't hardcode the git://ceph.com/git/ mirror
+e6040f7 Don't hardcode ceph.com mirror for linux-firmware
+b79343c move the sitepackages to testenv section
+feefe21 tell py.test that we want teuthology stuff only
+b684241 tell tox to use site-packages
+cbda14f just use python 2.7 for now
+8d3cf9e make sure we don't hard code the path for py.test
+345bc47 append .tox to avoid going in there
+d1deb6d Don't hardcode teuthology's git repo URL
+9de95d1 Add a basic test for teuthology.config
 c28dd12 note that ceph-deploy should not be called with sudo on certain situations
 2ade5b6 doc: Updated graphic. Removed bullet points. Cleaned up RPM/YUM example.
 c55d7ac doc: Updated graphic to use same name as command line examples.
@@ -19040,6 +21927,11 @@ c55d7ac doc: Updated graphic to use same name as command line examples.
 5eb4db1 doc: Removed Get Involved from Quick Start.
 af7ad1d doc: Changed title, and removed recommendations sections.
 dc19d24 doc: Moved recommendations sections to Intro.
+bfb2760 rados: only pass --op copy_from if non-zero
+1600785 Add config.ceph_git_base_url
+db1231b Use config.lock_server
+7153c2b Add doc noting Inktank's lockserver URL
+e93c8ef Use teuthology.config.
 b1eeadd qa: workunits: cephtool: check if 'heap' commands are parseable
 296f2d0 osd: OSD: add 'heap' command to known osd commands array
 238fe27 mds: MDS: pass only heap profiler commands instead of the whole cmd vector
@@ -19048,6 +21940,9 @@ e112a83 FileStore: only remove the omap entries if nlink == 1
 7c1d2de lru_map: don't use list::size()
 532e41a common/lru_map: rename tokens to entries
 ca984e3 bufferlist: don't use list::size()
+cc4b956 add a tox file
+eb18cbf add .tox to gitignore
+eeb78b6 Update test_connect() to reflect API changes
 0681971 Makefile: add extra cls DENCODER_DEPS in the cls makefile, not rgw
 9e0b5ea OpTracker: get rid of TrackedOp::received_time for the Message
 a2d633b OpTracker: demand that a TrackedOp gets to own its Message for life.
@@ -19061,12 +21956,17 @@ a8bbb81 OpTracker: remove the references to "osd" in config variables
 5fdaccd OpTracker: add an init_from_message() to the TrackedOp interface
 24c3389 OpTracker: start making the OpTracker into a generic
 0678dcd OpRequest: remove obsolete comment about ref-counting (use OpRequestRef!)
+b993eca Update many unit tests to reflect 2yrs of changes
 53e17c2 osd: move pow2_hist_t out of osd_types and into include/
+ffe7d80 Add pytest.ini
 16ebb25 mon: fix wrong arg to "instructed to" status message
 5c46fc4 doc: Made some changes and incorporated a draft diagram.
 5bb7417 doc: Added draft of region/zone diagram.
 bcc1680 mon: fix inverted test in osd pool create
 f3718c2 code_env: use feature test for PR_GET_NAME support
+974aa6a Put helper scripts in /usr/bin
+9ae29fa Port to py.test
+4090753 Add test for teuthology.suite.build_email_body()
 08fe028 rgw: use bufferlist::append() instead of bufferlist::push_back()
 fd6646f Makefile: fix unittest_arch
 5421d6d Makefile: fix unittest_crc32c
@@ -19078,10 +21978,14 @@ c63b4ed mds: allow delay in evaluating stray
 298c39f mds: touch dentry bottom recursively
 e303b96 mds: re-integrate stray when link count >= 1
 862e0f1 mds: fix MDCache::truncate_inode_finish() journal
+1685de6 Fix typo
 9601092 os/FileStore: fix uninitialized var
 b66ac77 osdc/ObjectCacher: finish contexts after dropping object reference
 ce723b5 doc/release-notes: v0.69
+d83912e If duration is missing from summary.yaml, use 0
+aaa3cf9 Fix logs URL on Sentry pages.
 6ca6f2f (tag: v0.69) v0.69
+bbda752 Make sed expressions safe to run multiple times
 5541a1d doc: Updated link to Storage Cluster Quick Start.
 6af8e3c doc: Updated link to Storage Cluster Quick Start.
 b1d58fa doc: Updated link to Storage Cluster Quick Start.
@@ -19091,6 +21995,8 @@ a39de7b doc: Made sysvinit and service sections parallel to upstart for clarity.
 c054469 doc: Updated for 3-node deployment and multiple Linux distributions.
 31fff93 doc: Added Intro to Ceph to Index.
 fcd749f doc: Excised content from "Getting Started" and created Intro to Ceph.
+c4a47ff Add dummy suite.
+61ddeb6 Revert "suite: don't schedule follow-on summary job for an empty run"
 a80f831 ceph_test_rados: identify write seq_num in output
 3a00187 librados: test copy_from without src_version specified
 045b902 osd: allow a copy_from without knowing the src_version
@@ -19110,25 +22016,41 @@ df7c36a osd/ReplicatedPG: factor some bits into finish_copy
 971bf60 Remove unneeded junit4 check
 e38bd8d Removing extraneous code
 cd6f4bc Use a loop for testing jdk paths
+4520c5f suite: do not calculate product on an empty list (take 2)
 3cef755 fix some comments
 ae7912f doc: Added conf example.
 e55d59f doc: Updated usage.
 0be2c87 doc: Removed --fs-type option and text.
 874186b doc: Updated the usage scenario and made a few syntax edits.
+e65d6e2 Revert "suite: do not calculate product on an empty list"
 abd2fcd ErasureCode: fix uninitialized variable warning
+8b63396 Use teuthology-specific backup for /etc/sudoers
+4d2e3c2 Make run_job merge job_config['config'] if needed
+fad7107 Be less picky about trailing slashes.
 81983ba mon: OSDMonitor: update latest_full while rebuilding full maps
 4ac1570 mon: OSDMonitor: smaller transactions when rebuilding full versions
 4216eac rgw: try to create log pool if doesn't exist
+7b908b4 modified rgw tests and fs tests to get it going in the nightlies
+0d37a33 added rgw task
 b86c068 hadoop: remove hadoop shim
 e7f7483 rgw: NULL terminate buffer before parsing it
+a2b7c72 suite: don't schedule follow-on summary job for an empty run
+5a08389 suite: be verbose about how many jobs we generate
 3f8c969 make: add tmalloc lib dep in tcmalloc guard
+dda7954 suite: do not calculate product on an empty list
+31f0f7a Fix get_http_log_path(), update callers, add tests
+eb4c575 made help more readable
+2db2ba7 Fix TypeError (cherry picked from commit c4591a16e1781f8da3502b38e3dc72f629478312)
+1901a14 removed basic dir as it was empty
 daf417f osd/ReplicatedPG.cc: Verify that recovery is truly complete
 139a714 osd/OSD.cc: Use MIN() so that we don't exceed osd_recovery_max_active
+e83b5de Use check_output() and log.exception()
 4633729 mon/OSDMonitor: make busy creating pgs message more explicit
 40613b7 mon/MDSMonitor: don't reset incarnation when creating newfs
 31e3a51 rgw: don't call list::size() in ObjectCache
 91375d0 common: fix Mutex, Cond no-copy declarations
 626669a rgw: drain pending requests before completing write
+40d6c60 feature # 5942. Added examples to teuthology binaries help page
 25a608c osd/ReplicatedPG: allow RepGathers with no version
 155cdd2 osd: flag new/old temp objects in MOSDSubOp
 c4260fa osd/ReplicatedPG: drop unused issue_repop() arguments
@@ -19137,8 +22059,22 @@ c9885e7 osd/ReplicatedPG: generate one-off unique temp object names
 6cecd0d osd/osd_types: move coll_t::META_COLL definition
 17c5d76 os/FileStore: implement collection_move_rename
 ef7cffc os/ObjectStore: add collection_move_rename
+1228d8c Add teuthology branch to sentry report tags
+0ad9c87 Ensure teuthology_branch is stored in job_config
+5b3ce84 Add ctx.config to sentry info.
+fe51db6 Merge job_config and ctx.config
+725ba83 Add test for misc.get_http_log_path()
+38a1a6d Use sentry.get_ident() again
+4e9c537 Don't fail if there's no job_id
+df49257 Passing job_id to get_http_log_path() is optional
+f7537f9 Add logs URL to sentry reports
+713fa52 Add job id and actual archive dir to job config Also add job id to info.yaml
+4699ad1 Don't "import misc as teuthology"
+c1ba276 Move get_http_log_path
 288bef3 rgw-admin: Adds --metadata-key option to help Signed-off-by: Christophe Courtaut <christophe.courtaut at gmail.com>
+c9db15e s/jobid/job_id/
 0499948 rgw: when failing read from client, return correct error
+89df90f rados: add --op copy_from ... support
 558d9fc osd: bind objecter messenger to cluster address
 1d1bf41 osd: name the objecter's messenger "ms_objecter" instead of "hbclient"
 14c71ea Objecter: rename cancel_op -> cancel_linger_op
@@ -19151,6 +22087,7 @@ a45612c Objecter: add an Op::target_oid, and use it when submitting Ops
 79f02d6 Objecter: rename Op::oid -> Op::base_oid
 aeec0c6 osd: create a new request_redirect_t
 c24e170 mon: fix syntax error in osd pool create args
+5acc57f remove basedir/testdir distinction
 780954e autoconf: use $(UNITTEST_CXXFLAGS) in tests
 647188c ErasureCodeJerasure: plugin
 e9e5391 ErasureCodeJerasure: define technique Liber8tion
@@ -19164,6 +22101,7 @@ c8def86 ErasureCodeJerasure: unit test common to all techniques
 a786ad7 ceph-disk: make initial journal files 0 bytes
 8c76f3a crushtool: do not dump core with non-unique bucket IDs
 bde2772 doc/release-notes: v0.67.3
+ccbd9b0 upgrade/rgw*: use correct branch for s3tests
 c56e039 buffer: uninline, constify crc32c()
 397b4c2 osd: add empty() function to object_locator_t
 0b24be2 MOSDOpReply: more const-correctness
@@ -19189,13 +22127,17 @@ ac609a4 doc: Syntax fixes to remove gitbuilder errors.
 916901f ErasureCodePlugin: plugin registry
 b61369c ErasureCodePlugin: plugin interface
 640f2f2 ErasureCode: example implementation : K=2 M=1
+03f083c rbd/singleton: fix path
 95d61c1 Fix usage to include export and import --type options
+d47ae37 Adding in erroneously removed args
 dde21bd ErasureCode: abstract interface
 ff9beb2 doc: Syntax fixes to stop gitbuilder warnings.
 9c09701 doc: Syntax fixes to stop gitbuilder warnings.
 5900b43 doc: Syntax fixes to stop gitbuilder warnings.
 99c42e1 make sure we use a version of sphinx that we can handle
 726fe73 rados: fix typo
+922aa09 fix a few missing adjust-ulimits users
+2946dd8 upgrade: set dumpling branch for rest.yaml
 6949d22 automake cleanup: implementing non-recursive make
 99c6be2 automake cleanup: renaming buildtest
 417f1f2 automake cleanup: moving rados tool to tools subdir
@@ -19209,15 +22151,36 @@ c17d134 fix: build tests need libcrc.la
 b5137ba init-radosgw*: fix status return value if radosgw isn't running
 55a5271 cleanup: state_name in NamedState init list
 1387278 rgw: fix get cors, delete cors
+6b40144 rgw: fix valgrind when no valgrind
+a5c33a3 rgw: fix valgrind when no valgrind
+3a24579 mini upgrade suite to run across all distros
+9e03c73 misc: valgrind: fix cd behavior
+19324c6 misc: valgrind: fix valgrind.supp path
+82cfa84 Get rid of chdir-coredump.
+a60e84d ceph: fix daemon-helper typo
+2214fe1 misc: valgrind --num-callers=50
+53b8e27 Helper scripts live in /usr/local/bin now!
+edc5ef8 Move helper scripts to /usr/local/bin
+1a05f9d queue: fix stderr redirect
 39c89dc common/crc32c_intel_fast: avoid reading partial trailing word
+5cd2f08 queue: include tube name in worker logs
+f0eabc9 Be polite and restore /etc/sudoers when done
 c3529ed cleanup: passing context to NamedState for ceph_clock
 159693d cleanup: globals from libosd
+d6e8b23 Add internal.sudo subtask.
+6d1ed57 Let execute() accept a string for args.
+6406a1e Add Cluster.write_file()
+8f2fb9d Use misc.get_archive_dir()
+90f9a6a Add get_archive_dir()
+91fd041 Put sudo in front of adjust-ulimits if it's used
+8e37361 Via automagic, only call ulimit -n if running as root.
 258396d doc: Added entry for federated configuration to the index.
 da5ccf7 doc: Added draft of configuration guide for federated gateways.
 8d0a1db cleanup: reduced number of pointer indirection in osd
 5dd0a83 cleanup: removing globals from common/obj_bencher
 cf93c83 cleanup: removing refs to globals from libosd
 9c4e626 cleanup: removed last references to globals from client
+d34ba16 Run yum clean all after installing new ceph-release.
 dfcee0c osd/ReplicatedPG: set reply versions for pg ops (PGLS)
 c4414a3 osd/ReplicatedPG: set reply versions on dup op ACK
 9a55129 rgw: flush pending data when completing multipart part upload
@@ -19236,26 +22199,39 @@ df4f712 rgw: OPTIONS request doesn't need to read object info
 cfb07f1 arch: add NEON cpu feature detection
 83a4848 doc: Updated usage for --infile syntax. Added zone name for zone syntax.
 df11247 mon/OSDMonitor: fix POOL_OP_DELETE_SNAP early return
+c155dce drop obsolete PYTHONPATH settings
 e95e707 doc: Organized into sections. Added zone/region and pool details.
+89f1139 Append '/' to the end of the log directory path
 9f44de5 doc/release-notes: v0.68
+4e7db95 rbd: add rbd_snaps_ops.sh test
 b05f7ea osd/ReplicatedPG: set reply versions for pg ops (PGLS)
 5148aac osd/ReplicatedPG: set reply versions on dup op ACK
 f566102 doc: remove 'Unexpected indentation' from versions doc.
 a9a516a Correct syntax for generate swift key
+a9522ae add the missing  for fedora links in install task
 ab69d99 mon: fix typo and remove redundant sentence
 7c09ede mon: fix typo in comment
 3c9f849 doc: erasure code ghobject is made of gen_t + shard_t
+d5e265d schedule_suite.sh: turn up osd logging a little
 b4cf0f2 (tag: v0.68) v0.68
 dcbdeaf doc: Fix repo URL for Ceph cloning (dev/generatedocs)
 996af2d ceph_test_rados: test COPY_FROM
 ed68079 osd: initial COPY_FROM (not viable for large objects)
 3a8adf5 objecter, librados: add COPY_FROM operation
+a455b16 Add missing sudo from command.
 746e78c doc: Updated manual install to include sync agent, ARM packages, and DNS configuration.
 c367d6f ceph_test_rados: add missing kick for rollback
+5985c9d Add note about public teuthology logs.
+5164063 Wrap paragraphs.
 1d1f7f1 rgw: change watch init ordering, don't distribute if can't
+f744c4c Catch CommandFailedError and print debug info
+f3db12f schedule: strip out targets
+f36f16a Logs and Sentry links are public now.
+4ba3d52 Revert "Use install -d for /var/log/ceph."
 087800e osd: provide better version bounds for cls_current_version and ENOENT replies
 6019558 rgw: Allow wildcard in supported keystone roles.
 f79b379 osd/ReplicatedPG: set user_version in waiting_for_commit replies
+3816997 schedule_suite.sh: default email to ceph-qa at ceph.com
 e8506b5 osd/ReplicatedPG: do not set ctx->user_at_version unless ctx->user_modify
 99793d9 osd/ReplicatedPG: do not log a user_version on the snapdir object
 72c6c30 osd/ReplicatedPG: log previous user_version on clone
@@ -19266,6 +22242,9 @@ cc8e901 osd/ReplicatedPG: do not log user_version on deletion events
 a200e18 Validate S3 tokens against Keystone
 e48d6cb mon: fix uninitialized Op field
 a5d815d automake cleanup: uninitialized version_t
+3d64322 Revert "get_scratch_devices(): catch CommandFailedError, log "dev in use""
+80b6140 README: add deps
+35b9bea s/ceph-debug/ceph-debuginfo for rpms
 4f6c6b2 osd/ReplicatedPG: do not requeue if not primary
 b0a30a5 osd: COPY_GET operation
 8d74f41 osd/ReplicatedPG: factor {execute,reply}_ctx() out of do_op()
@@ -19282,6 +22261,7 @@ c6d0b10 osd, objecter: clean up assert_ver()
 8ba50c0 osd/ReplicatedPG: drop src_obc.clear() calls
 6473060 os/ObjectStore: add bufferlist variant of setattrs
 7ec0b4f unittest_lfnindex testing older HASH_INDEX_TAG
+416508d get_scratch_devices(): catch CommandFailedError, log "dev in use"
 8a65ae8 doc/rados/operations/pools: remove experimental note about pg splitting
 13aac48 workunits: add a test for caching redirects
 3516996 mon/OSDMonitor: 'osd tier {set,remove}-overlay <pool> [tierpool]'
@@ -19292,18 +22272,34 @@ e3fb912 Objecter: respect read_tier & write_tier for initial op submission
 b76953c Objecter: be careful about precalculated pgids
 665acc1 Objecter: add an Op::target_oloc and use it instead of base_oloc in send_op()
 e2fcad0 Objecter: rename Op::oloc -> Op::base_oloc
+df55c3e added point releases of dumpling to upgrade-dumpling suite
 64774e5 os: LevelDBStore: ignore ENOENT files when estimating store size
+dcbf50b nuke: get pid, owner from info.yaml (if present)
 e60d4e0 ceph-post-file: use mktemp instead of tempfile
+3981a8f Never use 'except:' without specifying an Exception.
+f342bae run: do not import at top level
 96aaa5e ceph_test_rados: rollback bumps user_version
 42d65b0 PGLog: initialize writeout_from in PGLog constructor
+24991c4 Add hung job description to email
 af0a0cd mon/OSDMonitor: 'osd pool tier <add|remove> <pool> <tierpool>'
 5e2c86a osd/OSDMonitor: avoid polluting pending_inc on error for 'osd pool set ...'
 ed62c45 osd_types: add pg_pool_t cache-related fields
 4f7fce5 osd/ReplicatedPG: drop dout from object_context_destructor_callback
 00b6a94 osd/ReplicatedPG: remove debug lines from snapset_context get/put
+44401f9 Workers: only log child's stderr, not stdout
+40f1d4f Don't print colon if there's no exception message. Also, remove testing exception.
+83a5ab1 Pretty-up write_initial_metadata; s/run/name/
+9550b49 Add exception hook
+f6430df run: write info.yaml on job start
+a0e86c0 Move logging setup out of main()
+27eea4a test
+649bddf run: set up archive dir and logger output sooner
 7a7361d rgw: Fix S3 auth when using response-* query string params
 91616ce ceph.spec.in:  remove trailing paren in previous commit
 b03f241 ceph.spec.in:  Don't invoke debug_package macro on centos.
+444a0ab Enabling multi-region S3 tests
+ef85fac schedule_suite.sh: allow partial 'suite' names
+4fbec83 suite: return [] for build_matrix base case
 e20d1f8 ceph_test_rados: validate user_version
 c8dcd2e osd/ReplicatedPG: set version, user_version correctly on reads
 9374dc8 messages/MOSDOpReply: fix user_version in reply (add missing braces)
@@ -19313,11 +22309,31 @@ b2b0f20 qa: workunits: mon: test snaps ops using rbd.
 0e85074 mon: OSDMonitor: return earlier on no-ops over currently committed state
 274b4b9 mon: OSDMonitor: don't propose on prepare_pool_op()
 fab7954 mon: OSDMonitor: check if pool is on unmanaged snaps mode on mk/rmsnap
+df73f50 Add run name to first line of email.
+7ce6696 suite: build_matrix as a list of filenames, not file content
+7163e89 suite: allow individual files to be scheduled
+b829543 suite: generalize collection expansion
 f808c20 PGLog: maintain writeout_from and trimmed
+8314140 suite: clean up imports
+2dd889e suite: make collection args (optionally) relative to a base dir
+802fa51 suite: make names a bit more friendly
+8894ff6 suite: add --dry-run
+6175a13 Don't assume anything about the base path here.
+a1a3358 README: update for new flexible structure.
+30c56cc mark all existing collections with %
 fd3fd59 doc/release-notes: v0.56.6 and .7 bobtail
 c240285 mon: discover mon addrs, names during election state too
+38a47ec Don't return inside __init__
 61b40f4 doc/dev/cache-pool: document cache pool management interface
 b91c1c5 add CEPH_FEATURE_OSD_CACHEPOOL
+53cea02 Add apology for non-public links
+25defd4 Indent wrapped exceptions.
+ffac4ad s3tests: extend for multi-region tests
+db51888 rgw: persist the region info
+2877e27 radosgw-agent: store radosgw_agent server/port
+6c5a7af move multi_region_enabled() into util class
+951177b internal: whitelist btrfs lock imbalance bug 6097
+c30310a ceph.conf.template: add osd_debug_pg_log_writeout = true
 1c0d75d PGLog: don't maintain log_keys_debug if the config is disabled
 fe68b15 PGLog: move the log size check after the early return
 6c432f1 doc: update to describe new OSD version support as it actually exists
@@ -19340,27 +22356,50 @@ ff1a573 Objecter: librados: mass switch from eversion_t to version_t
 37bba41 ReplicatedPG: remove long-dead branch
 f400816 MOSDOpReply: rename *_version() -> *_replay_version()
 7a7ae60 MOSDOpReply: rename reassert_version -> replay_version
+232e3d3 Fix undefined symbol errors
+6afb238 Time is an integer, in seconds.
+dfdac24 Make email formatting way, way nicer.
 b5ea74c docs: document how the current OSD PG/object versions work
 37850e1 ceph.in: add to $PATH if needed regardless of LD_LIBRARY_PATH state
+eb585d1 Move job listing logic to get_jobs()
 3266862 doc: Updated to accurately reflect that upstart applies to a single node.
 8df504c ceph.spec.in:  radosgw package doesn't require mod_fcgi
 a10ca4b librbd: fix debug print in aio_write
 228510f cleanup: removed last references to g_conf from auth
+f2a688d rados/verify: crank up debugging on system tests
 ea2fc85 SharedPtrRegistry: get_next must not delete while holding the lock
 14c31dd doc : erasure code developer notes updates
 af5281e common: move SharedPtrRegistry test after t.join
+502714b admin_socket: fix retry-on-error behavior
+81709ed Avoid double slashes in sentry event URL
+a1a261a Add tags! Task name and owner to start.
+0503803 Add sentry events to suite email.
+489c166 Also leave a list of sentry events in the summary
+4007173 Add URL to Sentry event to traceback output.
+6f00939 Use os.path.join, not urlparse.urljoin
+300374b For failures, add http links to log directories.
+fedc91c Add a catch-all __getattr__(); add comments
+12cb686 Rewrite email-generating code.
+6c486ab Tweak logging
+77706c6 internal: remove (hopefully obsolete) kern.log checks
+711a024 internal: unbreak syslog BUG checks and gz
 c5b5ce1 osd: install admin socket commands after signals
 76a38c3 mon/DataHealthService: preserve compat of data stats dump
 f0805cb test/librados/cmd.cc: tolerate thrashing on pg_command tests
+1adde58 Don't hardcode path to teuthology-suite
 d571825 WBThrottle: use fdatasync instead of fsync
 3528100 FileStore: add config option to disable the wbthrottle
+72582c2 upgrade/mixed-mons: whitelist fallout from the cephtool test
 ed712c1 fix nss lib name
 378728e update the README with required RPM packages
 c6a7377 Improve warning message when there are unfound objects, but probing hasn't finished yet.
+c957eae upgrade/mixed-mons: upgrade osds for final phase of test
+5ec5e2c ceph_manager: wait for dump_ops_in_flight on osd revival
 96621bd mon: DataHealthService: monitor backing store's size and report it
 46fb86a mon: mon_types: DataStats: add 'dump(Formatter*)' method
 9a1badf mon: MonitorDBStore: rely on backing store to provide estimated store size
 41149c4 test: ceph_test_store_tool: output estimated store size on 'get-size'
+c861e2d queue: only git fetch once per minute per branch
 51fe5da rados-config: do not load ceph.conf
 cc3249b osd/ReplicatedPG: require write payload match length
 14a1e6e osd/ReplicatedPG: verify we have enough data for WRITE and WRITEFULL
@@ -19372,24 +22411,43 @@ cc3249b osd/ReplicatedPG: require write payload match length
 e48ef9e librados: make safe and complete callback arguments separate
 476e490 mds: remove waiting lock before merging with neighbours
 a816060 doc: Fixed broken link by adding Transitioning to ceph-deploy to this doc.
+973d5af queue: only let one worker update the teuthology checkouts at a time
 99a2ff7 os: make readdir_r buffers larger
 2df66d9 os: fix readdir_r buffer size
 7a091d3 os: KeyValueDB: expose interface to obtain estimated store size
 fe50103 mon/Paxos: fix another uncommitted value corner case
+c107ade Fix for blank yaml.
 0373d74 rgw: bucket meta remove don't overwrite entry point first
+7930f66 Update call to get_sentry_client()
+6090863 Use teuthology.config
+d8d2ef9 Add teuthology.config, the start of a better system
+86caebb nuke: clean up stray firmware.git locks
 f040020 ceph-disk: specify the filetype when mounting
 f404023 doc/release-notes: v0.67.2
+eafd591 Move git stuff to fetch_teuthology_branch()
+307284c Rewrite branch fetching.
 3a4f1ce rgw: Adds --system option help to radosgw-admin
+22fc733 queue: fetch origin, not branch
+f8f4c2d suite: only skip on non-vps if os-type is actually defined
+c39ec60 queue: only bootstrap new checkouts
 5637516 osd: add 'osd heartbeat min healthy ratio' tunable
 40f43a0 QA: Compile fsstress if missing on machine.
 4b97fcb QA: Compile fsstress if missing on machine.
+0d10628 Don't run fuse tests for ceph-deploy on ARM.
+dad8d1a Do not run multiple tests (for distros) on baremetal.
+2ea841c Allow exclude_arch and exclude_os_type in qa-suite yamls
 ab4e85d PGMonitor: pg dump_stuck should respect --format (plain works fine)
 a0f3c64 init-ceph: behave if incompletely installed
 309569a mon/MonClient: release pending outgoing messages on shutdown
+98160c5 Fix SyntaxError
+a9df6c2 Worker shouldn't attempt to rebuild an existing virtualenv
 226059e MOSDOpReply: set reassert_version for very old clients
 98583b5 yasm-wrapper: more futzing to behave on fedora 19
 3d55534 rgw: fix crash when creating new zone on init
+c773060 Use the ceph.com git mirror.
+73de8b7 Use fetch and reset --hard, not checkout and pull
 5c5980b ceph.spec.in:  remove trailing paren in previous commit
+c6293a4 Make the worker obtain the correct teuthology branch
 9b667ce ceph.spec.in:  Don't invoke debug_package macro on centos.
 02e14c7 Makefile: move all crc code into libcrc.la
 e55809a crc32c: add intel optimized crc32c implementation
@@ -19399,6 +22457,7 @@ f008ac4 arch: add cpu probing
 157f222 doc: fix erasure code formatting warnings and errors
 d70fd35 mon/Paxos: ignore do_refresh() return value
 617dc36 enable mds rejoin with active inodes' old parent xattrs
+0e87253 Fix upgrading edge-ish case
 b419924 init-rbdmap: fix error on stop rbdmap
 9242d01 ceph-monstore-tool: shut up coverity
 123f79b store: fix issues reported by coverity
@@ -19413,8 +22472,11 @@ ff70e76 ReplicatedPG: ObjectContext * becomes ObjectContextRef
 1688fb4 ReplicatedPG: add Mutex to protect snapset_contexts
 e1be37a PG: remove unused PG::_cond
 be04918 sharedptr_registry: add a variant of get_next() and the empty() method
+de270b0 install: use get() in _get_config_value_for_remote
 8784564 objecter: fix keys of dump_linger_ops
 38a0ca6 objecter: resend unfinished lingers when osdmap is no longer paused
+520a938 radosgw-admin: use check_status for task success
+ff2a209 radosgw-admin: adding radosgw-admin tests
 d26ba3a rgw: change cache / watch-notify init sequence
 576dce0 doc: Clarified quorum requirements.
 deb43d9 doc: Fixed typo.
@@ -19422,19 +22484,25 @@ bebba3c doc: fix erasure code formatting warnings and errors
 8437304 build-depend on yasm
 33783e5 crc32c: note intel crc code copyrights
 6ee1591 crc32c: add intel baseline algorithm
+df8ae04 Make client a global variable
+0af6a8a Beginnings of support for Sentry.
 552bfe5 vstart.sh: Adds more ENV variables to configure dev cluster
 2af59d5 ceph-disk: partprobe after creating journal partition
+549bac3 radosgw-admin: use dynamic ports for testing
 edf2c34 .gitignore: ignore test-driver
 9833e9d fuse: fix warning when compiled against old fuse versions
 6abae35 json_spirit: remove unused typedef
 c9cdd19 gtest: add build-aux/test-driver to .gitignore
 e8e50f6 crc32c: remove old intel implementation
 a286090 common/crc32c: refactor a bit
+71a77f2 Add get_test_user(), because hardcoding is bad
+794d224 rados/singleton: add 'wrong cluster addr' whitelist
 981eda9 mon/Paxos: always refresh after any store_state
 7e0848d mon/Paxos: return whether store_state stored anything
 b9dee22 mon/Paxos: cleanup: use do_refresh from handle_commit
 6ef1970 pybind: fix Rados.conf_parse_env test
 eca53bb mon/PGMap: OSD byte counts 4x too large (conversion to bytes overzealous)
+276157f upgrade/mixed-mon: use dumpling code for dumpling workunits
 3a83129 erasure code : plugin, interface and glossary documentation updates
 8e53301 Do not use some compilation flag invalid for clang
 1f851cb PG: remove old log when we upgrade log version
@@ -19445,36 +22513,68 @@ c339456 client: guard fallocate with #ifdefs
 c2548a5 mon: add 'pg dump delta' to get just the rate info
 00080d7 PGLog: add a config to disable PGLog::check()
 2398c1b doc: Title change.
+c044951 regression: remove
+ceb1d7b readwrite: Specifying the domain root pool
 220f7d6 osd/ReplicatedPG: remove broken AccessMode logic
 823435c examples: add a librados/hello_world program
+3531305 upgrade/mixed-mons: run final dumpling test against all-dumpling mons
+3bcff44 upgrade/mixed-mons: run cuttlefish tests against (2 cuttlefish, 1 dumpling) mon cluster
+5695de1 upgrade: fix mixed workloads
 67a95b9 ceph: parse CEPH_ARGS environment variable
 eef7cac rados pybind: add conf_parse_env()
 9dda1cc doc/release-notes: v0.61.8
 090e4c4 filestore-config-ref.rst: mark some filestore keys as deprecated
+22abe02 Whoops! Make 'MacOS X' a header again.
+3f42335 And, more formatting tweaks.
+adc3f62 Mo' betta consistency.
+59e01a4 Don't necessarily need to manually link libvirt. Also, remove $'s.
 4e86be9 librados: synchronous commands should return on commit instead of ack
 f5636be mon: make MonMap error message about unspecified monitors less specific.
+6f9c759 Remove false note about default value.
+39879ec Slightly tweak path.
+5bd7f62 Add comment about portability to get_testdir_base()
+7a2cca1 Update base_test_dir default to reflect reality
 a846294 auth-config-ref.rst: fix signature keys
 4677041 objclass: move cls_log into class_api.cc
 70790ab doc/dev/filestore-filesystem-compatibliity: remove outdated xattr notes
 2f221fe doc: Updated upgrade doc to include dumpling and incorporate ceph-deploy.
 060a463 Makefile: move objclass/*.cc to libosd.la
+558237e Detect multi-region test cases
 8ac1af8 doc/changelog: add missing file
 d0a6ff9 os/FileStore: initialize blk_size on _detect_fs()
 ed4fe32 doc/release-notes: v0.67.1
+e625316 kernel: fix sha1
 4fd34be mds: create only one ESubtreeMap during fs creation
 6bb7c62 doc: quickstart: be more explicit that node == mon node
+bbf7c71 radosgw-agent: adding debug logging
+8a47230 rename variables and log messages to be more clear
+7061e3c rgw: fix domain root pool name assignment
+b883e33 radosgw-admin: correct white space mistake
+c37faa8 add multi-region tests
 3cbf6a7 rgw: drain requests before exiting
 d08e05e ceph-post-file: single command to upload a file to cephdrop
+c3e5090 Add Rhel 6.4 and Centos 6.3 to ceph-deploy test suite.
 50cc2ef doc: Removed old mkcephfs references.
 fa10c41 doc: Removed mkcephfs references.
 31c1501 doc: Updated script for dumpling.
 16f4bd5 doc: Updated APT script for dumpling.
 e97100d doc: Removed mkcephfs references. Did a bit of clean-up work.
+7df9cf5 Kernel value of - uses doesn't install kernel.
 211aada ReplicatedPG: add osd_recover_clone_overlap_limit to limit clones
 6f0a498 config_opts: add two ceph-rest-api-only variables for convenience
+a899b58 Display error message when locking a vpm fails due to downburst errors. When doing a lock-many, do not lock any of the vpms when downburst errors occur. Made error messages more accurate, and removed a destroy_if_vm call because the destroy was already called in unlock. Changed some print messages to be log.info displays.
+694cd67 Fix ARM releases to be quantal armv7l releases. Made grub execution conditional and not done when ARM. Use ctx parameter to change machine type to tala. Fix kernel assignments when running ARM systems.
+47224f2 Revert "Fix ARM releases to be quantal armv7l releases."
+aabfabc Fix ARM releases to be quantal armv7l releases. Made grub execution conditional and not done when ARM. Use ctx parameter to change machine type to tala. Fix kernel assignments when running ARM systems.
 aee053c mon: MonitorDBStore: output to derr instead of std::cout
+6905b76 Add yaml for rhel 6.4 in distros.
+a39e7f1 Adding multi-region tests
+d74eefd Use the json format for raw_cluster_status().
+2c9ac3e Detect multi-region test cases
+6115d45 Do more than just sleep and raise an exception.
 1c50c44 osdc/ObjectCacher: do not merge rx buffers
 b59f930 osdc/ObjectCacher: match reads with their original rx buffers
+4a47eed Fixing assumed typo.
 93d8be2 osd: Add perf tracking for all states in RecoveryState
 895d531 cls/hello: hello, world rados class
 359850b osd: enforce RD, WR flags for class methods
@@ -19500,19 +22600,44 @@ e7836e6 mon/PGMap: fix typo
 3ba4dc3 Revert "config: fix stringification of config values"
 fefe0c6 config: fix stringification of config values
 9f1ad4d Document unstable nature of CephFS
+1766730 Don't raise an exception if apt-key list fails
+83dd1c8 Passwordless sudo, not passphraseless sudo.
 3d7a949 Renamed filestore_backend test
 5a15369 store: Add (experimental) ZFS parallel journal support
 a25d73e store: Abstract the underlying filesystem functionality
+c17a18e Don't assume the ubuntu user when getting the test dir.
+3afc7d9 If get_testdir_base() exists, might as well use it...
+f41436a Tweak regex to work for non-FQDN hostnames
+54ed1d1 Note that target hostnames must be resolvable.
+5746efb Fix some instances where print is being used instead of log
+ab2d2fa Default to log level INFO.
 24ec320 Ceph-qa: change the fsx.sh to support hole punching test
+a8965a6 rados/monthash: fix 5925 yaml
+5ac68dc added upgrade-dumpling suite to test point releases for dumpling
 086abe4 doc: Fixed typo.
 4422f21 rados.py: fix Rados() unicode checking
 34da9cb rados.py: fix Rados() backwards compatibility
+3e2a269 added workunit: branch:dumpling wherever applicable
 2cfcb77 mon/PGMap: degraded what?
 49ddecd mon: status: client io, recovery io prefixes
+48a3393 upgrade from cuttlefish to next and dumpling to next
 94c3f29 OSDMonitor: add 'osd perf' command to dump recent osd perf information
+bf52544 upgrade cuttlefish to dumpling, instead of next in upgrade-fs suite
 ebde89d ObjectStore: add ObjectStore::get_cur_stats and impl in FileStore
+2813c33 renamed the folders and files appropriately
+058fab0 replace cuttlefish with dumpling in upgrade suites
+6c699cb modified upgrade tasks to run from cuttlefish to dumpling, dumpling to next and bobtail to dumpling
+63b64e2 Don't bail if ~/.teuthology.yaml doesn't exist.
+ebf476d Obligatory automatic vim whitespace cleanup commit
+2c88757 Revert "Display error message when locking a vpm fails due to downburst errors."
 16ed0b9 librados: fix async aio completion wakeup
 7a52e2f librados: fix locking for AioCompletionImpl refcounting
+4681b35 Display error message when locking a vpm fails due to downburst errors. When doing a lock-many, do not lock any of the vpms when downburst errors occur. Do not display message on unlock with downburst error, because slot is still freed (and can be locked by someone else).
+6bc8fcd add link to homebrew
+aa464e2 add generic instructions for installation
+3e9cf89 create instructions to install on OSX
+8c5b5ef sort the requirements
+18ea023 remove install_requires from setup.py
 4f31756 doc: Added dumpling to RPM installation.
 d38a041 doc: Minor tweaks to debian install.
 456a394 doc: Added dumpling installation for Debian/Ubuntu.
@@ -19522,6 +22647,7 @@ dfd5854 remove racy test assertions
 d1a8165 client: add enclosing object for asok dumps
 0c23633 pybind/ceph_argparse: GPL -> LGPL2
 2206f55 rbd.cc: relicense as LGPL2
+0e9a937 monthrash workloads: Add task to reproduce 5925 with extra logging
 fbc65ea mon/PGMap: make pg state indentation cleaner
 6d8d726 mon: status: only include mdsmap if epoch > 1
 a9033eb mon: move recovery rate to a separate line
@@ -19534,30 +22660,57 @@ ef9c991 mon: make pg info in 'status' more informative and visually parseable
 f417b10 osdmap: move oneliner summary to separate function
 6f5d803 librados: fix MWatchNotify leak
 810c52d rgw: do not leak handler in get_handler() error path
+f1c9125 Fix a case where _get_config_value_for_remote could return None mistakenly
+000efc6 samba: wait longer for smbd to start
 e3b7bc5 (tag: v0.67) v0.67
+9d4104f do not check the jobid if check-locks is False
 977b7f5 doc/release-notes: fix rst
 f501ec7 doc/release-notes: upgrade sequence
 de7bbdb doc/release-notes: roll-up of upgrade/compat notes from cuttlefish to dumpling
 6df75da init-rbdmap: minor fix no rbd exist
 d9cb2ea init-rbdmap: fix for recursive umount
+643da65 remove fallback of the fallback. so silly
+3aa995a Remove three characters just for Alfredo
+70cd884 safer fallback. Updates docstring
+f4de515 Add notes about install task precedence
 c927f89 rgw: rgw-admin throw an error when invalid flag is passed
 b9a5664 Makefile.am: fix libglobal.la races
+d4c2576 radosgw-agent.py: refactor, enable overrides
+1d90d7b rgw.py: fix example config
+9ee6452 rgw.py: add a better error message
+e517cd4 s3readwrite.py: make user creation optional
+e3b9add rgw.py: refactor configure
+18e8014 rgw.py: make log_(data|metadata) configurable
+3fb5318 agent name should also contain port number
+4adc1da radosgw-admin: various multiregion related fixes and changes
 c81edf1 tools: ceph-monstore-tool: allow copying store contents to a new location
+fa733e4 Change "Exception" to "RuntimeError" as advised by zackc
+28e4219 tasks/ceph-deploy: allow configurations with no mds
+5be5327 Default to master and not next.
 2632846 rgw: fix multi delete
+ef1c966 Add install task to examples
+93b532f Don't hit an AttributeError if ctx.config['overrides'] is set to None
+cabb414 Make this module pep8-clean.
 c566c03 doc: complete S3 features status from existing doc page
+7c377cb Make _get_baseurlinfo_and_dist() provide the correct uri
 ef91c69 mon: mon_cluster_log_file_level
 cb50b5a ceph-disk: fix mount options passed to move_mount
 b66a3d6 config_opts.h: reduce osd_recovery_max_active and osd_recovery_max_single_start
+5433ca2 upgrade-parallel: test on multiple target distros
 b221a42 doc/release-notes: adjust whitespace
 bec6f09 doc/release-notes: v0.67 release notes
+ebd94a9 upgrade: do not wait for full quorum on mixed-version mon cluster
 44b093c ceph: retry new-style mon command if we get EINVAL from the old-style command
+b78a6c7 Make install.upgrade work with CentOS/RHEL/Fedora.
 7ed6de9 common: pick_addresses: fix bug with observer class that triggered #5205
 8bf3971 rearrange the documentation to be inserted and maintained in master
 068baae rgw: return 423 Locked response when failing to lock object
 9029555 rgw: make RGWHTTPClient callback methods pure virtual
 71177fe rgw: rename read_header() to receive_header() where needed
 1563613 rgw: rename data receive callbacks in swift token revocation
+2a7a61c Add Remote.system_type property. Also, clean up whitespace.
 eade36d PendingReleaseNotes: note 'ceph daemon ...' argument behavior change
+a0e74a9 admin_socket: split command into multiple arguments
 e1666d0 Fix compilation -Wmismatched-tags warnings
 5082fec doc: Add a page about rgw S3 API compliance
 fd06261 ant is missing from the list of packages to install
@@ -19570,9 +22723,11 @@ b2515b9 buffer: change #include order
 fd19235 ceph.in: return EINVAL on daemon command error
 0be1475 ceph.in: Re-enable ceph interactive mode (missing its output).
 2e28087 mon: fix 'osd crush rule rm ...' dup arg
+a739206 ceph.conf: open osd classes on start
 0c1fd62 qa/workunits/cephtool/test.sh: test set/unset of all osd flags
 298e7d8 mon/MonCommands: fix typo nobackfile -> nobackfill
 afa21e8 Add back the mistakenly removed "ceph osd set noscrub/nodeep-scrub"
+6acee41 add a key for ceph-deploy-branch detection
 6820390 OSD: suspend timeout on process_peering_events pg lock
 da69756 test: mon: moncap: add 'allow command foo with arg="bar.baz"' tests
 258135b qa: workunits: mon: workunit to stress the monitor's caps
@@ -19586,7 +22741,9 @@ f087d84 mon: Monitor: check caps considering command's requirements
 d0cbdde ReplicatedPG: ping TPHandle during scan_range
 95b3604 OSD: also suspend timeout while grabbing recovery_tp lock
 321f57d OpRequest: don't warn as quickly for slow recovery ops
+7a14a93 Add a useful error message in case of an invalid hostname
 0017010 mon, osd: Clean up "flush(stringstream)" continued
+70f4eeb s3readwrite.py: enable overrides
 e904018 mon, osd: Clean up "flush(stringstream); bl.append(stringstream.str())
 2e9c25f doc: fixing reported bug in CRUSH map example.
 cafccfa doc: Added many new fields to config reference.
@@ -19620,14 +22777,33 @@ f86828d mds: handle "state == LOCK_LOCK_XLOCK" when cancelling xlock
 63a21b4 mds: remove "type != CEPH_LOCK_DN" check in Locker::cancel_locking()
 3c3b2ce mds: revoke GSHARED cap when finishing xlock
 7555819 mds: fix cap revoke confirmation
+a0ac487 upgrade-parallel/rgw: use different client for final run
 e5d9ac6 qa/workunits/cephtool/test_daemon.sh: we should error on bad command
+8934753 samba: wait for smbd process to exit
+4bd5b59 rgw.py: add None object check when parsing info
 9e7d6d5 PG: set !flushed in Reset()
 826478c dev/osd_internals,src/osd: add erasure_coding.rst and PGBackend.h
+98f35a5 rgw: do not ignore leaks
+3accf1c rgw.py: adding a safety check
+9110c7d rgw: add log settings to example config
+1787a72 radosgw-agent: get verbose logs from the agent
+1ec9c1b radosgw-agent: move zone extraction to helper
+fdde365 rgw: don't make domain_root pool the same as zone root
+d79a2db rgw: create system users with the --system flag
+3eb62d2 rgw.py: make log_(data|metadata) configurable
 ae15381 rgw: only fetch cors info when needed
 b139a7c rgw: don't read cors attrs if there's no bucket in operation
 43c2712 rgw: rename sync-type to update-type
 bbac69c rgw: only check version if meta object exists
+ef68b98 radosgw-agent: rename task to match tool name
+189e04f rgw_sync_agent: fix a bunch of issues
+ad6a9e5 rgw: ignore exit status when removing base apache dir
+f930f39 rgw_sync_agent: add new rgw_sync_agent task
+48357d6 Fix for #5836 (--lock-many with vms)
 6256d68 Add the definition of PG temp to the glossary
+78b7307 Debian ceph-deploy test. Added more supported distro yamls.
+88b7d02 Fix for Debian wheezy (remove vda from block device list)
+1552a4b rgw.py: check for empty client config
 caaaf2c ceph.spec.in: merge back JUnit handling from SUSE spec
 cbf3a11 ceph.spec.in: move junit BuildRequires to the cephfs-java subpackage
 4a95796 ceph.spec.in: use snappy-devel only on RHEL derivatives
@@ -19637,47 +22813,75 @@ c70e59e mon/MDSMonitor: don't call all EINVALs "unrecognized command"
 c9e8ff2 rest/test.py: retry mds setmap in case epoch has changed
 dc1d95d rest/test.py: expect_nofail() to allow examination of reason
 4a6eff8 Verify that deleting op never in check_latest_map_ops map
+7f76c1c rgw.py: refactoring to separate user creation
 600e6ac osdc: op left in check_lastest_map_ops
 175feb9 rgw_rados.cc: fix invalid iterator comparison
+8c8e9a1 rgw.py: change --secret-key to --secret
+1cff5bd added rgw task before swift
 b4ed4e2 qa/workunits/cephtool/test_daemon.sh: sudo
+1f7127b s3/swift tests: call radosgw-admin as the right client
+2f2108b rgw: fix dir creation and keyring
 d651658 osdc: Add asserts that client_lock is held
 5bd061c test: Fix valgrind found "Mismatched free() / delete / delete []"
 c48644d qa: Add workunit that hangs for manual thrasher testing
+2b6ac0f rados/.../cephtool: let us run test_daemon.sh too
 00dc634 ceph: developer mode: set PATH, too
 e70e08c cephtool/test.sh: add tests for mon daemon command
 47d0d64 Make all AdminSocket commands use argparse/cmdmap.
+74b344e Reconnect after running chef task.
 736d6a1 rgw: fix set_buckets_enabled(), set_bucket_owner()
 0e125e0 Objecter: set c->session to NULL if acting is empty
 16adb91 (tag: v0.67-rc3) v0.67-rc3
 e747fa8 Revert "Use dh_installinit to install upstart job files"
+00c2ac5 Fix RHEL/centos ceph-deploy installs.
+e1cd49b ceph_manager: wait 20s before wait_for_clean in test_map_discontinuity
+bcf467c Remove now unnecessary try/except/raise that came about as a result of the previous check-in.
+48a977e Always raise exception if yum install fails.  This avoids later confusion when packages are missing (the old code skipped 'Nothing to do' messages, but these cases are still errors).
 ebab04e mon: add missing state name
 b8af38b mon: allow others to sync from us across bootstrap calls
 634dcf3 mon: drop useless sync_reset() calls
+b9c7445 More changes for creating vms manually with lock (no config)
+d41b4e5 Fixing teuthology-lock for os-type instead of vm-type.
 f7d1902 mon/Paxos: be more vocal about calling elections
 eb6e6da rgw: keep a region connection map
 f10c2e7 rgw: cors subresource needs to be in canonical auth header
 b5e7448 rgw: set bucket attrs are a bucket instance meta operation
 68730d8 rgw: track bucket instance oid
 acd16d1 rgw: read / write bucket attributes from bucket instance
+99a79c6 rados_util: adjust-ulimits, not enable-coredump
+8284e19 ceph_manager: wait for all_up in test_map_discontinuity
 f3eda63 mon/PGMonitor: fix 'pg dump_[pools_]json'
 099ac51 mon: fix xml element name
 ee9f04c check_new_interval must compare old acting with old osdmap
 1f13d8a OSD: suspend tp timeout while taking pg lock in OpWQ
 f1bd4e5 WorkQueue: fix bracing on reset_tp_timeout
+e703942 ceph_manager: allow-experimental-feature now causes an EINVAL
 a6cd9fe osd: get initial full map after a map gap
 e24b502 osd: fix off-by-one in map gap logic
+9dac3fe s3tests: clone correct branch
+17fa544 fix double requirements issue
 251a6a4 Use dh_installinit to install upstart job files
 b62845e doc/changelog/v0.61.7
 a46f60a doc/dev/repo-lab-access: notes
 12c1f11 ceph_test_rados: print version banner on startup
 74c1bec ceph-authtool: fix cli tests
 7b683f7 test/system/*: parse CEPH_ARGS environment
+0158f0d adjust-ulimits: 16k open files
+2f921c3 upgrade-parallel/stress-split: only upgrade on the first node
 347b5a2 ceph-authtool.8: add missing commands to man page
 86b2632 ceph_authtool.cc: update help/usage text
 4b6c569 mon/DataHealthService: do not name xml key after mon
+fb8882b upgrade-parallel: run cuttlefish api tests against cuttlefish/dumpling cluster
+d5a1d0a upgrade-fs: specify fs
+419e90e upgrade-parallel: run cuttlefish s3tests against hybrid cluster
+7646133 upgrade-parallel: restart mon after starting osd thrasher
+496c677 ceph_manager, dump_stuck: fix injectargs args
 6881ab3 debian, rpm: make python-ceph depend on python-requests
+73adf33 radosgw-admin: fix typo in import
 2ec480b replace in_method_t with a counter
 6b16cd1 unit tests for sharedptr_registry
+45ce9b0 valgrind: suppress curl, gnutlss, libfcgi leaks
+a039d4a ceph_manager: don't mark out an out osd on kill_osd(..., mark_out=True)
 09ee092 pybind/rbd.py: remove unused import of 'pointer'
 2460118 cephfs.py: remove unused imports
 865d5e9 rados.py: fix bad indentation
@@ -19697,13 +22901,20 @@ a8c1a2a common/Formatter: add dump_format_unquoted()
 8af4775 ceph-rest-api: clean up options/environment
 629326a qa/fs/.gitignore
 803a1fd ceph_test_admin_socket: fix unit test
+ba9e71a Test multi distro on ceph-deploy.
 a419354 message: Fix asserts that dont' trigger
+4c15d73 rados: add rest-api test
+404f6d2 rest-api: rename
+3f0340f Add rest_api, a new task that starts up /usr/bin/ceph-rest-api running as a daemon.
 a9ca623 librados: EINVAL on a negative osd id
 3f93691 ceph.in: make osdids() (and mon, mds) work on old mons
 4b73900 osd: humor coverity
 323bdaa mon/MonCap: mds needs to subscribe to the osdmap
 14a3e2d remove unused fiemap code
+32ff312 added fs, rbd and rgw tasks to upgrade from bobtail and point releases [v0.61.5, v0.61.6] to cuttlefish.
+5849853 rados/thrash: add in ext4
 6faf8b6 PendingReleaseNotes: note on 'ceph tell <pgid> ...'
+c01c200 Allow OS version over-ride (distro version)
 aa00ace ceph_rest_api.py: cleanup, more docstrings, unused vars
 d75b6ea ceph_argparse.py: make find_cmd_target handle tell <pgid>
 8985e1c ceph_argparse, mon: make "tell <pgid>" work (duplicating "pg <pgid>")
@@ -19715,6 +22926,9 @@ a45e296 rgw/rgw_rest_log.cc: free 'handle' to prevent memory leak
 87f8e8f test_cls_log.cc: remove empty lines
 44f43ff test_cls_statelog.cc: fix memory leak, delete 'rop'
 143b843 test_cls_version.cc: close some memory leaks
+f2cddda rgw: correct socket option name
+699d0a3 rgw: add rgw log socket to daemonized radosgw too
+65172a0 rgw: pass socket path directly to radosgw
 a90a2b4 upstart: stop ceph-create-keys when the monitor stops
 6f99622 osd: make open classes on start optional
 c24e652 osd: load all classes on startup
@@ -19727,30 +22941,48 @@ cb38762 ceph.in: admin_socket() now validates command and passes format
 ba6ca58 In general, flush in caller of dump worker rather than worker
 c562b72 FileStore: fix fd leak in _check_global_replay_guard
 1e991ed add condrestart to the sysvinit script
+99ae947 rados: add thrasher which causes map gaps
+a355d9f ceph_manager: add test_map_discontinuity to thrasher
+a0b51b1 samba: run lsof and fuser after shutdown
 c7c4c23 Formatter, admin_socket: make default formatter be json-pretty
 3f598e8 AdminSocket users: use generic formatting
 4aeb73a ceph_rest_api.py: reversed test for failed request
 47d0937 rest/test.py: earlier versions of requests.py don't quote ' ' in params
 6951d23 OSD: tolerate holes in stored maps
 fbf74d9 ceph_rest_api.py: return error in nonformatted mode
+86fc399 Add distro yamls.
 8b3d7a1 ceph_rest_api.py: actually remove the trailing / on baseurl
+4c5cc89 ceph-deploy: remove workunit branch options
 dfabc61 mon/MonCap: match param for entity (not name)
 ca8ac95 mon: translate caps=[k1,v1,k2,v2] into [caps_k1=v1, caps_k2=v2]
+aeb3586 added test_mon_destroy option to ceph-deploy task
 05b6c7e mon/Paxos: share uncommitted value when leader is/was behind
 063c71f rgw: expose the version of synced items to the poster
 81b62b5 rgw: return the update status on sync requests
 8ffc4ca rgw: add sync state parsing code and REST interface
 18eabd3 rgw: pass the sync mode into the RGWMetadataManager::put() calls.
 4f9855e rgw: add preliminary support for sync update policies on metadata sync
+c2c8a08 Added --os-type argument to run.py
 176aa39 remove old push_to_kclient.pl
+810cca1 Added get_distro() to misc.py
 ebb9ace ceph-disk: use new dumpling-style osd caps if we can, or fall back to old-style ones
+57933b6 Fix priority so it is only added once.
 b46fb62 osd: Don't put functional code inside of an assert
+3c781bd upgrade-parallel/rados: fix mon restarts
+c995732 upgrade-parallel: fix rgw syntax
+77ee5a0 ceph_manager: try both new and old tell mon.* syntax
+7373cb3 removed ceph-deploy branch option to make it run against arbitrary branch
+2a1a8b5 removed ceph-deploy branch option so the nightlies can run ceph-deploy against any arbitrary branch
+5796d76 schedule_suite: less ceph-deploy client debugging
+6914efa schedule_suite.sh: select ceph-deploy branch
 27a0b86 ceph_argparse.py: wrong variable used if valid() fails for Ceph{Osd}Name
 f653aa5 config_opts.h: increase xfs,btrfs wbthrottle defaults
+e52f64e upgrade-cuttlefish: run on xfs, not /
 78214af doc/release-notes: v0.61.7
 870c474 FileStore::_collection_rename: fix global replay guard
 0dc3efd HashIndex: reset attr upon split or merge completion
 37a4c4a test/filestore/store_test: add test for 5723
+2c98670 modified workunit branch to cuttlefish
 cb3ee1e rgw/rgw_metadata.h: init prefix in initialization list
 6bc0d04 test_rgw_admin_log.cc: remove unused variable 'creds'
 dda1014 test_rgw_admin_log.cc: use static_cast<>() instead of C-Style cast
@@ -19758,50 +22990,83 @@ dda1014 test_rgw_admin_log.cc: use static_cast<>() instead of C-Style cast
 e4dfe8a test_rgw_admin_meta.cc: remove unused variable 'creds'
 9d4c42f test_rgw_admin_opstate.cc: use static_cast<>() instead of C-Style cast
 4c778e2 test_rgw_admin_meta.cc: use static_cast<>() instead of C-Style cast
+803db7d schedule_suite.sh: don't need full ms debug on mons
 a8b70f0 doc/release-notes: v0.67-rc2
+75a7841 upgrade-cuttlefish: test upgrades to new point releases
 41930b5 ceph.spec.in, debian/control: python-ceph depends on python-flask
+d9cfd0e modified the task yaml to use modified ceph.restart and wait_for_mon_quorum
 0018b45 (tag: v0.67-rc2) v0.67-rc2
 fe2019c rest/test.py: cope with older requests.py versions
 fd1fd66 ceph-disk: use new get_dev_path helper for list
 0b8cad1 ceph_rest_api.py: allow config section fallback
 d7df620 global/signal_handler: poll on the control pipe, too
+1fcf8e3 ceph: make restart take both a list of daemons and other arguments
+3c9382a teuth: rgw task example config update
+4fb5781 teuth: fix issue in cleanup code
 085f129 ceph.in: remove dead code
 1579c34 rest/test.py: osd lspools should be a 'GET'
 e839420 MonCommands.h: osd pool delete "rw" perms, osd pool set no longer exp
 9285506 ceph.in/ceph_argparse.py: move find_cmd_target() to ceph_argparse.py
 c2131d4 mon/OSDMonitor: search for latest full osdmap if record version is missing
+75136a8 ceph: wait-for-osds-up option for restart
+13aca3b ceph: wait_for_osds_up
+8ad065d rgw: add multi-region and zone support
+721280b task_util: move rados command here
+6d2434b rgw: move common rgwadmin function to a new utility file
+88cab47 teuth: reworked rgw to support regions / zones
+afd0d87 teuthology: updating RGW task to support regions
 a055988 rgw/rgw_metadata.h: init cur_shard in LogListCtx with 0
 ebff1eb rgw/rgw_metadata.cc: fix possible null dereferencing
 6e6ef01 os/ObjectStore.cc: don't fallthrough after OP_OMAP_RMKEYRANGE
 27e38e4 mon/Monitor.cc: init scrub_version with 0 in constructor
 a7a7d3f test: test_store_tool: global init before using LevelDBStore
 76cd7ac mon: OSDMonitor: fix a bug introduced on 97462a32
+0260943 add test requirements to requirements file
+545fc27 remove test dependencies from setup.py
+2d9cb1f fix RST formatting issues in README file
+b3bd39c removing todo org file from the root of the project
+1e4a4d7 remove Makefile that attempted to do linting
 1cdb3ec configure.ac:  Remove -rc suffix from the configure version number.
 c8d66b7 Remove fuse-utils from Recommends in debian/control
 b7c40ec configure.ac:  Set version number to match git describe.
 4444c94 doc/release-notes: v0.61.6
 8e4a78f global/signal_handler: use poll(2) instead of select(2)
+203c53d upgrade-parallel/stress-split: fix osd names
 4183b74 mon/MonmapMonitor: make 'mon remove ...' idempotent
 2338a32 client: signal mds sessions with Contexts instead of Conds
 3207542 client: add Context*-based wait_on_list/signal_context_list helpers
+e6a30d7 Re-create guest if it already exists
+71b66eb Add distro type to schedule_suite.sh
+55cc15f Wait a little longer before recreating VMs
+52a886c Use os_type instead of vm_type. Add os_version
+c1e0812 ceph: add wait_for_mon_quorum command
+6888886 sequential, parallel: allow entries to be references to top-level config
+1c2062e upgrade-parallel: add stress-split collection
+2ce2ce9 upgrade-parallel: do staggered updates
 612a9b3 mon: add quorum_names to quorum_status command output
 9a7a055 ceph-fuse: disable getgroups_cb
+90212f7 added tasks in the yaml
 2c87d9f mon: PGMap dump shouldn't use strings containing '+' as tags
 96551f9 mon: "status" is missing a close_section for the overall output
 3dcfe38 mon: "osd stat" needs a flush now that osdmap.print_summary() doesn't
 e4d0eee mon: "mds stat" must open/close section around dump_info
 3a69fd0 ceph.spec.in: obsolete ceph-libs only on the affected distro
+4479a5a removed a blank line
 e807770 mon/OSDMonitor: fix base case for 7fb3804fb workaround
 e536d66 ceph.spec.in: obsolete ceph-libs only on the affected distro
 8814265 Enable libs3 support for debian packages
+a0edf7a rados: trim more aggressively
 97462a3 mon: OSDMonitor: work around a full version bug introduced in 7fb3804fb
+f528108 rados: keep fewer osdmaps around
 bc8d62f mon: OSDMonitor: get rid of encode_full() as we don't use it.
 a815547 mon: OSDMonitor: update the osdmap's latest_full with the new full version
 f46e8b9 doc/release-notes: v0.67-rc1
 7b3b989 qa/workunits/suites/fsync-tester.sh: lsof at end
 3f31540 qa/workunits/rest/test: cluster_down/up are now idempotent
 88f4a96 log: remove unused lock
+c812ee2 task yamls for upgrade-parallel suite
 093182b osd/ReplicatedPG: drop repop refs in ~SnapTrimmer
+77cae4b thrashosds: add delay option after recovery
 6582b31 FileStore: disable fd cacher and wbthrottle during replay
 2fd4421 PGLog::merge_log, unidex() only works from tail, we index() below anyway
 6957dbc PGLog::rewind_divergent_log: unindex only works from tail, index() instead
@@ -19828,8 +23093,13 @@ d28c18d OSD::RemoveWQ: do not apply_transaction while blocking _try_resurrect_pg
 6c4cd22 FileStore: use complete() instead of finish() and delete
 9f591a6 Finisher: use complete() not finish() and delete
 8536ff9 common/Cond.h: add a simpler C_SaferCond Context
+748d0c8 Add more stressful thrashing by doing more pg splitting
 20bc09c rgw: read attributes when reading bucket entry point
+e84c54a task: mon_clock_skew_check: grab max-skew value from ceph-mon's config
+222b296 admin_socket: loop until the socket command succeeds
 eabf2f6 ceph.spec.in:  Obsolete ceph-libs
+4a4ecde big: run for 1 hour
+9b3f59d rados: add in the mon/caps.sh tests
 db2850c test_cls_statelog.cc: fix resource leak, delete 'rop' at end of function
 5f4f87b test_cls_statelog.cc: fix resource leak, delete op at end of function
 fc1c1c6 test_cls_statelog.cc: fix resource leak, delete rop at end of function
@@ -19841,6 +23111,7 @@ c35eeae test_cls_version.cc: fix ressource leak
 3846bf2 fuse: fix fuse_getgroups detection
 6402e46 doc: Fixed formatting errors.
 eb03e9d doc: Updated RPM documentation with additional details.
+37a6624 apache.conf: load modules conditionally
 617b3f7 cls_replica_log_types.h: pass const std::list<> by reference
 6319823 mon/PGMonitor.cc: reduce scope of local 'num_slow_osds' variable
 cf29d17 rgw/rgw_bucket.cc: use static_cast<>() instead of C-Style cast
@@ -19858,17 +23129,25 @@ b084a38 osd: do not assume we have an osdmap in prepare_to_stop
 3dec530 qa/workunits/mon/caps.sh: clean up users; rename
 675d783 mon/MonCap: simplify rwx match logic
 f79d965 mon: fix command caps check
+494accb ceph-deploy: do not test mon destroy by default
+06ad2d2 task: mon_clock_skew_check: by default, use max skew from global config
+7ca59df task: mon_clock_skew_check: missing 'str'.format() key crashed the test
+51c2963 mon_thrash: tolerate scrub command failure
 fb21504 qa: workunits: mon: test mon caps permissions
+1964818 Update to describe tasks and parameters to tasks, including the install parameters requested in 4470.  Added more information to the vm section, and included a section documenting the test suites.
 0356eeb mon/PaxosService: update on_active() docs to clarify calling rules
 6d326b8 mon/OSDMonitor: discard failure waiters, info on shutdown
 8371680 mon: OSDMonitor: only thrash and propose if we are the leader
 e4f2e3e mon/OSDMonitor: do not wait for readable in send_latest()
 6edec51 Revert "mon/OSDMonitor: send_to_waiting() in on_active()"
+19bf7f7 modified the workload for mixed-mon upgrade suite
 2795eb1 Revert "mon: OSDMonitor: only thrash and propose if we are the leader"
 0a99649 Revert "mon/OSDMonitor: fix typo"
 8c5e1db ceph_rest_api.py: remove unused imports
 ce46961 ceph.in: better error message when daemon command returns nothing
 06ae53e mon: improve osdmap subscription debug output
+f0123db rgw: turn off continue print on centos
+29e7db2 install: remove ceph-release rpm file after it's used
 934ad88 rgw: grab the old mtime when doing a user metadata put
 f4675dc test: switch the cls_replica_log tests to use a test fixture
 da8584f rgw: remove extra unused param from RGWRados::get_attr()
@@ -19894,6 +23173,7 @@ bfadcd2 osd/ReplicatedPG: fix obc leak on invalid LIST_SNAPS op
 561ac0b osd: break con <-> session cycle when marking down old peers
 41c67e0 osd: make ms_handle_reset debug more useful
 6ebb486 doc: Update syntax for rbd children
+d874b57 sleep: new task
 8574b3c mon/PGMap: don't mangle stamp_delta in clear_delta()
 99fa208 osd: log PG state changes at level 5
 c549e62 mon/PGMap: avoid negative pg stats when calculating rates
@@ -19908,8 +23188,11 @@ c4d4f34 doc/release-notes: fix typo
 053659d msg/Pipe: work around incorrect features reported by earlier versions
 f0feabe Message,OSD,PG: make Connection::features private
 d1b47f4 test: update cli test for radosgw-admin
+5263a6f failed dict.get evaluates to None
+65f49be safer get by falling back to a dictionary
 76040d9 rgw: Adds --rgw-zone --rgw-region help text.
 1b8d50e doc/release-notes: amend 0.61.5 release notes
+4db5b93 ceph: do not ignore osd leaks
 c9ba933 mon/MonClient: fix small leak
 4ed7942 init-ceph: don't activate-all for vstart clusters
 f9e9f9c mon/PGMonitor: fix 'pg map' output key names
@@ -19926,15 +23209,24 @@ d3748b2 osd: break con <-> session cycle when marking down old peers
 2428bfd osd: make ms_handle_reset debug more useful
 921a4aa cls_lock: fix duration test
 dd0246d mds: tracedn should be NULL for LOOKUPINO/LOOKUPHASH reply
+382d17e reorg kernel -> krbd + kcephfs
 f3f92fe FileStore: add global replay guard for split, collection_rename
+0985f8c nuke: killall ceph-disk, too
 723d691 msg/Pipe: do not hold pipe_lock for verify_authorizer()
+5f9a1d8 Worker processes by machine type instead of teuthology branch.
+320032f schedule_suite.sh: escape ceph-deploy overrides
+2173d33 ceph-deploy: support overrides
 a59493e doc/release-notes: add/fix changelog links
 d21d39e doc/release-notes: v0.61.5
 29c0252 mon: fix off-by-one in check for when sync falls behind
 07dfb6f rgw: drop unused assignment
+99c4012 lock: filter machine type for --list, --list-targets
+608d8a2 lock: make --summary list all machines by default
+1d16a9b lock: drop machine-type default, but require for lock-many
 aa460c4 mon: make 'health' warn about slow requests
 82722ef osd: include op queue age histogram in osd_stat_t
 2e216b5 qa/workunits/cephtool/test.sh: test 'osd create <uuid>'
+9e91395 ceph.conf.template: enable osd debug verify stray on activate
 b41f1ba PG: start flush on primary only after we process the master log
 278c7b5 ReplicatedPG: replace clean_up_local with a debug check
 1a84411 msgr: fix a typo/goto-cross from dd4addef2d
@@ -19949,12 +23241,17 @@ ad548e7 msg/Pipe: unlock msgr->lock earlier in accept()
 9f1c272 msg/Pipe: avoid creating empty out_q entry
 579d858 msg/Pipe: assert lock is held in various helpers
 0ebf23c ceph_mon: obtain backup monmap if store is marked with 'force_sync'
+c395687 radosgw-admin: adapt task to recent changes
 d150193 mon/OSDMonitor: make 'osd pool mksnap ...' not expose uncommitted state
 56c5b83 qa/workunits/cephtest/test.sh: put 'osd ls' before any 'osd create' tests
 ad9a104 mon: MonCommands: remove obsolete 'sync status' command
+e8c58d3 rados: use 'ms inject internal delays' during thrashing
+a24aaa4 added overrides for ceph-deploy
 884fa2f OSD::_try_resurrect_pg: fix cur/pgid confusion
 7e16b72 mon/AuthMonitor: make 'auth del ...' idempotent
 f129d17 qa/workunits/cephtool/test.sh: mds cluster_down/up are idempotent
+b15513f workunit: set CEPH_CLI_TEST_DUP_COMMAND
+5135267 added conf section to ceph-deploy task
 f2fa01e ceph: send successful commands twice with CEPH_CLI_TEST_DUP_COMMAND
 d45429b mon/MDSMonitor: make 'mds cluster_{up,down}' idempotent
 9c4a030 osdmaptool: fix cli tests
@@ -19991,9 +23288,13 @@ cc49d3f rgw/rgw_rest.cc: fix malloc memory leak
 64b512c rgw/rgw_bucket.cc: remove unused local variable
 a937a12 rgw_admin.cc: fix same expression on both sides of '||'
 5ea4c5e cls_rgw_client.cc: reduce scope of local variable
+a81848f big: reenable big cluster
+e3d9084 Created tasktest to test sequential and parallel tasks. Added sequential task and parallel task. Changed _run_one_task to run_one_task (now called by new tasks too).
 408014e rgw: handle ENOENT when listing bucket metadata entries
+47696d2 calling mon destroy command after mds create
 eef4458 rgw: fix bucket placement assignment
 39e5a2a OSD: add config option for peering_wq batch size
+5c7b1e1 fs: run everything on btrfs, not /
 b46930c mon: make report pure json
 daf7672 ceph: drop --threshold hack for 'pg dump_stuck'
 4282971 msg/Pipe: be a bit more explicit about encoding outgoing messages
@@ -20015,6 +23316,8 @@ aa60f94 mon: once sync full is chosen, make sure we don't change our mind
 1dfd06d test_rgw: fix a number of unsigned/signed comparison warnings
 01ec4f1 rgw: Fix typo in rgw_user.cc
 fe13f88 doc: Modifies keyring path in radosgw config page.
+382ab4a nfs: run osds on btrfs
+4ae00d7 fs: disable ceph-deploy test
 cf9571c test_rgw_admin_meta: fix warnings
 bc3088c cls_rgw: fix warning
 6ef48c7 doc: update Hadoop docs with plugin download
@@ -20028,7 +23331,10 @@ bf7c402 mon: move quorum out of monmap
 9ca4733 hypertable recent version prototyping includes bool verify in length and read functions
 df45b16 Makefile: build cls_rgw even if we're not building radosgw
 35ef873 Makefile: fix cls_rgw linkage
+5d596a4 ceph_manager: drop -t arg prefix for pg dump_stuck
 cda17fd Makefile: fix cls_refcount linkage
+fb95b63 big: enable medium cluster
+9be0979 fs: disable maxfid smbtorture test
 8d9165a qa/workunits/rbd/simple_big: fix unmap
 15e3c9a qa/workunits/fs/test_o_trunc.sh: fix path
 efe5b67 mon/Paxos: bootstrap peon too if monmap updates
@@ -20039,11 +23345,22 @@ bf4f802 mon/PaxosService: fix trim completion
 741757a ceph_argparse.py: allow valid char RE arg to CephString
 089dfe8 ceph_argparse: ignore prefix mismatches, but quit if non-prefix
 da4c749 ceph_argparse.py: validate's 3rd arg is not verbose, it's partial
+b9b9dd1 Remove btrfs-tools and xfsprogs install step.
+db4ad50 Allow Overrides for ceph-deploy task
+bd9cf10 Workaround repopriority of yum local repo.
+e3a0742 Don't install kernels on non-ubuntu VPS
+8b05c8c Re-create guest if it doesn't come up right.
+4f478cc Add description option to lock.lock()
+f16ecb9 Use ceph.com mirror instead of github for ceph-qa-chef on VPS.
+4dbef12 List IP address in orchestra.run output.
+d7a20ba Fix Missed parenthesis.
+da7483a VM: Use mac addresses from DB instead of randomizing.
 cc10988 ceph-rest-api: separate into module and front-end for WSGI deploy
 495ee10 msg/Pipe: fix RECONNECT_SEQ behavior
 48a2959 mon: make ancestor arg for 'osd crush rm|remove <name>' optional
 c0845a9 mon: AuthMonitor: don't try to auth import a null buffer
 60a19e7 PendingReleaseNotes: ceph -s --format=json output change
+0ee7008 ceph.conf: enable old message assert
 5dd1d4b test: idempotent filestore test failure
 c70216a Revert "test_filestore_idempotent: use obj name from source coll add"
 0a3c902 Revert "test_filestore_idempotent: make newly created objects globally unique"
@@ -20080,6 +23397,7 @@ ea0fcfa cmdparse.cc: catch exception by reference
 1f8fe70 ceph-rest-api: Missing packaging (binary in .spec, manpage in both)
 3d25f46 ceph-rest-api: make main program be "shell" around WSGI guts
 cb0f831 ceph.in: output even a blank status line in compat mode
+3a5cdb1 added upgrade task for mixed mon test
 93b3e63 msg/Message: use old footer for encoded message dump
 cf8f16d rgw: handle bucket removal by system user on master region
 3b110db ceph_argparse.py: define some self.* in __init__
@@ -20124,6 +23442,8 @@ bfa2284 MonCommands: add new fields: modulename, perms, availability
 c9b54d7 common: Formatter: add flush to bufferlist
 989be66 rados.py: allow long-form names of 'warn' and 'err' as watch levels
 1ad2127 PGMonitor: fix stat comparison in prepare_pg_stats
+c0a7808 rgw: use different daemon name and apache module paths
+e18a6b8 Added task yaml for partial-upgrade of osds
 180c7b4 Get device-by-path by looking for it instead of assuming 3rd entry.
 836e35f Get device-by-path by looking for it instead of assuming 3rd entry.
 a120d81 Makefile.am:  fix ceph_sbindir
@@ -20142,6 +23462,9 @@ ff5f25e debian/ceph-test.install: add missing files
 dfe3c21 src/Makefile.am: rename cls_test_rgw_*
 78bae33 ceph.spec.in: add some ceph_test_cls_* files
 dc99a23 Makefile.am: fix build, use $(CRYPTO_LIBS)
+42b9ea9 misc: move system type checking to a generic location
+7845848 s3tests: fix client configurations that aren't dictionaries
+1c22bdb restart rgw after upgrade for rgw tests
 00ae543 mon: do not scrub if scrub is in progress
 8638fb6 unittest_pglog: fix unittest
 c487014 librados/misc.cc: reverse offset and length on write call
@@ -20160,6 +23483,7 @@ da81228 osd: report pg stats to mon at least every N (=500) epochs
 449283f mon/OSDMonitor: allow osdmap trimming to be forced via a config option
 18a624f mon/OSDMonitor: make 'osd crush rm ...' slightly more idempotent
 c5157dd doc/release-notes: v0.66
+3a1c316 Add mon create and destroy with an optional argument mon_initial_members
 8799872 mon/PaxosService: update docs a bit
 44db2ac mon/PaxosService: inline trim()
 cab8eee mon/PaxosService: move paxos_service_trim_max into caller, clean up
@@ -20189,6 +23513,7 @@ b1b188a os: Remove unused hobject_t::set_filestore_key()
 313b7a1 os: Code conformance in LFNIndex.cc
 395262c rgw: call appropriate curl calls for waiting on sockets
 73c2a3d configure.ac: detect whether libcurl supports curl_multi_wait()
+b325d17 lock: fix typo
 d08b6d6 mon/PaxosService: prevent reads until initial service commit is done
 63fe863 mon/PaxosService: unwind should_trim()
 d600dc9 mon/PaxosService: unwind service_should_trim() helper
@@ -20201,6 +23526,7 @@ b71a009 mon/OSDMonitor: remove dup service_should_trim() implementation
 ca54efd mon: sync all service prefixes, including pgmap_*
 b536935 mon/MonitorDBStore: expose get_chunk_tx()
 43fa7aa mon/OSDMonitor: fix base case for loading full osdmap
+d18fe74 VM: Use mac addresses from DB instead of randomizing.
 ad65de4 ReplicatedPG: send compound messages to enlightened peers
 ae1b2e9 ReplicatedPG: add handlers for MOSDPG(Push|Pull|PushReply)
 c0bd831 OSD: add handlers for MOSDPG(Push|PushReply|Pull)
@@ -20217,30 +23543,47 @@ a498432 ReplicatedPG: pass a PushOp into handle_pull_response
 b6b48db (tag: v0.66) v0.66
 a990664 mon: implement simple 'scrub' command
 afd6c7d mon: fix osdmap stash, trim to retain complete history of full maps
+0a13124 rados/mon_thrash: inject random delays in mon messages
+94ae48c rados/monthrash: reduce matrix size a bit
+dbf12a3 rados/monthrash: refactor a bit
+5295406 rados: expand mon thrashing tests
 dd1e6d4 Revert "Makefile: fix ceph_sbindir"
 f07d216 rgw: fix bucket link
 eec903a doc: Fix env variables in vstart.sh documentation
 69a5544 osd/osd_types: fix pg_stat_t::dump for last_epoch_clean
 94afedf client: remove O_LAZY
 e9d19b3 common/crc32c: skip cpu detection incantation on not x86_64
+a045749 mon_thrasher: add pause/unpause of mons to thrashing
+fdebf40 daemon-helper: send arbitrary signals via stdin
+121b1b9 mon_thrash: optionally scrub after each iteration (default true)
 956fafc qa/workunits/rbd/simple_big.sh: don't ENOSPC every time
 d423cf8 qa/workunits/rbd/kernel.sh: move modprobe up
 672f51b qa/workunits/fs/test_o_trunc.sh: fix .sh to match new bin location
 7b7f752 unit tests for ObjectContext read/write locks
 6f1653a rgw: Add --help support to radosgw
+a80ab93 big: disable all but small until it passes
 85a1d6c mon: remove bad assert about monmap version
+823bf38 Revert "rbd: reenable iozone test, just to see"
+9b22d38 mon_thrash: fix more naming
+389f65e mon_thrash: use _ instead of - consistently
+288b544 kernel: simple_1tb -> simple_big
 3f5a962 qa: write a somewhat <1tb image
 54aa797 qa/workunits/rbd/kernel.sh: modprobe rbd
+84d7651 test_o_trunc.sh moved
 83f3089 qa: move test_o_trunc.sh into fs dir
 507a4ec qa: move fs test binary into workunits dir so teuthology can build it
-a84e6d1 mds/MDSTable: gracefully suicide on EBLACKLIST
+a84e6d18 mds/MDSTable: gracefully suicide on EBLACKLIST
 8b4cb8f rgw: Add explicit messages in radosgw init script
 d09ce3d rgw: fix rgw_remove_bucket()
 972d22e Adding new Wireshark dissector. This is loosely based on the original dissector; it has been re-worked to bring it into line with Wireshark coding guidelines that aim to ensure portability. It currently only decodes a handful of messages as test cases and needs more work to make it useful. See README.txt for a bit more background.
 a793e20 doc: Add a page to document vstart.sh script
+81862ad rbd: reenable iozone test, just to see
 c14847c .gitignore: cls_test_*
+998a513 fs: add o_trunc test on ceph-fuse
+89cd717 kernel: test o_trunc
 22227cd qa: add O_TRUNC test
 46b7fc2 radosgw-admin: fix cli test
+d54932c Fix VM issues.
 a0b1be9 rgw: fix type encoding
 71ebfe7 mon/Paxos: make 'paxos trim disabled max versions' much much larger
 ab93696 mon: be less chatty about discarding messages
@@ -20264,8 +23607,12 @@ c2873c1 rgw: make rgw_cls_complete_op backward compatible
 20e3abc rgw: make rgw_bucket_dir_entry backward compatible
 4fb782c mds: fix O_TRUNC locking
 cd44a62 Makefile: include rbdmap in dist tarball
+813d96f ceph: don't check leaks on client.* (i.e., radosgw)
 352f362 Makefile: fix ceph_sbindir
 9941171 rgw: keep max_marker on bucket index
+ede7b5a upgrade/rgw: use bobtail s3tests
+86c380b upgrade/rgw: mds restart order doesn't matter
+80b0156 upgrade/rgw-double: start rgw
 0da7da8 unitests: fix compilation
 9942efd mon: dead code removal
 5c053b1 rgw: handle new rest operations
@@ -20284,6 +23631,7 @@ a4805ef osdc/Objecter: resend command map version checks on reconnect
 b6c8366 key_value_store: fixup errno handling
 fc7d622 RESTful implementation to dump regionmap implementation
 641bd2f RESTful API implementation for replica_log
+43f60c3 rbd: remove kernel rbd test from rbd suite
 da1fb93 mds: man page: Fixed cut & paste error
 a4cd631 set object_info_t pool of an ObjectContext if it is undefined or bad
 0efcccd Move rbdmap file to /etc/ceph
@@ -20292,9 +23640,14 @@ ffe7045 install rules for init-rbdmap
 3a20b06 rgw: admin: Add --shard-id option to help
 49ff63b rgw: add RGWFormatter_Plain allocation to sidestep cranky strlen()
 94742d4 doc: Minor fix.
+e80eb48 kernel: specify fs (and use raw disks) for rbd-nomount tests
+7120f22 upgrade: ignore mds restart order on rbd, rados tests
 2ad4ff6 rgw: metadata, data logs can be turned on / off
 935c278 mon: Paxos: update first_committed on first paxos proposal
 847465f librados: fix test warning on 32-bit platforms
+93ae152 radosgw-admin: add missing quote
+c0bf24d radosgw-admin: test 'bucket list' command (all buckets)
+e7fa5fc rgw: move radosgw-admin test into rgw suite
 6d90dad os/FileStore: automatically enable 'filestore xattr use omap' as needed
 1b578a8 librados: add test for large and many xattrs
 8694d29 osd/PGLog: populate log_keys_debug from read_old_log()
@@ -20316,6 +23669,7 @@ e235958 mds: log before respawning when standby-replay falls behind
 e75057f rgw-admin: restructure replicalog commands
 87217e1 client: send all request put's through put_request()
 9af3b86 client: fix remaining Inode::put() caller, and make method pseudo-private
+0742f91 kernel: add install, ceph tasks to rbd-nomount collection
 2e857a4 librados: fix cmd OSDCommand test
 72ac281 rgw: don't remove bucket index when removing a bucket
 10a2b60 rgw: bilog list by bucket instance (RESTful api)
@@ -20323,19 +23677,27 @@ e75057f rgw-admin: restructure replicalog commands
 977df77 ceph_json: reset values
 674bbab rgw: user param in access key is not mandatory
 9d702e5 rgw: advance ptr when sending data
+4f2a194 big: timebox test to two hours
+25d7d96 big: thrash, and whitelist wrongly marked me down
 13f9c18 doc: Created an install page for Calxeda development packages.
 9e604ee ceph-disk: s/else if/elif/
+c22b941 Update keys if they have changed before locking
 b4ee7ca changes to accommodate review comments on previous version
 8f1da8f rgw: fix regionmap json decoding
+20434de upgrade: move fs upgrade tests to a separate suite
 cd7510f qa/workunits/misc/multiple_rsync: put tee output in /tmp
 e1f9fe5 rgw: fix radosgw-admin buckets list
 fe66331 Handle non-existent front interface in maps from older MONs
 867ead9 qa/workunits/rbd/simple_1tb: add simple rbd read/write test on large image
+033f9ea kernel: add rbd test to write a 1TB image
+2bdbccf marginal: restructure the multimds collection
 8a17f33 ceph-disk: do not mount over an osd directly in /var/lib/ceph/osd/$cluster-$id
+140e63b ceph: disable logrotate
 986185c mon/PGMonitor: avoid duplicating map_pg_create() effort on same maps
 ca55c34 cephtool/test.sh: add case for auth add with no caps
 bfed2d6 MonCommands.h: auth add doesn't require caps (it can use -i <file>)
 71f3e56 Makefile.am: fix libglobal.la race with ceph_test_cors
+1747a38 kernel: reorg kernel rbd tests; add a few missing scripts
 e635c47 mon/PGMonitor: use post_paxos_update, not init, to refresh from osdmap
 1316869 mon/PaxosService: add post_paxos_update() hook
 ea1f316 mon: do not reopen MonitorDBStore during startup
@@ -20355,12 +23717,15 @@ d31ed95 mon/PaxosService: allow paxos service writes while paxos is updating
 7a2566c rgw: remove test placement info
 224130c rgw (test): remove some warnings
 1b162ce rgw: initialize user system flag
+00bc3a8 rbd/rgw upgrade suites to test upgrade from bobtail to cuttlefish to next
 7681c58 rgw: log in the same shard for bucket entry point and instance
 d4e39a7 rgw: unlink/link don't always update entry point
 5680fa1 doc/release-notes: v0.65
 6673b2d rgw: tie metadata put to bucket link/unlink
 5c3df08 cls_rgw: cleanup
 82db84b rgw: some more internal api cleanups
+ee03afa dump_stuck: fix test
+d5acff2 rados: dump-stuck: remove configs from yaml; let task set them
 c4be5a7 rgw: unlink bucket from user on metadata rm bucket:< bucket>
 86c73c9 rgw: fixes to object versioning tracking
 8bd31d4 rgw: filter read xattrs
@@ -20387,6 +23752,8 @@ c47f271 mon: fix mkfs monmap cleanup
 78b7ec7 mon/PaxosService: drop unused helpers
 6721122 mon/MonmapMonitor: avoid exists_version() helper
 6429cb1 mon/PaxosService: remove unused exists_version() variant
+cedb606 ceph-deploy: no need for ceph-fuse for non fs workunits
+2df1e20 ceph-deploy: install correct branch for rados api tests
 9ae0ec8 mon/Elector: cancel election timer if we bootstrap
 03d3be3 mon: cancel probe timeout on reset
 521fdc2 mon/AuthMonitor: ensure initial rotating keys get encoded when create_initial called 2x
@@ -20409,12 +23776,20 @@ c700db0 mon/MonCap.cc: use empty() instead of if(size())
 4ab5bf6 common/cmdparse.cc: prefer prefix ++operator for non-trivial iterator
 1e3161a rgw: add a system request param to select op bucket instance
 76228ca rgw: data log contains bucket instance info in key
+b338524 Revert "upgrade: add double-hop bobtail -> cuttlefish -> next fs tests"
+56f9b29 upgrade: add rados-double suite (bobtail -> cuttlefish -> next)
+2917fd4 upgrade/rados: refactor, expand matrix, start with cuttlefish
+e672d6b upgrade/fs: make single hop cuttlefish -> next (not bobtail -> next)
 5dd137a rgw: format bucket.instance meta entries nicely
+fbdb777 upgrade: add double-hop bobtail -> cuttlefish -> next fs tests
+827d2f8 upgrade: refactor single-hop fs tests
 4e90c5e rgw: use new instance handler for bucket operations
 71869c4 rgw: create meta handler for bucket instance
+947eba1 rados: fix multiclient tests
 00973df rgw: put_bucket_info() uses objv_tracker in bucket info
 dab57ef rgw: keep objv_tracker on bucket info
 57dc736 msgr: clear_pipe+queue reset when replacing lossy connections
+e6e1df6 dump_stuck: fix race with osd start
 e053d17 rgw: refactor policy reading
 9586305 msgr: reaper: make sure pipe has been cleared (under pipe_lock)
 ec612a5 msg/Pipe: goto fail_unlocked on early failures in accept()
@@ -20424,6 +23799,7 @@ d74cdad rgw: fix policy read
 ad64067 rgw: init member variable
 b09d799 rgw: encode bucket info only after setting a flag
 cd98eb0 mon/AuthMonitor: make initial auth include rotating keys
+13dbe9d enable-coredump -> adjust-ulimits
 9b2dfb7 mon: do not leak no_reply messages
 ad12b0d mon: fix leak of MOSDFailure messages
 c3260b2 rgw: initial work to separate bucket name and instance
@@ -20439,14 +23815,17 @@ e5e924c rgw: data structures for new data/index placement rules
 c4272a1 ceph: even shinier
 34ef2f2 ceph: do not busy-loop on ceph -w
 27912e5 librados: make cmd test tolerate NXIO for osd commands
+f6270a7 Wipe out existing id_rsa.pub and id_rsa before pushing ssh keys
 abd0ff6 mds: do not assume segment list is non-empty in standby_trim_segments
 241ad07 rgw: make replica log object name configurable
+8e4c292 rados: whitelist 'had wrong client addr' for cephtool test
 469900e RGWReplicaBucketLogger: store bucket replica logs in the .logs pool
 3bebbc0 mds: rev protocol
 ded2e84 mds: kill Server::handle_client_lookup_hash()
 2147c4e mds: use "open-by-ino" helper to handle LOOKUPINO request
 24e59b4 rgw: buffer atomic put handler
 02de43a rgw: tie opstate into intra-region copy operations
+b7763af big: enable big cluster (~50 node)
 31d221c ceph.in: remove some TAB chars
 69e1a91 ceph.in: fix ^C handling in watch (trap exception in while, too)
 29f6f27 ceph: --version as well as -v
@@ -20458,6 +23837,7 @@ ab79ba4 cls_replica_log: integrate with RGWRados
 e4ef5c6 cls_replica_log: add the actual class
 22a02e9 cls_replica_log: add ops for new class
 d1c9594 cls_replica_log: add types for new class
+70b5467 Clean up nested-if logic
 0deb6d4 rgw: lock related modifications
 3b4c11b rgw: add max-entries, marker for log operations
 714f212 osdc: re-calculate truncate_size for strip objects
@@ -20482,9 +23862,13 @@ ed8b0e6 FileStore: apply changes after disabling m_filestore_replica_fadvise
 2a4953b ceph-disk: use unix lock instead of lockfile class
 8c0daaf ceph-disk: make list_partition behave with unusual device names
 8a5d989 FileStore: get_index prior to taking fdcache_lock in lfn_unlink
+3d9fa22 task/peering_speed_test.py: add test which summarizes pg peering speed
+bb2cd9e task/: add args.py
 09e869a PGLog::rewind_divergent_log must not call mark_dirty_from on end()
+02aa7d9 schedule_suite.sh: specify admin_socket branch in overrides yaml
 4d77443 unit tests for PGLog::proc_replica_log
 e11cc1c add constness to PGLog::proc_replica_log
+a93b467 Include MySQLdb. Fixes: #5120
 392a8e2 mon/PaxosService: not active during paxos UPDATING_PREVIOUS
 ee34a21 mon: simplify states
 ec2ea86 mon/Paxos: not readable when LOCKED
@@ -20503,11 +23887,16 @@ a42d758 mon/Paxos: do paxos refresh in finish_proposal; and refactor
 d941363 mon: no need to refresh from _active
 03014a4 mon: remove unnecessary update_from_paxos calls
 cc339c0 mon: explicitly refresh_from_paxos() when leveldb state changes
+778d930 Fix to ignore ssh-key checking if running on virtual machines or if a line that reads 'sshkey: ignore' is in the yaml file.
+1441707 Make reset of ssh key code conditional on being a virtual machine. Add and use is_vm to determine if we are running on a virtual machine.
 95bd048 os/FileStore: disable fadvise on XFS
 fd83bc3 client: fix warning
 b2f1a1a mds: fix remote wrlock rejoin
 15a5d37 mds: fix race between scatter gather and dirfrag export
+35f0478 use correct branch for admin_socket tests
+3649e27 admin_socket: fetch test from correct branch
 ded0a5f Revert "client: fix warning"
+4455f75 valgrind: give up and ignore all leveldb leaks
 96c9493 radosgw-admin: interface to control ops state
 8b1524b rgw: OpState internal api
 258edfc rgw: fix check_state call
@@ -20535,6 +23924,8 @@ bfe4bf9 rgw: handle racing default region creation
 ce7b5ea common/Preforker: fix warning
 8bd936f client: fix warning
 6b52acc config.h: ensure U64 option defaults are computed as 64 bits
+32c8234 Use authorized_keys2 instead of authorized_keys
+24fdfd2 ceph-deploy: use correct branch for workunits
 e2af5fb test: test for cls_statelog
 1ecec3a cls_statelog: fixes
 8d5fc70 cls_statelog: add client api functions
@@ -20544,7 +23935,11 @@ df8a3e5 client: handle reset during initial mds session open
 2e27f69 rgw: move a couple of unitests to a different Makefile rule
 f5f8314 rgw: object mtime the same for both object and bucket index
 7f63baa rgw: fix inter-region copy (ofs wasn't correct)
+8c179f3 big: add big suite
+d64f264 rados: don't use ceph-fuse for workunits
 92997a4 mon: fix 'osd dump <epoch>'
+0ff2e42 Use authorized_keys2 instead of authorized_keys
+49e0cfc powercycle: add ext4 to osd powercycle test matrix
 8c6b24e ceph-disk: add some notes on wth we are up to
 94b3700 rgw: intra-region copy, preserve mtime
 29eb333 test/osd/TestPGLog: %s/dirty()/is_dirty()
@@ -20567,7 +23962,9 @@ a9a41bc PGLog: pass only divergent_priors, not ondisklog
 3924531 PGLog: clear missing on backfill reset
 4c89a2b rgw: system user get obj returns JSON encoded metadata
 47ce702 *: always include rados.h using types.h
+ebd003b valgrind: another leveldb leak
 d46e6c3 OSD: we need to check pg ?.0 for resurrection
+4386d2c rbd: do not use ceph-fuse to run functional tests
 5bf08cd libcephfs: add a couple multiclient tests
 ee40c21 client: fix sync read zeroing at EOF
 e538829 ceph-disk: clear TERM to avoid libreadline hijinx
@@ -20590,13 +23987,23 @@ f11ec5c mds: handle undefined dirfrags when opening inode
 29e6597 mds: fix frozen check in Server::try_open_auth_dirfrag()
 18b9e63 mds: don't update migrate_seq when importing non-auth cap
 f179dc1 mon: make mark_me_down asserts match check
+ae04f3c rados: whitelist 'wrongly marked me down' for cephtool tests
 2fe4d29 ceph: remove space when prefix is blank
 8d9eef2 ceph: fix return code for multi-target commands
 84d1847 ceph: error out properly when failing to get commands
 8d9272e test/admin_socket/objecter_requests: fix test
+7887f84 misc: let clients use any pool
 efebdba ceph: do not print status to output file when talking to old mons
+7177d2e ceph_manager: fix ceph tell mon.*
+38f82f6 rados: no cephtool test on ceph-fuse
+7341364 rados: do not run cli tests on top of ceph-fuse
+1ec8ba5 no need for ceph --concise argument
 80c2d5a osd/ReplicatedPG: length 0 writes are allowed
 1113ff0 rgw: get / set user & bucket meta mtime
+1dcc0c5 fs: disable multiclient tests on kernel
+c5cf1bd fs: disable multiclient fsx
+50d9738 move multiclient tests from marginal -> fs suite
+719988f marginal: refactor multiclient collection, separate out locktest
 7e7ff75 common/Preforker: fix broken recursion on exit(3)
 f25f212 osd/OSDMap: fix is_blacklisted()
 21e85f9 qa/workunits/misc/multiple_rsync.sh: wtf
@@ -20628,6 +24035,7 @@ bb5fae4 ceph.in: zero-arg invocation was broken (check array length)
 2be1670 doc: Minor updates.
 b3a143d doc: Minor updates for usage.
 10c0ae9 rgw: remove unused variable
+1bf7a2c rados: thrash under valgrind
 3474fa8 rules:  Don't disable tcmalloc on ARM (and other non-intel)
 37cc85e rgw: bucket marker contains zone name
 bcfd2f3 udev: drop useless --mount argument to ceph-disk
@@ -20640,10 +24048,12 @@ b1293ee ceph: flush stderr, stdout for sane output; add prefix
 92b8300 mon: OSDMonitor: don't ignore apply_incremental()'s return on UfP [1]
 7e08ed1 upstart: start ceph-all on runlevel [2345]
 7503db9 ceph: fix mon.*
+9cdc60a ceph_manager: use new ceph tell mon.* syntax
 a2b2f39 librados: add tests for too-large objects
 4a1eb3c osd: fix types for size checks
 2be3c8d remove RELEASE_CHECKLIST
 f1b6bd7 osd: EINVAL from truncate causes osd to crash
+8730db1 rados: fix up for parallel work
 bcfbd0a ceph_test_rados: add --pool <name> arg
 7ec64db rgw: pass original object attrs through extra request data
 02599c4 ceph-fuse: fix uninitialized variable
@@ -20654,8 +24064,10 @@ a2a78e8 ceph-disk: implement 'activate-journal'
 6ebfd3c ceph.in: better global description of tool
 821b203 ceph.in: less verbosity on error
 99bd5c8 librados: add missing #include
+0e30386 adding a newline to auth key data
 93505bb librados: wait for osdmap for commands that need it
 f6a864d rules:  Don't disable tcmalloc on ARM (and other non-intel)
+e1daa16 modified ceph-deploy to throw appropriate exceptions
 5fb0444 Update adminops.rst add capabilities
 2bda9db osdc/Objecter: dump command ops
 6e73d99 osdc/Objecter: ping osds for which we have pending commands
@@ -20663,6 +24075,8 @@ e4f9dce ceph.in: refuse 'ceph <type> tell' commands; suggest 'ceph tell <type>'
 a6876ad ceph.in: argparsing cleanup: suppress --completion, add help
 68a9199 osdc/Objecter: kick command ops on osd con resets
 db7d121 osdc/Objecter: add perfcounters for commands
+05f634cb added upgrade tasks for cuttlefish to next and bobtail to cuttlefish to next
+f005204 stop stripping leading \n from osd commands
 9a7ed0b mon: fix idempotency of 'osd crush add'
 7e1cf87 librados: do not wait for osdmap on start
 51dae8a doc: Updated with glossary terms.
@@ -20709,6 +24123,7 @@ d0e6575 ceph: implement 'ceph tell osd.* ...'
 6a6025f rgw: generate read request if source rgw is remote
 3e707da vstart.sh: set run_dir to out
 b0b5b1b rbd image_read.sh: wait for rbd sysfs files to appear
+8badb90 move powercycle tests into a separate suite
 8808ca5 osdc/Objecter: fix handling for osd_command dne/down cases
 1154b2a init-ceph: look to ceph.conf instead of hard-coding /var/run/ceph
 6542991 global: create /var/run/ceph on daemon startup
@@ -20726,18 +24141,23 @@ afa16b4 qa: multiple_rsync.sh: more output
 42e06c1 (tag: v0.64) v0.64
 68b5fa9 ceph-fuse: older libfuses don't support FUSE_IOCTL_COMPAT
 1577e20 ceph-create-keys: Make sure directories for admin and bootstrap keys exist
+d396096 valgrind: make leveldb thread suppression more general
 95434d1 rgw: propagate mtime from remote rgw on copy
+8f29b3f Use install -d for /var/log/ceph. Additional fix needed for #4946
 256afa0 store_test: create_collection prior to split
 1a9415a mon: adjust trim defaults
 5f0007e doc: Reworked the landing page.
 dc6cadc doc: Added a hostname resolution section for local host execution.
 f6c51b4 doc: Added some tips and re-organized to simplify the process.
+ae74abd Fix capitalization of CentOS
 9b012e2 client: set issue_seq (not seq) in cap release
 c7fb7a3 doc: Added some Java S3 API troubleshooting entries.
 6c557d5 doc: Added install ceph-common instruction.
 5543f19 doc: Added install ceph-common instruction.
 3f3ad61 doc: Fixed :term" syntax.
 00743d5 rgw: propagate error from remote gateway when copying object
+77e7bdb upgrade: set workunit branch before and after upgrade
+662cafa upgrade/fs: remove unused file
 0948624 ceph-create-keys:  Remove unused caps parameter on bootstrap_key()
 3f2017f osd: fix con -> session ref change after hb reset
 ea3efca rgw: pass grant headers to target when writing object
@@ -20749,6 +24169,7 @@ a378c4d common/admin_socket: fix leak of new m_getdescs_hook
 8190b43 OSD: create collection in handle_pg_create before _create_lock_pg
 af92b9a Objecter: fail osd_command if OSD is down
 a741aa0 mon: send "osd create" output to stdout; tests rely on it
+335185d added support for rhel
 db0c250 rgw: send meta headers with remote PUT request
 566315c rgw: don't busy wait for outgoing rest requests
 cbf860f rgw: don't send redirect if copy object has a local source
@@ -20760,32 +24181,41 @@ bf6b80c6 rgw: fix logic related to operations on different region
 284f6a2 rgw: format put request auth
 72cb5fd rgw: bucket metadata, ignore ENOENT on put
 31676c5 rgw: skeleton for obj copy across regions
+a3c1122 teuthology-lock --summary:  allow --machine-type=all
 f6c7c0c rgw: metadata lock/unlock implemented with test cases
 d1afc81 RESTful APIs for data changes log implemented with test cases
+0911e54 ceph: ignore ceph-osd leaks for now :(
 8f141c4 unit tests for PGLog::rewind_divergent_log
 04e89a4 unit tests for PGLog::merge_log
+c41e6ff valgrind: glibc/boost_thread leak suppressions
 6ce2354 messages/MMonProbe: fix uninit vars (again)
 10bfa83 osdc/Objecter: clear osd session command ops xlist on close
 81a786e librados: fix pg command test
 00eaf97 librados.h: Fix up some doxygen problems
 e8300d0 mds: fix filelock eval_gather
+22532cc ceph_manager: drop -- before --format=json arg
 2b4157a .gitignore: add 'ceph', now a generated file
+028f1c7 valgrind: more leveldb whitelisting
+a4994e3 Support added for running scheduled tasks on virtual machines. This included: A) changes made so that full path names on some files were used (scheduled tasks started in different home directories), B) changes to ensure tasks come up on the beanstalkc queue properly, C) finding and inserting the libvirt equivalent code for vm machines in order to simulate ipmi actions, D) fixing host key code and reporting valgrind issues more clearly, E) some me [...]
 359f456 ceph: old daemons output to outs and outbuf, combine
 b3f38f3 ceph: handle old OSDs as command destinations, fix status part of -w
+754301b support install task for fedora
 11e1afd ceph: add -v for version.  Makefile processes ceph_ver.h
 fde536f osd: make scrub chunk size tunable
 637e0ea rados: --num-objects will now cause bench to stop after that many objects
 0bc731e test_filestore_idempotent: use obj name from source coll add
+9d6141d Add RHEL support to teuthology
 ad3934e rgw: handle deep uri resources
 8d55b87 rgw: fix get_resource_mgr() to correctly identify resource
 9a0a9c2 rgw: add 'cors' to the list of sub-resources
 0b036ec osd: do not include logbl in scrub map
 dea8c2d doc: Updated for glossary terms and added indexing.
 8e24328 doc: Added indexing and did a bit of cleanup.
+03b12b8 rados: add btrfs to osd powercycle tests
 a08d620 mds: do not double-queue file recovery in eval_gather
 faa1ae7 MDSMonitor: dead debugging code
 7785fc5 rados.py: make exception with errno string instead of int
-24a5212 ceph, librados, rados.py, librados tests: pass cmd as array
+24a5212d ceph, librados, rados.py, librados tests: pass cmd as array
 37a6102 ceph: -v should mean version, not --verbose
 4446681 ceph: error and verbose message improvement
 b114e11 ceph: refactor -s -> status to common code, pass childargs to old mons
@@ -20805,6 +24235,7 @@ f2177a4 mon: AuthMonitor: remove dead code to avoid confusion
 8e13b38 doc: Updated with glossary terms.
 7116de3 doc: Correction and index tags.
 ae5aeb1 doc: Updates for glossary terms.
+c956980 task/install.py: extraneous subscript in upgrade() for only some remotes
 b394edc doc/release-notes: v0.61.3
 3d6eaf5 doc: Added upstart syntax.
 2d9a46c doc: Added link to Upstart syntax.
@@ -20833,7 +24264,10 @@ c76838f osd: Style corrections
 a99435d mon: fix leak of loopback Connection
 08bb8d5 messages/MMonHealth: remove unused flag field
 4974b29 messages/MMonProbe: fix uninitialized variables
+e4eb4aa teuthology-lock --summary:  allow --machine-type=all
 8c23d8c dev/rbd-diff: make formats into bullet lists (solves linebreak probs)
+ff8f9d3 rados: only 1 client for objectcacher stress test
+188ae89 ceph_manager: don't say you have no arguments and then list them
 09a796d doc: Removed ceph-deploy git, and added ceph-deploy packages.
 d1e2e7b doc: Modified pre-flight checklist for ceph-deploy packages.
 72a6767 doc: Added title. Suppresses no title warning messages.
@@ -20853,6 +24287,7 @@ c216914 Revert "mds: initialize some member variables of MDCache"
 ce67c58 os/LevelDBStore: only remove logger if non-null
 cdf5785 test_filestore_idempotent: make newly created objects globally unique
 713719a test_librbd: use correct type for varargs snap test
+3ec763c ceph: fix valgrind grep output parsing
 b70868e unit tests for PGLog::merge_old_entry
 21a1c4d mon: fix POOL_OP_AUID_CHANGE
 18f2f19 test/librados: remove "set_auid" pool tests
@@ -20883,6 +24318,7 @@ b0469a1 cephtool/test.sh: fix "expect failure"
 22660bd MDSMonitor, cmdparse: increase resiliency of bad cmd_getval()
 1cb7dbd messages/MClientCapRelease: fix string output
 27f82ce ceph: Add missing usage "ceph osd blacklist ls"
+bd7292b ceph: debug valgrind error
 367e203 fusetrace_ll.cc: handle return value of fuse_session_loop()
 541320c mds/MDCache.cc: reduce scope of 'CDir *dir' in _create_system_file_finish()
 84dedf5 mount/mount.ceph.c: reduce scope of 'value'
@@ -20895,10 +24331,12 @@ f7071b2 osd/PG.cc: prefer prefix ++operator for iterator
 1e99be1 vstart.sh: make client logs unique
 eb6d5fc os/LevelDBStore: fix merge loop
 6cdc4f8 merge_old_entry arguments info and oe are changed to const because there is no side effect.
+ed8e3c3 rados: increase recovery timeout when powercycling
 d7e2ab1 mon: fix uninitialized fields in MMonHealth
 f1ccb2d mon: start lease timer from peon_init()
 fb3cd0c mon: discard messages from disconnected clients
 6b8e74f mon/Paxos: adjust trimming defaults up; rename options
+957b0d0 replaced tab with spaces
 a03ccf1 OSD: *inodes_hard_limit must be less than the fd limit
 e12a9c4 OSD: tell them they died if they don't exist as well
 cec8379 osd: fix msg leak on shutdown in ms_dispatch
@@ -20936,6 +24374,7 @@ a7a0425 bench/dumb_backend.cc: check return value of posix_fadvise()
 c5fc52a rgw: only append prefetched data if reading from head
 b1312f9 rgw: don't copy object idtag when copying object
 8f3f053 rgw: Do not assume rest connection to be established
+64eb6df valgrind: add another leveldb suppression
 df2d06d mon: destroy MonitorDBStore before g_ceph_context
 f4eddd7 doc: Updated to reflect glossary usage.
 474bb16 doc: Updated title and syntax to reflect glossary usage.
@@ -20960,6 +24399,8 @@ e9c32b9 doc: note openstack changes for Grizzly
 352b7b5 doc: start Hadoop installation docs
 743c528 doc: Hadoop clarifications
 5fa098f Added -r option to usage
+61ed69b use cuttlefish branch instead of master
+d029abf valgrind: update suppressions for leveldb, libc leaks from mon
 c888d1d mon: fix leak of health_monitor and config_key_service
 3c57061 mon: return instead of exit(3) via preforker
 626de38 mon: Monitor: backup monmap using all ceph features instead of quorum's
@@ -20967,6 +24408,7 @@ c888d1d mon: fix leak of health_monitor and config_key_service
 9e658f0 debian: stop sysvinit ceph-mds daemons
 70a3832 debian: only stop daemons on removal; not upgrade
 f402568 rbd/concurrent.sh: probe rbd module at start
+2957d68 rbd_concurrent: add new task to test concurrent.sh
 0c05955 osd: wait for healthy pings from peers in waiting-for-healthy state
 04aa2b5 osd: distinguish between definitely healthy and definitely not unhealthy
 28ea184 osd: remove down hb peers
@@ -20997,6 +24439,7 @@ e634d9d Use new fuse package instead of fuse-utils
 4af917d os/LevelDBStore: do compact_prefix() work asynchronously
 dd35c26 osd: fix note_down_osd
 45b84f3 osd: fix hb con failure handler
+8f4de68 RHEL support added
 054e96c (tag: v0.63) v0.63
 64d1178 rgw: mdlog, bilog RESTful api cleanup
 fabe723 ceph: first cut at --completion
@@ -21044,6 +24487,7 @@ d7b999b mds: don't stop at export bounds when journaling dir context
 a6df764 PendingReleaseNotes: notes about enabling HASHPSPOOL
 aa0649c osdmaptool: fix cli tests
 0740811 With mdlog lock and unlock functionality, listing and trimming base on shard_id
+8cec56d valgrind: select notcmalloc ceph install flavor
 615b54c doc: Updated rgw.conf example.
 6f93541 doc: Updated RGW Quickstart.
 e59897c doc: Updated index for newer terms.
@@ -21102,6 +24546,7 @@ c2e262f osd: skip mark-me-down message if osd is not up
 08c39b8 ReplicatedPG::submit_push_complete don't remove the head object
 eb91f41 messages/MOSDMarkMeDown: fix uninit field
 70c9851 mds: weaken reconnect assertion
+4e18587 ceph: fix valgrind log check
 bec630f cephtool/test.sh: add test for --verbose and --concise
 28a6761 ceph: fix --concise (set verbose to False)
 d81d0ea sysvinit: fix osd weight calculation on remote hosts
@@ -21116,6 +24561,7 @@ c307d42 ceph-osd: specify which types of addresses to pick
 6d89323 ceph-mon: only care about public addr during pick_addresses()
 1ba9f84 common: add mask argument to pick_addresses() to specify what we need
 78a1834 ceph: remove cli test
+056a823 schedule_suite.sh: resolve ceph sha1 using deb gitbuilder, not tarball
 e15d290 mon: Paxos: get rid of the 'prepare_bootstrap()' mechanism
 586e8c2 mon: Paxos: finish queued proposals instead of clearing the list
 549e547 mds/Migrator.cc: fix possible dereference NULL return value
@@ -21212,6 +24658,7 @@ e9d20ff mon: implement --extract-monmap <filename>
 c0268e2 mon: implement --extract-monmap <filename>
 669e45e rgw: forward bucket creation to master region
 5671fa9 ceph-monstore-tool: implement getmonmap
+945328d rbd_image_read: add new task to test image_read.sh
 d48f1ed rgw: protect ops log socket formatter
 0866517 FileStore: add fd cache
 14d8cc6 ceph-fuse: add ioctl support
@@ -21237,12 +24684,17 @@ a6569c5 ceph-monstore-tool: compact command
 d2a4253 mds: better error check on sessionmap load
 87767fb mon: be a bit more verbose about osd mark down events
 03b9326 Makefile: add -Wformat-security
+6c9292c thrashosds: sync before doing powercycle testing
 a7096f8 rgw: add access key to zone info
+29521f9 rbd: add read flags test with each cache mode
+18e975e ceph-qa-suite: updating Hadoop tests
+f994107 schedule_suite.sh: 8hr -> 10hr suite timeout
 d05a4e5 ceph df: fix si units for 'global' stats
 0c2b738 ceph df: fix si units for 'global' stats
 17f6fcc Remove stop on from upstart tasks
 6340ba8 Fix -Werror=format-security errors
 947e133 MDLOG and BILOG REST Apis implemented along with some bug fixes
+d7ec089 install: make overrides grouped by project
 8bba266 libcephfs: add ceph_get_pool_name()
 feec1b4 doc: Added more glossary-compliant terms and indexing.
 5c4b4f0 doc: Added another instance term to the glossary.
@@ -21254,6 +24706,7 @@ decf342 doc: Minor improvements to Ceph FS landing page.
 42c74fd libcephfs: get stripe_unit/stripe_count/object_size/pool_id by file handle/path
 10496a8 libcephfs: fix typos
 ee3d50e Client: get describe_layout by file handle/path
+5a274c8 client config will be done only after the cluster is operational.
 69e2cbe mon: add 'compact' command
 a130cd5 kv_flat_btree_async.cc: release AioCompletion before leave the loop
 4ba70f8 librbd/internal.cc: fix resource leak
@@ -21275,8 +24728,11 @@ c49ba75 os/FileStore: print error code to log on replay guard failure
 58a880b doc: Fixing index references.
 46f5f58 doc: Added latency comment.
 604c83f debian: make radosgw require matching version of librados2
+bc9f502 set permission for config file
+1df344f schedule_suite.sh: put sha1 in install: overrides, not ceph:
 eaf3abf FileJournal: adjust write_pos prior to unlocking write_lock
 541396f client/Client.cc: fix/silence "logically dead code" CID-Error
+7cb59d3 added UserKnownHostsfile to ssh config
 64871e0 mds: avoid assert after suicide()
 49033b6 objclass/class_debug.cc: reduce scope of 'n' in cls_log()
 297b573 tools/ceph-filestore-dump.cc: reduce scope of 'r' in export_files()
@@ -21325,6 +24781,7 @@ bf612f0 rgw: modify metadata RESTful implementation
 f36ec02 doc: Updated architecture document.
 48e89b5 OSD: scrub interval checking
 1f4e7a5 OSD: Don't scrub newly created PGs until min interval
+e582e15 Fix scrub_test.py permission error
 7b93d28 doc/release-notes: v0.62
 52b0438 doc/rados/configuration: fix [mon] osd min down report* config docs
 2a4425a reflect recent changes in the pg deletion logic
@@ -21364,6 +24821,7 @@ f24b8fb PG: fix some brace styling
 72bf5f4 PG: subset_last_update must be at least log.tail
 395a775 SimpleThrottle: fix -ENOENT checking
 d06d0c3 rgw: slightly simplify metadata abstraction
+58836d9 qemu: load the kvm module before trying to use it
 bb6d1f0 rgw: read bucket metadata before writing it
 88af2b0 Replace mis-named mon config variables using mon_osd_min_down_reports/mon_osd_min_down_reporters
 225fefe ceph-disk: add '[un]suppress-activate <dev>' command
@@ -21379,8 +24837,20 @@ ba05b16 mon: Monitor: tolerate GV duplicates during conversion
 d519346 Objecter, librados: use only ObjectOperation form of sparse_read internally
 ed76824 Objecter: fix error handling for decoding stat
 82211f2 qa: rsync test: exclude /usr/local
+62eb49f schedule_suite.sh: bump suite timeout from 6->8 hours
 459c731 osd/OSD.h: fix try_stop_deletion
+f330d03 fs/samba: disable smbtorture lock test
+8925738 fs/samba: fix noceph mount point
+63203c6 localdir: create/cleanup mnt.foo dir on local fs
 a0d238c rgw: cache obj version
+391f3ab fs/samba: smbtorture: disable base.bench-hold* tests
+857279b fs/samba: add noceph.yaml baseline
+464e5e3 fs/samba: disable kernel build
+a095075 ceph-qa: update Hadoop tests overrides
+6205c3d rados/osd-powrcycle: turn up mds logging
+d0e9a19 fs/samba: restructure and expand test collection
+a12464f Do not scan for vm locks when listing all machines.
+996f1ed task modified to include a '-' before the test script
 93f2794 Throttle: move start_op() to C_SimpleThrottle constructor
 613d747 librbd: run copy in parallel
 fb299d3 librbd: move completion release into rbd_ctx_cb()
@@ -21429,10 +24899,12 @@ b66b8dd common/admin_socket.cc: remove scope of ret variable in do_accept()
 e7d1114 cls/rbd/cls_rbd.cc: reduce scope of variable rc
 cdfc4a7 rgw/rgw_op.cc: use empty() instead of size()
 45ffb36 ceph-filestore-dump.cc: use empty() instead of size()
+1b0f241 Revert "radosgw-admin: Test bucket list for bucket starting with underscore."
 723062b doc: Updated usage syntax. Added links to hardware and manual OSD remove.
 01a07c1 OSD: rename clear_temp to recursive_remove_collection()
 f5a60ca osd: remove_dir use collection_list_partial
 8b3cd6e rgw: don't handle ECANCELLED anymore
+d7fe5c0 nuke: don't require noipmi in ctx
 7a8d6fd PG,OSD: delay ops for map prior to queueing in the OpWQ
 d3dd99b PG: no need to wait on DeletingStateRef for flush
 0ef9b1e osd_internals/pg_removal.rst: update for pg resurrection
@@ -21474,6 +24946,7 @@ a284c9e common/Preforker: fix warnings
 83bbae4 debian/control:  squeeze requres cryptsetup package
 46c3e48 ceph_json: dump timestamp in utc
 551571c rgw: datalog trim
+5caa2bd default project to ceph and extra_pkgs to none
 52666dc PG: reassert_lock_with_map_lock_held() is dead
 17705d7 OSD,PG: lock_with_map_lock_held() is the same as lock()
 e2528ae ceph-create-keys: gracefully handle no data from admin socket
@@ -21481,7 +24954,7 @@ f2a54cc init-ceph: fix osd_data location when checking df utilization
 ea809f7 rgw: bucket index log trim
 546ed91 osd: don't assert if get_omap_iterator() returns NULL
 76b736b rgw: metadata log trim
-36ec6f9 osd: don't assert if get_omap_iterator() returns NULL
+36ec6f9b osd: don't assert if get_omap_iterator() returns NULL
 3846451 rgw: user operation mask
 ef82ad7 rgw: resend data log entry if took too long
 3385c2c rgw: more data changes log implementation
@@ -21563,10 +25036,13 @@ a12357c rgw: admin command to show region info
 488a20d rgw: region creation
 960fa0d rgw: region management encoding/decoding changes
 64aa4e4 rgw: define region/zone data structures
+3ff0fff fs/samba: Add tests for samba/cifs tasks
 f0c0997 doc/install/os-recs: reverse order of releases
 f8ae2bd doc: Fixed typos.
 452fb52 doc: Fixed typo.
 a0cb5e5 doc: Removed "and" as suggested.
+783b92f install: default to ceph project throughout
+5741228 ceph_manager: add timeout option to revive, increase for power_cycle
 ad75582 doc: Fixed hyperlink.
 87160c4 doc: Fixed path typo.
 2d6e4d2 doc: Updated OS support for Cuttlefish.
@@ -21581,9 +25057,15 @@ c207516 doc: Added glossary to TOC.
 8586738 Fix whitespace indentation
 ad504e9 Implement 'config get <var>' for the admin socket
 27d86bd fixed common typo in error messages
+f3963b2 suites/marginal:  Add backtrace restart test
 2bd2731 doc/install/{debian,rpm}: update for cuttlefish
 7c3a0e8 doc/start/get-involved: fix links
 b107081 doc/release-notes: I missed rgw rest api in the release notes
+f1be93f install: only remove ceph data if project is ceph
+0b7cd6a task/cifs-mount.py: Task for mounting cifs
+4899fa1 task/samba.py: Samba task to setup/start smbd
+c0f1ef7 task/daemon-helper: Add nostdin option
+980973d task/install.py: Allow installation of non-ceph
 9d85d67 os/ObjectStore: add missing break in dump()
 c693ba5 rados: add whole-object 'clonedata' command
 298ebdb doc: Deleted redundant "so that" phrase.
@@ -21592,6 +25074,8 @@ c693ba5 rados: add whole-object 'clonedata' command
 2d4b5bd Removed comment out of header, and added "coming soon."
 1cfc6e3 doc: Updated usage for push | pull.
 048e049 Clean up defer_recovery() functions
+2bbbd81 add fs collection ceph-deploy blogbench test in new singleton suite
+3219a87 add fs collection ceph-deploy blogbench test in new singleton suite
 bd36e78 osd: make class load errors louder
 0b4c5c1 osd: optionally enable leveldb logging
 c1d5f81 mon: allow leveldb logging
@@ -21601,9 +25085,12 @@ eb69c7d os/: default to dio for non-block journals
 e662b61 ceph-test.install: add ceph-monstore-tool and ceph-osdomap-tool
 eae02fd ceph.spec.in: remove twice listed ceph-coverage
 71cef08 ceph.spec: add some files to ceph
+33c154c Fix teuthology installations on physical CentOS machines. Yum installs of packages specify a package number. Initial install of yum source changed to not fail if already done. Added yum cleans where necessary.
 c5d4302 doc: Update the usage to reflect optional directory name.
 35acb15 doc: Rearranged to show zapping multiple disks and creating multiple OSDs.
 8add78c doc: Moved install to the second step, from the first step.
+495438f apparently this should never work on our current configs
+7628adb apparently this should never work on our current configs
 6abbe68 doc: Autonumbering syntax correction.
 efa460c doc: Added troubleshooting PGs to the index.
 cddf3b5 doc: Commented out osd list for now.
@@ -21612,6 +25099,8 @@ cddf3b5 doc: Commented out osd list for now.
 3540d90 ceph-test.install: add ceph-monstore-tool and ceph-osdomap-tool
 6f33885 ceph.spec.in: remove twice listed ceph-coverage
 acb60e5 ceph.spec: add some files to ceph
+496fd60 upgrade: fix up rgw tests a bit
+2d59b8a nfs: debug mds
 1a67f7b mon: fix init sequence when not daemonizing
 a763569 ceph: add 'osd crush rule ...' to usage
 3f0b8ec mon: avoid null deref in Monitor::_mon_status()
@@ -21624,6 +25113,7 @@ c189d85 init-ceph: update osd crush map position on start
 6f8c1e9 doc/release-notes: add/link complete changelogs
 4fa2c49 doc/release-notes: v0.56.5
 72fc6eb doc: Fixed typos.
+e3b0e1e s3tests: add force-branch with higher precedence than 'branch'
 5cdd731 Revert "mon: fix Monitor::pick_random_mon()"
 b4e73cc doc/install/upgrading...: note that argonaut->bobtail->cuttlefish must be v0.56.5
 039a3a9 tools/: add paranoid option to ceph-osdomap-tool
@@ -21631,12 +25121,17 @@ b4e73cc doc/install/upgrading...: note that argonaut->bobtail->cuttlefish must b
 444660e librados,client: bump mount timeout to 5 min
 6a61268 OSD: also walk maps individually for start_split in consume_map()
 c659dd7 rgw: increase startup timeout to 5 min
+366781e upgrade/rgw: run first s3tests pass using bobtail tests
 45c9e24 doc/install/upgrading...: note about transitioning to ceph-deploy
 a8d4647 doc/release-notes: note about ceph-deploy
 f95a053 Update debian.rst
+9e6f7b1 nuke.py: Allow ipmi power cycling to be skipped
+d230fb8 upgrade: fix client ids
+a488d61 upgrade rgw: increase client mount timeout
 615b84b Makefile,gitignore: ceph-monstore-tool, not ceph_monstore_tool
 628e232 Makefile: put ceph_monstore_tool in bin_DEBUGPROGRAMS
 d0d93a7 tools: ceph-osdomap-tool.cc
+01d2e15 For vms, fix some bad default configuration settings.
 f498226 OSD: load_pgs() should fill in start_split honestly
 3e0ca62 OSD: cancel_pending_splits needs to cancel all descendants
 d944180 osd: add --osd-leveldb-paranoid flag
@@ -21644,6 +25139,10 @@ d944180 osd: add --osd-leveldb-paranoid flag
 dfacd1b dumper: fix Objecter locking
 7bb145b doc/rados/deploy: note that osd delete does not work yet
 771f452 doc/rados/deploy: misc edits
+c838e1f Revert "Revert "Specify xfs for osd powercycle testing""
+b124e8e ceph_manager: mount_osd_data expects osd as a str
+b948406 ceph.py: set up ctx.disk_config outside of the loop
+0382aa6 ceph.py: the journal component does not currently work with restart
 a21ea01 Revert "PaxosService: use get and put for version_t"
 88c030f mon/Paxos: update first_committed when we trim
 3a6138b mon/Paxos: don't ignore peer first_committed
@@ -21653,20 +25152,33 @@ bb270f8 mon: Monitor: fix bug on _pick_random_mon() that would choose an invalid
 2f6eb39 docs: Update links to Github and the Tracker
 81b06be docs: Update the ceph-users join and leave addresses
 b3f37ea docs: Update CloudStack RBD documentation
+853e8fd Revert "Specify xfs for osd powercycle testing"
 fe68afe mon: communicate the quorum_features properly when declaring victory.
 b17e842 doc: Incorporating Tamil's feedback.
 bd6ea8d doc: Reordered header levels for visual clarity.
 bb93eba doc: Fixed a few typos.
 14ce0ad doc: Updated the upgrade guide for Argonaut and Bobtail to Cuttlefish.
+52742fb fix some errors found by pyflakes
+7df72f2 s3tests: revert useless portion of 1c50db6a4630d07e72144dafd985c397f8a42dc5
+809814b rgw: restart radosgw too
+5a6e560 rgw tests: remove users after each test
+6aba6d2 rgw tests: clean up immediately after the test
 7de29dd doc/release-notes: update cuttlefish release notes to include bobtail
+935e868 ceph: allow restarting radosgw
+55b16c7 rgw: add to ctx.daemons so it can be stopped/started dynamically
+4979df3 misc: move daemon stopping function to a generic place
+79abc44 Specify xfs for osd powercycle testing
 cd1d6fb ceph-disk: tolerate /sbin/service or /usr/sbin/service
 a97ecca mon: Monitor: disregard paxos_max_join_drift when deciding whether to sync
 a39bbdf mon: if we get our own sync_start back, drop it on the floor.
 d00b4cd Revert "mon: update assert for looser requirements"
 cedcb19 Revert "mon: when electing, be sure acked leaders have new enough stores to lead"
 c2bcc2a ObjectCacher: wait for all reads when stopping flusher
+08bf161 Verbose output on ceph-qa-chef.
 6ae9bbb elector: trigger a mon reset whenever we bump the epoch
 0acede3 mon: change leveldb block size to 64K
+4f70c89 misc: default base_test_dir to /home/ubuntu/cephtest
+57404b6 swift, s3readwrite: add missing yield
 6f2a7df doc: Fix typo.
 35a9823 doc: Added reference to transition from mkcephfs to ceph-deploy.
 de31b61 doc: Updated index for new pages. Added inner table.
@@ -21679,6 +25191,7 @@ fa9f17c doc: Added transition from mkcephfs to ceph-deploy page.
 adb7c8a osd: read kb stats not tracked?
 b5e2461 osd: Rename members and methods related to stat publish
 bd68b82 mon: enable 'mon compact on trim' by default; trim in larger increments
+70ce4db Disable quiet-mode output on wget for ceph-qa-chef
 929a994 mon: share extra probe peers with debug log, mon_status
 030bf8a debian: only start/stop upstart jobs if upstart is present
 825a431 man: update remaining copyright notices
@@ -21696,11 +25209,15 @@ ee3cdaa mon: compact leveldb on bootstrap
 ffc8557 doc: update rbd man page for new options
 8b2a147 gitignore: add ceph_monstore_tool
 29831f9 Makefile: fix java build warning
+c8ec76e s3tests, s3readwrite, swift: cleanup explicitly
 a2fe013 mon: remap creating pgs on startup
 278186d mon: only map/send pg creations if osdmap is defined
 28d495a mon: factor map_pg_creates() out of send_pg_creates()
 896b277 client: make dup reply a louder error
 ee553ac client: fix session open vs mdsmap race with request kicking
+a9188bf upgrade: fs: ignore 'wrongly marked down'
+4f2df74 rbd: don't test python on bobtail
+f3b7db1 upgrade: restructure rbd tests
 bf0b430 Fix a README typo
 cea2ff8 mon: Fix leak of context
 20d99c4 doc: Removed extra whitespace.
@@ -21713,11 +25230,24 @@ cea2ff8 mon: Fix leak of context
 dd6e79a doc: Removed installed Chef. This is now in the ceph wiki.
 945dac6 doc: Removed text for include directive. Wasn't behaving the way I'd hoped.
 3d9bc46 doc: Added ceph-mds to CephFS toc.
+45df0b2 workunit: use passed refspec rather than checking sha1 again
+de745db install.upgrade: apt-get install instead of upgrade
+1e52fb9 install: prefer 'branch' over 'sha1'
+1e449d4 nfs: debug mds
 44d13a7 doc: Fix. ceph, not chef.
+8315a22 upgrade: debug fs jobs
+bc0b50f upgrade: dbench instead of blogbench
+7882363 upgrade: reorganize the basic/rados suite
+17f34a7 rgw asdf
+f1eeec3 upgrade: rgw: restructure collection
 5327d06 ceph-filestore-dump: fix warnings on i386 build
 79280d9 OSDMonitor: when adding bucket, delay response if pending map has name
 e725c3e PaxosService: use get and put for version_t
 1e6c390 tools: add ceph_monstore_tool with getosdmap
+5744afe upgrade: do not start second radosgw
+f08c3a5 upgrade: mount fs with ceph-fuse for fs tests
+ab353c7 upgrade: run blogbench against ceph-fuse
+928e241 upgrade: run rados python test on bobtail to avoid polluting cluster with pools
 50e58b9 ceph.spec.in:  remove conditional checks on tcmalloc
 5c1782a debian/rules:  Fix tcmalloc breakage
 6d348a1 mon: cache osd epochs
@@ -21736,6 +25266,7 @@ b631cc6 doc: Added ceph-deploy package management (install | uninstall ) section
 d85c690 doc: Added new quick start preamble and index.
 3ff7eef doc: Added ceph-deploy preflight.
 9365674 doc: Added ceph-deploy quick start.
+2211b1d Fix improperly spaced line.
 ebbdef2 monitor: squash signed/unsigned comparison warning
 5fa3cbf mon: use brute force to find a sync provider if our first one fails
 c880e95 rgw: fix compilation for certain architectures
@@ -21746,6 +25277,8 @@ f2df876 rgw: fix bucket listing when reaching limit
 7144ae8 rgw: fix bucket count when stating account
 1670a2b rgw: trivial cleanups post code review
 98f532e Makefile.am: Add -lpthread to fix build on newer ld in Raring Ringtail
+f21dcdc ceph config data goes in conf, not config
+df4105b ceph config data goes in conf, not config
 741f468 mon: fix Monitor::pick_random_mon()
 cbc3b91 mon: mark PaxosServiceMessage forward fields deprecated
 77c068d mon: fix double-forwarding check
@@ -21757,10 +25290,15 @@ f768fbb client: re-fix cap releases
 2146930 mon: do not forward other mon's requests to other mons
 a5cade1 PG: clear want_acting when we leave Primary
 3ce35a6 mon: get own entity_inst_t via messenger, not monmap
+a0acdcf Use get('field', default) to assign downburst values for vps.
 303e739 radosgw: receiving unexpected error code while accessing a non-existing object by an authorized non-owner user
 407ce13 PendingReleaseNotes: these are now in the release-notes.rst
 4af93dc doc/release-notes: add note about sysvinit script change
 cd7e52c init-ceph: use remote config when starting daemons on remote nodes (-a)
+ae00c60 temporarily add cephfs debugging to overrides
+fb17d37 Revert "turn on debugging for MDS and Client in FS runs"
+35cf122 temporarily add cephfs debugging to overrides
+5d5e0a6 Revert "turn on debugging for MDS and Client in FS runs"
 d90b0ca gen_state_diagram.py: fix function name
 1ee8f39 gen_state_diagram.py: fix naming of global variables/constants
 d9f8de1 gen_state_diagram.py: add some missing spaces around operators
@@ -21778,6 +25316,7 @@ f601eb9 test_mon_config_key.py: fix bad indentation
 226ff52 perf-watch.py: fix naming of local variable
 148710f perf-watch.py: add missing space after comma
 dffa9ee perf-watch.py: remove unnecessary semicolons
+6b8f1c6 repair_test.py: Additional test cases
 ac3dda2 scrub clears inconsistent flag set by deep scrub
 ba527c1 doc/release-notes: enospc note
 2075ec6 doc/release-notes: 0.61 cuttlefish notes
@@ -21800,19 +25339,26 @@ a40772b osd_types: add last_became_active to pg_stats
 d196b5b OSD: don't report peers down if hbclient_messenger is backed up
 49eeaeb Messenger: add interface to get oldest queued message arrival time
 297c671 DispatchQueue: track queued message arrival times and expose oldest
+fd750da Add changes to make teuthology suites work on vms.
 48631c1 mon: revert part of PaxosService::is_readable() change
+b7aaa19 Check downburst paths. Display an appropriate error message if an executable downburst cannot be found.
 0093d70 librbd: fix i386 build
 857c88e librbd: add read_iterate2 call with fixed argument type
 6c798ed librbd: implement read not in terms of read_iterate
 95ed73a mon: drop forwarded requests after an election
 ab25707 mon: requeue routed_requests for self if elected leader
 4b07d69 mon: track original Connection* for forwarded requests
+526863e remove ext4 from rados thrashing for now
 8402107 test_filejournal: adjust corrupt entry tests to force header write
+0b50cb5 Increase IPMI attempts to try to get around flaky IPMI.
+7fbe467 ceph.conf: enable full debugging on the mon
 556bb64 rgw: stream list buckets (containers) request
+98cc648 Increase IPMI attempts to try to get around flaky IPMI.
 ccbc4db init-ceph: fix (and simplify) pushing ceph.conf to remote unique name
 7ad63d2 ceph-disk:  OSD hotplug fixes for Centos
 3dd9574 doc: Usage requires --num_osds.
 b71ec9c doc: Added some detail. Calculating PGs, maps; reorganized a bit.
+bbcba29 set 'filestore flush min = 0' for all ffsb jobs
 b73ef01 mon: [MDS]Monitor: remove 'stop_cluster' and 'do_stop()'
 f42fc0e mon: MDSMonitor: tighter leash on cross-proposals to the osdmon
 fa77e1e mon: PaxosService: add request_proposal() to perform cross-proposals
@@ -21825,6 +25371,7 @@ b33fae4 mon: commit LogSummary on every message
 1164345 ceph-mon: Attempt to obtain monmap from several possible sources
 9ba3240 mon: Monitor: backup monmap prior to starting a store sync
 de5d1da rgw: don't send tail to gc if copying object to itself
+48d89c6 ceph-deploy: fix stop command
 85fd2ca mon: make 'osd pool rmsnap ...' idempotent
 43d62c0 mon: make 'osd pool mksnap ...' idempotent
 08e3ec1 mon: make 'osd blacklist rm ...' idempotent
@@ -21839,6 +25386,8 @@ de5d1da rgw: don't send tail to gc if copying object to itself
 1fa719d doc: Aesthetic improvements. Removed unnecessary graphic and overrode margin for h3 tag.
 3749ffe doc: Added a scenario to PG troubleshooting.
 cf91594 doc: Changed usage to "bucket-name". Description was okay.
+861ac49 added ceph.client.admin.keyring on the client to run rbd and rados tests
+2bbac6e added extra packages required by ceph-deploy for rbd and rados tests
 870f47c tools/ceph-filestore-dump: Implement remove, export and import
 88d9ee1 ReplicatedPG::_finish_mark_all_unfound_lost: only requeue if !deleting
 0e15555 ReplicatedPG::_applied_recovered_object*: don't queue scrub if deleting
@@ -21880,6 +25429,9 @@ b021036 PG,ReplicatedPG: move intrusive_ptr declarations to top
 016e975 FileStore::_do_copy_range: read(2) might return EINTR
 07a80ee FileStore::_do_clone_range: _do_copy_range encodes error in return, not errno
 5e4b8bc config: clarify 'mon osd down out subtree limit'
+60e7fb4 turn on debugging for MDS and Client in FS runs
+cb1e8ed turn on debugging for MDS and Client in FS runs
+e21fdf8 ior-cfuse: remove the binary/ dir that make install creates
 cd2cabe doc: Trimmed toc depth for nicer visual appearance.
 44aa696 doc: Added new PG troubleshooting use case.
 2e3579e doc: Updated title.
@@ -21895,6 +25447,8 @@ b0c1001 mon: ensure 'osd crush rule ...' commands are idempotent
 f379ce3 mds: fix setting/removing xattrs on root
 7e4f80b debian/control:  Fix typo in libboost version number
 f4bc760 build:  Add new package dependencies
+4efed08 ceph-deploy: stop daemons, archive, then purge[data]
+a3c4835 ceph.conf: lower mon disk avail warning threshold
 fd678ea debian/control:  Fix typo in libboost version number
 4b34b0e mon: PaxosService: fix trim criteria so to avoid constantly trimming
 86c1ea1 build:  Add new package dependencies
@@ -21920,17 +25474,34 @@ cd4b242 doc: Removed logging. Added references. Reorganized and edited.
 22a5cb6 doc: Removed. Not in toc, and otherwise generates a warning.
 84b0ec2 doc: Updated hyperlink.
 808ad25 doc: Removed fragmented logging info. Consolidated into one doc.
+77cf9f4 misc: Fix for case status['description'] == None
 3c144e9 rbd: Only allow shrinking an image when --allow-shrink flag is passed
 7b408ec client: disable invalidate callbacks :(
 db37bd8 rbd: add --no-progress switch
 8f21beb leveldbstore: handle old versions of leveldb
+551860f rgw suite for upgrade task
 085b3ec mds: change XLOCK/XLOCKDONE's next state to LOCK
 efe7399 mds: pass proper mask to CInode::get_caps_issued
 f25f922 mon: Monitor: convert osdmap_full as well
 1260041 mon: PaxosService: add helper function to check if a given version exists
 246b811 osd/PG.cc: initialize PG::flushed in constructor
+2bcbf18 radosgw-admin-rest: Add task for RESTful admin api.
+8db7b08 radosgw-admin-rest: Add task for RESTful admin api.
 a993d25 Fix policy handling for RESTful admin api.
+3f78cb0 misc: Check for 'None' string from yaml
+438410a lock: Fix import cycle breakage
+dc45709 Revert "Revert "Install.py: Prevent prompts from breaking apt""
+1b65b8f Revert "Install.py: Prevent prompts from breaking apt"
+df3d70f Install.py: Prevent prompts from breaking apt
+750c69b misc: Check for 'None' string from yaml
+1727d9b misc: Use pythonic 'is not None' for jobid case
+c1d47a2 misc: Fix name parsing
+b37f43d lock: Fix import cycle breakage
+72cbf11 misc: Use job id and make short path for testdir
 f3527d4 Fix policy handling for RESTful admin api.
+e8aa0d8 ceph-deploy: purge before archiving
+4befae4 ceph-deploy: purge before archiving
+33a6693 scheduled_suite.sh: check clock skew at start and end of run
 544eb9b qa: pull qemu-iotests from ceph.com mirror
 8994566 librbd: flush on diff_iterate
 efce39e doc: Cherry-picked from master to next. Uses ceph-mds package during upgrade.
@@ -21947,18 +25518,36 @@ fb840c8 osd/PG.cc: initialize PG::flushed in constructor
 638eb24 librbd: print seed for all DiffIterate tests
 b343c52 doc: Changed MDS upgrade to use ceph-mds package.
 785b25f Fix: use absolute path with udev
+1bb7a05 ceph-deploy qa suites. moved a level up to make it run in the nightlies.
+90e2a2e misc: Fix close() call to pass in fd
+cde1429 misc: Fix bug in calling function remote_mktemp()
+3b0d915 misc: Use tempfile.mkstemp() instead of tempnam
+f69ddaf Revert "Revert "Install.py: Prevent prompts from breaking apt""
 b4fc83c doc: Overhauled Wido's CloudStack document.
 d801ca3 ceph.spec.in: use %{_sbindir} macro again
+50aaece misc: Use pythonic 'is not None' for jobid case
+26d7537 rbd: add qemu-iotests
+d0a81f6 rbd: add qemu-iotests
 98de67d qa: add workunit for running qemu-iotests
+715245e misc: Fix name parsing
 a0ae2ec os: bring leveldbstore options up to date
+38e014f prevent osd warnings from failing CephFS tests
+cfcc9c7 prevent osd warnings from failing CephFS tests
+67a616a Revert "Install.py: Prevent prompts from breaking apt"
+52cdaae kernel.py: put submenu name in 01_ceph_kernel if necessary
+2c7b1f3 peer.py: we can't assume pg query state will match mon pg state
 6b98162 mds: output error number when failing to load an MDSTable
+c2b0828 Revert "Install.py: Prevent prompts from breaking apt"
+a6b84a5 lock: Fix import cycle breakage
 ae71b57 init-radosgw.sysv:  New radosgw init file for rpm based systems
 f875c0c mds: only go through the max_size change rigamarole if the client requested it
 9c18fd6 mds: Locker needs to remember requested max_size changes from clients
 87ff4af doc: Added additional note on first step of adding OSD.
 2b6719f doc: Added info on OSD naming, and example.
+52aec32 kernel.py: put submenu name in 01_ceph_kernel if necessary
 a01bc3d doc: Moved ceph osd create to first step.
 74cdbc3 doc: Changed libvirt example to use virtio.
+fa2049f misc: Use job id and make short path for testdir
 4977f3e mds: Delay export on missing inodes for reconnect
 3a1cf53 client:  Unify session close handling
 06d05e5 LibrbdWriteback:  complete writes strictly in order
@@ -21978,13 +25567,18 @@ f5b81d8 ObjectCacher: deduplicate final part of flush_set()
 4b65673 test_stress_watch: remove bogus asserts
 3888a12 test: update rbd formatted-output for progress changes
 718fa0c pool should be root in osd set syntax for bobtail
+e2938f4 Fix for kdb: doesn't work on mira nodes
+9c9baef Fix: kdb: doesn't work on mira nodes
 95374c6 config: fix osd_client_message_cap comment
 a48739d FileJournal: clarify meaning of start_seq and fix initialization
 88ab841 Revert "global: call config observers on global_init (and start logging!)"
+7eb09ef changed "master" to "next" Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
+42dfc75 task yaml for upgrade task
 be801f6 mon: Use _daemon version of argparse functions
 c76bbc2 ceph_argparse: add _daemon versions of argparse calls
 d7b7ace Pipe: call discard_requeued_up_to under pipe_lock
 4cb18b5 journaler: remove the unused prefetch_from member variable
+826ad0d teuthology: fix for ssh-keys-task
 3cdc61e mds: Keep LogSegment ref for openc backtrace
 edc9ddf mds: fix journaler to set temp_fetch_len appropriately and read the requested amount
 0031949 md_config_t: change from class to struct as the name indicates
@@ -22007,6 +25601,11 @@ f7070e9 msgr: add second per-message throttler to message policy
 79b7144 librbd: fix DiffIterateStress test
 b083dec rgw: translate object marker to raw format
 be6961b Allow creation of buckets starting with underscore in RGW
+fa70eb8 radosgw-admin: Test bucket list for bucket starting with underscore.
+4102884 Install.py: Prevent prompts from breaking apt
+5995ae7 Install.py: Prevent prompts from breaking apt
+4824daf teuthology: extend Hadoop task to support branches
+4258807 teuthology: remove previous test ssh keys
 6ef9d87 FileJournal: introduce start_seq header entry
 f12a5ed FileJournal: fill in committed_up_to for old headers
 e5cecd7 debian/ceph-test.install: add installed but not packaged files
@@ -22019,15 +25618,22 @@ a8362f5 ceph.spec.in: reorder and fix ceph file list
 61a2be3 ceph.spec.in: don't move ceph-disk* and ceph-create-keys around
 4d16f38 Makefile.am: install ceph-* python scripts to /usr/bin directly
 354c41e ceph.spec.in: use %{_sbindir} instead of /usr/sbin
+939223a workunit: sudo rm -rf ...
 29f9db1 mon: fix crush unit tests for idempotency
 336c4ae mds: verify mds tell 'dumpcache <filename>' target does not exist
 e6d4582 mon: make 'osd crush unlink ..' idempotent
+21d6af2 rados: whitelist quota warning in cluster log for pool quota test
+5c80201 scheduled_suite.sh: check clock skew at start and end of run
 0d6ddd9 mds: do not go through handle_mds_failure for oneself
+8129bff Implement full reinstallation of a VM system.
 c8cbec2 Makefile.am: fix build of ceph_test_cors
+cf4bf09 ceph.conf: lower mon disk avail warning threshold
 0ce09fa client: Kick waiters for max size
 f9b7dff client: log inode when waiting on max size
 85a77df mon: limit warnings about low mon disk space
 ea7c6c6 librados: Fixes to list_snaps test changes
+982e1ed radosgw-admin: Add test of duplicate user email specification.
+fb34c38 radosgw-admin: Test subuser mask durability when creating new key.
 a793853 ceph-disk:  CalledProcessError has no output keyword on 2.6
 b48d6b4 librados: test for EINVAL on bad list_snaps() ioctx snapid
 65a6975 librados: set SNAP_DIR on listsnaps command
@@ -22075,12 +25681,16 @@ a5ce246 rgw/rgw_user.h: move initialization in initialization list
 07be36c mds/Migrator.cc: prefer prefix ++operator for iterator
 574051f unit tests for FileStore::_detect_fs when running on ext4
 aee6549 fix nspace assignment in LFNIndex::lfn_parse_object_name
+7b3973f radosgw-admin: cluster info -> zone info
 dd19d69 rgw: Create RESTful endpoint for user and bucket administration.
 aa14da2 doc/release-notes: v0.60
+d81babf repair_test: add test for repairing read errs and truncations
+502e439 repair_test: add test for repairing read errs and truncations
 fc13f11 PG::_scan_list: assert if error is neither -EIO nor -ENOENT
 3fa3b67 FileStore: rename debug_delete_obj to debug_obj_on_delete
 40070ce PG: _scan_list can now handle EIO on read, stat, get_omap_header
 fcec1a0 ObjectStore: add allow_eio to read, stat, get_omap_header
+8b49eb1 rados: test mon config keys
 76ad956 librados: test empty ObjectWriteOperation
 690e4df Makefile.am: disable building ceph_test_cors when radosgw is not enabled
 f26f7a3 (tag: v0.60) v0.60
@@ -22200,6 +25810,7 @@ fbcc64d mds: fix MDCache::adjust_bounded_subtree_auth()
 573a4ae mds: process finished contexts in batch
 5cbaae6 mds: preserve subtree bounds until slave commit
 c344ff1 fix null character in object name triggering segfault
+b4a276e rados: test pool quotas when testing api
 e611937 mon: OSDMonitor: add 'osd pool set-quota' command
 9532808 doc: Added entries for Pool, PG, & CRUSH. Moved heartbeat link.
 bcc5c65 doc: Added heartbeat configuration settings.
@@ -22212,11 +25823,18 @@ e9b3f2e ceph-disk list: say 'unknown cluster $UUID' when cluster is unknown
 3da3129 ReplicatedPG: check for full if delta_stats.num_bytes > 0
 9b09073 mon: Monitor: check if 'pss' arg is !NULL on parse_pos_long()
 e2a936d common: util: add 'unit_to_bytesize()' function
+2a1cdda locker: try to make up for apache timeouts
 23c2fa7 osd: osd_types: add pool quota related fields
 655a950 PG::_scan_list: record read errors in the Scrubmap::object
 dab3dac osd_types: add read_error flag to Scrubmap::object
+aeb1bbe do not archive on pass if 'archive-on-error: True'
+a40b850 locker: log desc too
 562e171 ceph-disk: handle missing journal_uuid field gracefully
+90544fa marginal: remove incomplete librbd collection
+9e2ca49 marginal: remove partial collection thrash
+ba4f8de marginal: add multimds collection
 4c4d559 librados: move snapc creation to caller for aio_operate
+9f46f47 run: clean up machine_type thing
 3cbd036 client: update cap->implemented when handling revoke
 4f8ba0e msgr: allow users to mark_down a NULL Connection*
 2da57d7 debian: Add git to Build-Depends (need by check_version script)
@@ -22229,6 +25847,8 @@ b982516 ceph.spec.in:  Add python-argparse dependency
 a021ce6 mon: ConfigKeyService: stash config keys on the monitor
 e950ed0 ceph.spec.in:  Move four scripts from sbin to usr/bin
 06a1e9c ceph: propagate do_command()'s return value to user space
+e8afa45 ceph_manager: retry set_pool_property on EAGAIN
+b815268 run: machine-type: foo, not machine_type: foo
 e91405d ceph: propagate do_command()'s return value to user space
 f804892 PG: update PGPool::name in PGPool::update
 1993c2a PG: use int64_t for pool id in PGPool
@@ -22255,14 +25875,18 @@ b9c8d4f librados.h: fix comment describing rados_pool_list
 10626f7 mon: MonitorDBStore: add 'get_iterator()' method to iterate over a prefix
 d3e4904 ceph-disk: reimplement list_all_partitions
 20d594a ceph-disk: reimplement is_partition
+3c3969d rados test suite for upgrade task
 d89a63c doc: Updated usage.
+d2a021b rbd test suites for upgrade task
 bc72492 doc: Updated usage.
 4362934 ceph-disk: conditionally remove mount path
 2ae297c ceph-disk: ignore udevadm settle return code
 9844500 upstart: try udevadm settle if osd journal isn't present
+e86273e fs test suite for upgrade task
 89c6901 osd: disallow classes with flags==0
 f2dda43 osd: EINVAL when rmw_flags is 0
 50b831e osd: fix detection of non-existent class method
+ac505d0 suites for upgrade
 b0103ab OSD fails to start with error assert(values.size() == 2)
 4db4a02 Fix radosgw upstart job
 5aa5bc2 mds: Delay session close if in clientreplay
@@ -22272,6 +25896,7 @@ b0103ab OSD fails to start with error assert(values.size() == 2)
 a8338ee mkcephfs.in: provide a valid rdir for MONs on -a
 38f845d mkcephfs.in: use mktemp instead of dd+md5sum+awk
 0e009b1 mds: Clear backtrace updates on standby_trim_seg
+6fd7ebd task/mds_thrash: Log mds dump after long delay
 0c43ac3 ceph_common.sh: fix typo in comment
 17029a6 java libcephfs: add serialVersionUID to some Exception classes
 10191a9 CephMount.java: remove unused import
@@ -22281,6 +25906,9 @@ a8338ee mkcephfs.in: provide a valid rdir for MONs on -a
 503ed9e testing: fix hadoop-internal-test
 7c1e9be client: Don't signal requests already handled
 77230d4 fix append to uninitialized buffer in FlatIndex::created
+bc54a8b locker: make desc optional
+07e324a ceph.conf: osd debug op order = true
+8eabe1b locker/api: fix DELETE
 9eda8e5 ceph-disk:  udevadm settle before partprobe
 14cef27 mds: CInode::build_backtrace() always incr iter
 bb68a20 java: fix test name typo
@@ -22289,8 +25917,14 @@ b9141e9 client: Cleanup request signaling
 dfb46b9 client: Always cleanup request after safe
 4dda138 client: Remove got_safe from MetaRequest
 f957e72 CrushWrapper.cc: remove some std::string::c_str() calls
+0b72c8d lock: pass desc to lock operation; leave on unlock
+56820b3 locker: set desc on lock
+6208af9 locker: clear desc on unlock
 7f65c51 doc/release-notes: extra note for v0.56.4
 82b129f doc/release-notes: v0.56.4
+c50b143 thrashosds: add test_backfill_full
+97a5c05 thrashosds.py: fix line length
+e594fcb locker: log updates
 491795e Improve test by getting cloneid from my_snaps vector
 70e0ee8 rgw: bucket index ops on system buckets shouldn't do anything
 b7d7e68 ceph-disk: rename some local variables in list_*partitions
@@ -22299,17 +25933,21 @@ b7d7e68 ceph-disk: rename some local variabels in list_*partitions
 86e55f5 ceph-disk: add some more docstrings
 543327b ceph-disk: print subprocess.CalledProcessError on error
 2d26bcc ceph-disk: fix indention
+5d3fed4 ceph-deploy: purge /var/lib/ceph data on finish
 9db05a2 java: pretty print Ceph extent
 486e5b9 java: support ceph_get_osd_addr
 bc65ace java: support ceph_get_osd_crush_location
 60fe136 java: support ceph_get_file_extent_osds
 78fd096 PendingReleaseNotes: mention renaming of cluster to zone
+4e68c20 verify /var/lib/ceph not present on start
+466fede install: need sudo when purging /var/lib/ceph
 ece4348 client: don't set other if lookup fails in rename
 836b97f test/libcephfs: Test rename error cases
 8e6a970 client: Fix rename returning ENOENT for dest
 3f5f432 MDSMap: improve health check
 e77cd59 MDSMap:: constify a bunch of methods
 838f1cd preserve /var/lib/ceph on deb/rpm purge
+4a6e3b9 install, nuke: explicitly purge /var/lib/ceph
 4f2051c mon: factor out _get_pending_crush() helper
 eae1532 mon, crush: add some tests to build a DAG via the cli
 a60d7df crush, mon: unlink vs remove
@@ -22330,6 +25968,7 @@ c524e2e common/MemoryModel: remove logging to /tmp/memlog
 6a7ad2e init-ceph: clean up temp ceph.conf filename on exit
 0517345 init-ceph: push temp conf file to a unique location on remote host
 f463ef7 mkcephfs: make remote temp directory name unique
+2ed9f64 teuthology: cleanup client dirs for workunit task
 853dd35 doc: Added {id} argument to OSD lost.
 020fb1a rgw: s/cluster/zone
 8bd7915 rgw: generic decode_json for containers
@@ -22346,6 +25985,7 @@ e5940da os/FileJournal: fix aio self-throttling deadlock
 a583029 mon/LogMonitor.cc: remove twice included <sstream>
 9dd5b20 mon/AuthMonitor.cc: remove twice included <sstream>
 1144260 common/Formatter.h: remove twice included <list>
+4d28a65 stop ignoring osd leaks
 000310f ReplicatedPG: add debug flag to skip full check at reservation
 29a288f ReplicatedPG: replica should post BackfillTooFull in do_scan if full
 f9c8190 PG: halt backfill on RemoteReservationRejected in Backilling
@@ -22375,6 +26015,7 @@ eb8dd0d PG: clarify PG::deleting comment
 42a71c1 FileJournal: quieter debugging on journal scanning
 6740d51 FileJournal: quieter debugging on journal scanning
 7e8cc57 doc: Fixed some typos.
+8f27fa6 moving client.keyring creation out of ceph task
 7698500 libcephfs: fix ceph_get_osd_crush_location
 a72aaff test: add ceph_rename test
 7ed0be1 rados.py: remove unnecessary semicolon
@@ -22390,9 +26031,13 @@ e2df59e ceph-create-keys: rename log to LOG since it's a constants
 ac9fa43 Fix tips in documentation
 cd96dbe Fix important in documentation
 3305157 Fix notes in documentation
+e7cd598 lock: make do_summary() respect --machine-type
+18a782d qa suites for upgrade task
+8ff32eb stop and restart daemons, as restart only starts them.
 e485471 Update Chef deployment documentation
 97fd7b6 mon: DataHealthService: log to derr instead if we're about to shutdown
 51d62d3 mon: DataHealthService: shutdown mon if failed to obtain disk stats
+b0136f9 use ceph.com/git instead of github
 5bf0331 client/Client.cc: handle error if _lookup() fails
 fc41684 qa/workunits/direct_io/test_sync_io.c: add proper error handling
 a8a5683 test_short_dio_read.c: add proper error handling
@@ -22415,29 +26060,43 @@ cbae6a4 (tag: v0.59) v0.59
 ea26ea0 ceph-disk: remove twice defined function mount
 c57daa3 ceph-disk: remove double defined function get_conf
 57dde5c ceph-disk: rename local variable shadowing builtin
+fabf36d task/ceph:  Revert extra check for running status
 dfb1fbe QuorumService.h: use enum instead of static const int
 6a3aa2a Missed adding rados_types.hpp to package
 c2602d7 ceph-disk: install and package
 f287c6f ceph-disk: simplify command dispatch
 a019753 ceph-disk: consolidate exceptions
 20e4ba5 ceph-disk: consolidate ceph-disk-* into a single binary
+66e27fa suite: shorten subject a bit
 2900bf4 PendingReleaseNotes: fix typo
 1597b3e librbd: optionally wait for a flush before enabling writeback
+c170022 radosgw-admin: Adjust garbage collection settings.
+4d7e1e9 osd: data loss: low space handling
 47f1a94 Makefile: missing header
 020d1b1 mon: use enum instead of static const int
 efc4b12 mon/Paxos: set state to RECOVERING during restart
+511f04f Fixed so that installation works on a brand new CentOS system.
 45843f7 Makefile.am: fix misspelt header name
+1b0369b task/restart:  Handle error from script correctly
 bee5046 mon/PaxosService: handle non-zero return values
 d477594 ceph-disk-prepare: 'mkfs -t' instead of 'mkfs --type='
 9029b09 mds: Handle ENODATA returned from getxattr
 7aec13f mon/PaxosService: fix proposal waiter handling
+62cd409 ceph-deploy qa suite for fs tests
+640c126 ceph-deploy qa suite for rbd tests
+167a26c ceph-deploy qa suite for rados
+2355790 s/dist-upgrade/upgrade
 6774290 Makefile: fix header name
 cecfe41 mon: Monitor: take advantage of the new HealthMonitor class.
 b781400 mon: HealthMonitor: Keep track of monitor cluster's health
 a3751d1 mon: QuorumService: Allow for services quorum-bound to be easily created
 a2ac935 qa: add 16MB direct-io write test
+c55f128 Fixed ceph-fuse mount point cleanup bug
 53c1c48 signal_handler: add func to queue an async signal
 a13ae37 client:  Remove unnecessary set_inode() in _rmdir()
+42e9849 task/restart:  Cleanup in finally
+cd98efe task/restart: Fix check for done
+6fe1dea task/restart: Restart task for testing daemon kill
 5e5e1cd mon/Session.h: prefer prefix ++operator for iterators
 ffaf286 test/filestore/workload_generator.cc: prefer prefix ++operator for iterators
 37fd66e test/filestore/test_idempotent.cc: prefer prefix ++operator for iterators
@@ -22513,7 +26172,14 @@ f8d66e8 OSD: split temp collection as well
 5b022e8 hobject: fix snprintf args for 32 bit
 9ea02b8 ceph_features: fix CEPH_FEATURE_OSD_SNAPMAPPER definition
 ee178fb ceph.spec.in:  Additional clean-up on package removal
+d029679 move osd powercycling tests to rados suite
+9e81ff5 added ceph_health check and a few log messages
+e77f20f fs: drop fuse_use_invalidate_cb: true, since that is now the default
 65c31e1 ceph-fuse: invalidate cache by default
+f7fe5b3 task yaml for basic upgrade
+8b942d6 task yaml for upgrade tests
+9b252dd task yaml for upgrade test
+2230500 rados/thrashers: enable split testing
 f3ad12e test_filejournal: add tests for footer, header, payload corruption
 a22cdc6 FileJournal: add testing methods to corrupt entries
 3b767fa FileJournal,Journal: detect some corrupt journal scenarios
@@ -22523,10 +26189,16 @@ c3725e9 FileJournal: add committed_up_to to header
 de8edb7 FileJournal: queue_pos \in [get_top(), header.max_size)
 f1b031b OSD: expand_pg_num after pg removes
 8222cbc PG: ignore non MISSING pg query in ReplicaActive
+4300f42 Fixed 'clock:' on CentOS
 11650c5 mon: only try to bump max if leader
 80af5fb ceph-disk-activate: identify cluster .conf by fsid
+fa1faa1 added install.upgrade task
+aaf02ab added task for ceph.restart
 6f15dba debian/control:  Fix for moved file
 7370b55 ceph-disk-activate: abort if target position is already mounted
+85a8baa install: do debs for 'Debian'
+46a78af install: el6 -> rpm
+0c75c6b Added el6 install functionality for CentOS systems.
 18525eb rados/test.sh fails in the nightly run
 efd153e debian: add start ceph-mds-all on ceph-mds install
 41897fc debian: add start ceph-mds-all on ceph-mds install
@@ -22594,6 +26266,7 @@ f6500f5 Context: allow C_Contexts to not have a cct, add list_to_context
 ba449ce osd_internals/snaps.rst: add a description of snaps and trimming
 be95af7 PG::read_log: fix assert, split may introduce holes in the log
 1a8c170 OSD: add debugging to start_split and complete_split
+01a40cf Use service instead of initctl to restart rsyslog.
 de22b18 PG: check_recovery_sources must happen even if not active
 7a434d1 FileStore: fix reversed collection_empty return value
 ce4432a HashIndex: _collection_list_partial must tolerate NULL next
@@ -22630,6 +26303,7 @@ cee8786 client: check for mds state before sending messages
 436e5be mon: AuthMonitor: don't return global_id right away if we're increasing it
 b99367b mon: Paxos: only finish a queued proposal if there's actually *any*
 7710ee2 auth: assert if auth_debug = true and secret_id == 0
+67f0aa1 ceph.conf: auth debug = true
 8659b7e libcephfs_jni.cc: prefer prefix ++operator for iterators
 bc77af7 dupstore.cc: prefer prefix ++operator for iterators
 f6b4f3e crushtool.cc: prefer prefix ++operator for iterators
@@ -22648,23 +26322,33 @@ e047260 osd/PG.h: prefer prefix ++operator for iterators
 f59f644 osd/OSD.h: prefer prefix ++operator for iterators
 5e016d9 client/Client.cc: prefer prefix ++operator for iterators
 8998c9f ceph_syn.cc: prefer prefix ++operator for iterators
+740fb85 ceph-deploy: uninstall even when no archive
 b94f4b3 mon/Session.h: prefer prefix --operator for iterators
 1e89f74 mon/Session.h: prefer prefix ++operator for iterators
 69d1eab mds/SnapRealm.h: prefer prefix ++operator for iterators
 62d33e9 mds/SessionMap.h: prefer prefix ++operator for iterators
 d3f9673 mds/MDSMap.h: prefer prefix ++operator for iterators
 89ffc64 mds/CInode.h: prefer prefix ++operator for iterators
+13d0d5a task/ceph_manager: Only reconnect if powercycled
 bd4f1a3 msg/Messenger.h: prefer prefix ++operator for iterators
 95749b3 MonMap.h: prefer prefix ++operator for iterators
 6ae0345 buffer.h: prefer prefix ++operator for iterators
 5d401b0 types.h: prefer prefix ++operator for iterators
 81e00c1 auth/Crypto.cc prefer prefix ++operator for iterators
+6043bd7 schedule_suite.sh: crank up mon logs
+6be6f6c task/thrashosds: Ipmi checking/setup in thrashosds
+8791b37 task/ceph_manager:  Check that ipmi is enabled
+5ef2a04 task/ceph: Recreate /var/run/ceph on powercycle
+fd1e083 lock: tolerate description of None
 af3b163 doc: Fixed syntax error.
+6511950 teuthology: update hadoop task for new code layout
 be6f7ce doc: Added tcp and bind settings and cleaned up syntax a bit.
 77ecdc7 doc: Setting should use network not single IP.
 669d2ba doc: Fixed usage and added note for copying keyring to client. Added hyperlinks.
 7f66ebe doc: Addressed confusion with version numbering.
 4df44bb doc: Added troubleshooting entry for single OSD and single replica.
+9c7717c ceph-deploy suites task yaml with roles
+bdd1fec increasing the default memory of vm to 4 MB
 36c672d Fix typo in ceph usage output
 60639ab mon/Monitor.h: return string instead of 'char *' from get_state_name()
 72c6226 client/hadoop/CephFSInterface.cc: prefer prefix ++operator for iterators
@@ -22699,9 +26383,13 @@ c6becc6 rgw/rgw_rados.cc: use static_cast instead of C-Style cast
 f8bc21a crush: add test for reweighting multiple items in the tree
 9eb0d91 debian: stop ceph-mds before uninstalling ceph-mds
 c021c5c log: drop default 'log max recent' from 100k -> 10k
+a5e1ed2 ceph-deploy: no need to call chef explicitly
+e9c2606 ceph-deploy: use ceph.com git mirror
 46e8fc0 librbd: invalidate cache when flattening
 f2a23dc ObjectCacher: add a method to clear -ENOENT caching
 f6f876f ObjectCacher: keep track of outstanding reads on an object
+fbfe81b fs: add osd and mds message delays to thrash suite
+53cf1f1 fs: move mds thrashing from marginal, add pjd task
 28c0ce6 crush: make item adjust weight adjust *all* instances of the item
 b3b059e crush: whitespace
 c48c76f ceph: fix cli test
@@ -22744,13 +26432,22 @@ f7eec26 SyntheticClient.cc: Clarify calculation precedence for '%' and '?'
 94d370e fusetrace_ll.cc: reduce scope of 'int err' in main()
 5c1dc92 mds/Locker.cc: fix warning about 'Possible null pointer dereference'
 f971a67 mds/Locker.cc: use static_cast instead of C-Style cast
+c6a427b ceph: sudo grep /var/log/ceph/ceph.log
+d9a233c mds_thrash: requery mds status inside the wait loop
 07820f0 mon/MonMap: don't crash on dup IP in mon host
+f1d66d2 schedule_suite.sh: check, but do not sync, clock for each run
+5df43e6 mpi: substitute $TESTDIR
+b4d2234 exec/pexec: substitute $TESTDIR into command
 0dd956c debian: Fix FTBFS because depend on old libboost-dev
 3fbdfd3 Add code to check flag state
+cbfeac9 radosgw-admin: Adjust garbage collection settings.
 0e33490 librados: fix installed header include paths
 c7aa897 ceph_common.sh: add warning if 'host' contains dots
 4fe52a7 Revert "Update conf.py"
 7008992 Update conf.py
+bfacb37 schedule_suite.sh: take machine type
+a457b17 marginal: fix mds thrasher tasks
+6a398a5 regression: add fs/traceless
 776c042 osd: noscrub, nodeepscrub osdmap flags
 cdb4ae3 doc: Added a new network configuration reference document.
 9bcba94 Fix output of 'ceph osd tree --format=json'
@@ -22760,18 +26457,24 @@ da706c1 doc: Format edits. Excised much of network discussion. Added reference t
 881e9d8 osd: mark down connections from old peers
 ba7e815 osd/PG: rename require_same_or_newer_map -> is_same_or_newer_map
 7a9107d mon/Paxos: make old clock skew message more/less suggestive
+36b877a Revert "ceph.conf: osd debug op order = true"
 416b962 Properly format Content-Length: header on 32-bit systems.
 4384e59 rgw: set attrs on various list bucket xml results (swift)
 7cb6ee2 formatter: add the ability to dump attrs in xml entities
 6669e73 rgw: don't iterate through all objects when in namespace
+1586e07 fs: test traceless replies from mds
 3b260f3 Add filestore_replica_fadvise config option default true
 f52ec6d osd: fadvise replica data don't use
 08d5a28 Avoid sending a success response when an error occurs.
+e7c079f rados.py: add option to do many short runs
 edd0a1c Fix FileStore to notice if filestore_flusher config is changed
+09e14bf ceph-deploy task
 eb9b5f0 client: move annoying traceless handling into verify_reply_trace()
 3f1b7fd client: debug async cache invalidation
 3a7233b mds: pass created_ino back to client on replayed requests
 0bcf2ac mds: track created inos with completed request ids in session info
+e0ad6fa added 'extras' to install extra packages excluding ceph
+75afff7 task for ceph-deploy sample run
 977a1cb client: debug created ino
 974dc84 client: unlink old_dentry on traceless rename reply
 bc92b40 client: force lookup on traceless reply
@@ -22783,8 +26486,12 @@ d87035c client: root can write to any file
 80e122f client: pass ptarget from link, create, mkdir, symlink, setattr
 29abaf6 client: handle traceless replies from make_request()
 1e2864a osd: increate default pg log size from 1000 -> 3000
+cef6e5d lock: remove description when unlocking
 a714c16 vstart.sh: osd debug op order = true
 d43384d Set weight of an osd based on df. priority to Weight in ceph.conf
+6137a70 marginal/multiclient: /tmp/cephtest -> $TESTDIR
+3a951df marginal/osd_powercycle: restructure collection
+4f68e3e ceph.conf: osd debug op order = true
 e6caf69 config: note which options are overridden by common_preinit()
 7c208d2 common: reduce default in-memory logs for non-daemons
 a58eec9 init-ceph: fix run dir
@@ -22795,7 +26502,11 @@ cb3ee33 ObjectCacher: fix debug log level in split
 a319f5c debian: require libsnappy-dev for ceph
 ee158ed libcephfs: return osd location from crush map
 e694ea5 release-process.rst:  Fix typos
+da1e63b testing: fix logic in determining Hadoop master
+6b0a8d3 testing: specify ceph.conf path in core-site.xml
 e8da4bf client: expose extent/osd mapping
+1660d89 install: install libcephfs-java
+daad53f ceph-fuse: install via install.py
 32407c9 ceph-disk-prepare: move in-use checks to the top, before zap
 8550e5c doc/release-notes: v0.58
 b244b87 Update doc/radosgw/s3/python.rst
@@ -22825,6 +26536,7 @@ d5a453f tools: ceph: add 'ceph df' usage
 de6d0a2 mon: Monitor: 'ceph df'
 e384798 README:  Add libsnappy-dev to build requirements.
 14daccc doc: missing blank line in radosgw/config-ref
+c0b03a1 fs:  Add fuse_use_invalidate_cb option
 e01c15b librados.hpp: replace paris with pairs
 d5ec9ca doc: Instruction should have been noout, not nodown.
 d54ded4 doc: Fixed OSD pool config settings. Should be in [global], not [osd].
@@ -22916,9 +26628,12 @@ cfcacea debian: require cryptsetup-bin
 7f7b2e7 doc: Removed auth entries from general. Part of an auth-specific section now.
 712fca6 doc: Added comment to enable message. Added link to Auth Config Reference.
 c0ed901 doc: Added Auth Config Reference as standalone config reference.
+0997954 Implement email task.
+c5b55f9 Fix pass/fail display on exit.
 f62c5ab osd: allow log trimming during recovery
 c980b76 ceph-fuse: add ceph options for all current fuse options
 cf0d4f8 mon: MonitorDBStore: return -ENOENT on get() if key doesn't exist
+1e6e8ae ceph-fuse: fix install_debs call with new syntax
 1a581d0 kv_flat_btree_async.cc: remove unused variables
 b6884e8 kv_flat_btree_async.cc: remove some unreachable break statements
 ebdf67f key_value_store/cls_kvs.cc: remove unused variable dupmap
@@ -22946,6 +26661,10 @@ f811f72 ceph.spec.in: fix leveldb handling
 37d148b configure.ac: check for libsnappy
 83dbaf6 add src/leveldb to .gitignore
 8162215 remove leveldb from master branch
+c0c3383 install: poll for packages if wait-for-package: true
+b33714a test: add hadoop-internal test
+454e161 install: fix branch/tag/sha1 selection
+f985106 test: update java tests
 ea546ae Build: Change build to always use system leveldb
 a34e7c6 ceph-test.install:  remove cepkfs-test.jar
 75ce88f doc: fix rst for watch/notify internals
@@ -22956,6 +26675,7 @@ ac9ed33 client: allow change file owner or group only
 7bd8400 client: use get_filehandle
 3cc0551 client: fix log data
 c5d78f4 libcephfs: fix default parameters document for ceph_open_layout
+d908c30 ceph_manager: use an exception type
 b5c9e59 PendingReleaseNotes: mention ceph-mon requiring the creation of data dir
 ebf4717 mon: give useful errors to user when store is missing
 cf20ea6 mon: Monitor: check for an argument to 'quorum' before reading the array
@@ -22977,6 +26697,8 @@ f0ae380 test: removing dead code
 5006028 test: updating libcephfs-java tests
 4ce3461 Client.cc: reduce the scope of some variables
 60b9b10 Client.cc: use static_cast instead of C-Style cast
+e00bff2 teuthology: remove CEPH_JAVA_PATH
+9af61cb teuthology: add an extra_packages flag to install
 26e8577 Paxos.h: pass string name function parameter by reference
 dc55c29 Typo: pool should be root in rados/operations/crush-map.rst
 ef20382 osd/ReplicatedPG.cc: remove unused variables
@@ -22997,13 +26719,20 @@ ed83d4b Paxos.cc: use empty() instead of size()
 7a81b07 Monitor.cc: use empty() instead of size()
 1c8ffc5 ceph.spec.in:  Create placeholder directorys under /var/lib/ceph (Bug 4119)
 aa79077 configure.ac:	Add test for c++ compiler.
+51fa5fb nuke: blow away /home/ubuntu/cephtest too
+3ebabb3 Add timer.py and display summary info in run.py.
+b744f42 radosgw-admin: Bug fixes for issue 4251.
+0b968f5 Add rbd locking/fencing test
 ccdafa0 test_lock_fence.sh, rbdrw.py: rbd lock/fence test
+bee8dff nuke: blow away /home/ubuntu/cephtest too
+8ce9490 rgw: no lockdep on radosgw
 98408f5 mon: PaxosService: remove lingering uses of paxos getters and wait methods
 9d472ca systest: restrict list error acceptance
 b64d261 systest: fix race with pool deletion
 9e6025b doc: Added subnet example and verbiage to network settings.
 5e5530b doc: Added content to remove REJECT rules from iptables.
 9c693d7 test_rbd: move flatten tests back into TestClone
+3e8d11b Add timer.py and display summary info in run.py.
 55bd768 qa: enable watch-notify dependent test
 345aa2d test_rbd: close image before removing it
 9ab0fd7 doc: Added a small ref section for osd config reference.
@@ -23015,8 +26744,13 @@ b0271e3 systest: fix race with pool deletion
 6722533 PG::build_scrub_map: detect race with peering via last_peering_reset
 04ee8f4 ReplicatedPG::C_OSD_CommittedPushedObject: use intrusive_ptr for pg
 a01dea6 ReplicatedPG::C_OSD_CommittedPushedObject take epoch submitted
+24eeb0d rbd: drop udev
+d733736 exec, pexec: set TESTDIR
+31ef4a1 ceph.conf: debug ms = 1 for monitors
+a60e7b5 rbd: set TESTDIR when running xfstests
 c453734 librbd: remove unused internal method
 5806226 librbd: drop snap_lock before invalidating cache
+9d92ad6 rados: crank up mon debugging for the mon thrashing tests
 3fdf439 doc: Moved admonition to kernel mount.
 516935b doc: Added verbiage to describe single host deadlocks.
 67103a8 Monitor.cc: fix -Wsign-compare
@@ -23025,6 +26759,7 @@ cf167a1 debian: add new files
 6ae10fe Client.cc: don't pass c_str() if std::string is expected
 350481f Paxos.h: fix dangerous use of c_str()
 9217c4a debian: make gdisk, parted requirements, not recommendations.
+8dcbf8b specify deterministic admin socket path for admin_socket.py tests
 35c951f Minor wording change.
 6bae2a5 Grammar typo
 3896896 Changes to the OS support, multi-data center, and hypervisor questions.
@@ -23041,6 +26776,8 @@ ab7039d doc: Added references from monitoring OSD to troubleshooting OSD.
 64267eb test/librados/watch_notify: fix warning
 53586e7 ceph-object-corpus: re-update
 2dae6a6 PG::proc_replica_log: oinfo.last_complete must be *before* first entry in omissing
+05a8779 rbd_xfstests: re-enable test 049
+d8021a1 nuke: sudo for killall
 3105034 objecter: don't resend linger ops unnecessarily
 15bb9ba objecter: initialize linger op snapid
 5648117 Add test for list_watchers() C++ interface
@@ -23054,8 +26791,16 @@ dc18122 osd/PG: fix typo, missing -> omissing
 6c08c7c objecter: separate out linger_read() and linger_mutate()
 de4fa95 osd: make watch OSDOp print sanely
 dd007db ceph_common.sh: fix iteration of items in ceph.conf
+045a866 task: ceph: create monitor data directories prior to --mkfs
 6cb5374 ceph-conf.rst: missing '=' in example network settings
+c1b75c6 task: mon_thrash: Thrash multiple monitors and 'maintain-quorum' option
+d28bb05 task: mon_thrash: Add 'seed' and 'store-thrash' options
+278be21 ceph.conf: log file, not log dir
 ce7ffc3 PG::proc_replica_log: adjust oinfo.last_complete based on omissing
+c85ba56 install: ignore apt-get update failures on package removal
+17be13b ceph: fix log, /var/run/ceph stupid
+a862d8b Fix unused vars, unused imports, and aliasing
+23669be radosgw-admin: fix errors found by pyflakes
 79f09bf MDS: remove a few other unnecessary is_base() checks
 9f82ae6 mds: allow xattrs on the root inode
 6bd8781 mds: use inode_t::layout for dir layout policy
@@ -23066,13 +26811,18 @@ fea7768 osdc/Objecter: unwatch is a mutation, not a read
 c33c51f FileStore: add _fsetattrs
 2ec04f9 FileStore::_setattrs: only do omap operations if necessary
 83fad1c FileStore::_setattrs no need to grab an Index lock for the omap operations
+8b0eef2 ceph_manager: fix asok string formatting
 ad00fc7 Fix failing > 4MB range requests through radosgw S3 API.
+5c0a2f4 ceph: make /var/run/ceph writeable by non-root too
 96896eb Handle empty CONTENT_LENGTH environment variable.
+1d62baf ceph: fix /var/log/ceph chown/cmod typo
 c83a01d Fix failing > 4MB range requests through radosgw S3 API.
+e1573ea ceph.conf: use default locations for admin socket; fix client log file
 4277265 osd: an interval can't go readwrite if its acting is empty
 a1ae856 librbd: make sure racing flattens don't crash
 995ff0e librbd: use rwlocks instead of mutexes for several fields
 e0f8e5a common: add lockers for RWLocks
+2574d87 make /var/log/ceph writeable by non-root; make clients log to it
 6d8dfb1 osd: clear recovery state on pg removal
 94e5dee test: fix run-rbd-tests pool deletion
 6612b04 ceph-object-corpus: use temporary 'wsp.master.new' corpus until we get merged into master
@@ -23088,7 +26838,16 @@ b33d4ea mon: Paxos: get rid of slurp-related code
 cd4de77 mon: PaxosService: rework full version stashing
 86f6a34 mon: Paxos: trim through Paxos
 a5e2dcb mon: Single-paxos and key/value store support
+4431e1e install: be slightly more efficient
+00986ce install: be more careful about package removal
+7d8a72a ceph-fuse: install -dbg package, too
+425f5dd clusters: add mds to fixed-1.yaml
+e747abe rbd/librbd: fix the cache mode facet
+c64cd20 radosgw-admin: fix sleep syntax error
 5551aa5 mds: parse ceph.*.layout vxattr key/value content
+ecb563b install: install libcephfs1[-dbg]
+28f11d0 ceph_manager: drop extra line
+9996bdb run: print pass/FAIL as final line
 0202bf2 ReplicatedPG: allow multiple watches in one transaction
 9a399af doc: add some internal docs for watch/notify
 661a283 librados/: include watch cookie in notify_ack
@@ -23107,6 +26866,9 @@ eb0f49d rgw_acl: Support ACL grants in headers.
 04f3fe4 mon: fix new pool type
 2e1b02b osd: lock pg in build_past_intervals_parallel()
 473beb5 qa: mon/pool_ops.sh: fix last test
+0c990f3 ceph_manager: eventually time out while waiting for admin socket
+3de8996 schedule_suite.sh: drop obligatory install task
+acab068 add explicit install task before ceph task
 3692ccd doc: make the cephfs man page marginally more truthful
 db99fb4 rgw: fix multipart uploads listing
 34f885b rgw: don't copy object when it's copied into itself
@@ -23120,6 +26882,7 @@ b90167d mon: move OSDMap feature bit calculation into an OSDMap method
 3ff0fe0 testing: updating hadoop-internal test
 f1bff17 qa: sample test for new replication tests
 60d9465 doc/release-notes: v0.57
+8170466 rados: fix ceph_test_filejournal test
 dbadb3e PG: remove weirdness log for last_complete < log.tail
 5fc83c8 os/FileStore: check replay guard on src for collection rename
 56c5a07 osd: requeue pg waiters at the front of the finished queue
@@ -23134,15 +26897,59 @@ d2dbab1 testing: adding a Hadoop wordcount test
 30b8d65 mon: restrict pool size to 1..10
 45a4fe0 qa: rbd map-snapshot-io: udevadm settle
 8e0be54 debian: allow extra args to get passed to ./configure via the environment
+1a0e201 ceph: fix valgrind log check
+50331e0 cleanup-run.sh: figure out owner
+2653b5a install: clean up flavor, distro, arch detection
+60fee7a schedule_suite.sh: include install task in all jobs
+84d7f37 testing: export TESTDIR in workunit task.
+3f7c9bc move the install to a separate task.
+176a340 testrados -> ceph_test_rados
+bf6e846 cleanup-run.sh: <owner> <run name>
+4869b49 lock: allow filtering by description, description substring
+2a344c1 rgw: sudo
+ed82d87 fix a few archive/log stragglers
+38b30c6 ceph: make gitbuilder host configurable
+b1c9864 ceph: install -dbg packages, too
+d8b6bf9 ceph: create /var/run/ceph
+969a6de ceph-fuse: sudo
+4eb047b sudo for admin socket commands
+0ad55b3 cfuse -> ceph-fuse
+88ebc27 ceph: store logs in normal location
+d1d3624 ceph: use default data, keyring locations
+7a61b23 ceph: don't uninstall librados, librbd
+bc9ecf7 ceph: pass package version to apt-get install
+c9d3dea avoid secretfile, except for kclient
+45ddbe6 rgw: specify keyring location
+a54200d nuke: tolerate failed dpkg --configure -a/apt-get -f install
+4dc6c8b install radosgw
+333c726 rbd: remove merge cruft
+8b97161 ceph: simplify apt-key management
+eadefec ceph: put client keyrings in /etc/ceph/ceph.$name.keyring
+149be93 nuke: dpkg --configure -a and apt-get -f install
+3400ea3 nuke: whitespace
 27fec23 ceph: simplify package removal
+28116db nuke: remove librados, librbd
+a529bb7 ceph: install ceph-mds, ceph-common
+5235fc1 ceph: fix purge
+c525e10 Install ceph debs and use installed debs
+d790eeb nuke: testrados -> ceph_test_rados
+c68f687 rbd: use 2 node cluster, mostly.
+e250815 rgw: use 2 node cluster
+f05f90b rados: use 2 instead of 3 machines
+267aac2 add fixed-2 cluster
 231dc1b qa: rbd/map-snapshot-io: remove image when done
 1a7a57a qa: fix quoting of wget URLs
 3612ed6 osd: log weirdness if caller_ops hash gets bigger than the log
+61605f8 rados: don't pass ceph.conf path to workloadgen
 ad84ea0 Strip any trailing whitespace from rbd showmapped
+6fa3749 remove rados-multifs link
+7a5fd05 misc: replace : with - in testdir name
 133d0ea buffer: drop large malloc tests
 7fcbfdc buffer: put big buffer on heap, not stack
 fb472a5 unit tests for src/common/buffer.{cc,h}
 94e7c14 mon: fix pgmap stat smoothing
 aa537df doc/release-notes: add note about upgrade to v0.56.3
+0c8d782 rados: remove empty multifs collection
 28e7212 ceph_common: fix check for defined/undefined entities in conf
 d20bf07 buffer::ptr::cmp only compares up to the smallest length
 fecc3c3 ceph-disk-prepare: -f for mkfs.xfs only
@@ -23167,8 +26974,10 @@ ab17aaf OSD: add leveldblog compatibility flag for OSD
 16b3718 PG: verify log versions during read_log
 5f92b6c PG: write_log if we read an old-format log
 1ef9420 osd: move pg log into leveldb
+f931cad schedule_suite.sh: fix s3branch
 ae0c2bb qa: pull qa stuff from ceph.com ceph.git mirror
 160490c doc: radosgw: document config without 100-continue and custom fastcgi
+9513f2f rbd_fsx: binary name now has ceph_ prefix
 467f7a7 config: Add small note about default number of PGs
 6f3f173 test_sync_io.c: add error handling
 26cdb6a test_short_dio_read.c: add error handling
@@ -23240,6 +27049,7 @@ f06b45e ceph-disk-activate: specify full path for blkid, initctl, service
 1af749e upstart/ceph-osd: make crush update on start optional
 ccdcae3 doc/release-notes: v0.56.3
 1334a42 PG,OSD: convert write_info users to write_if_dirty
+5d6d688 rados: testrados -> ceph_test_rados
 34ed5da OSD: init infos_object in OSD::init()
 93b3da6 PG: place biginfo on the infos object next to the info and epoch keys
 f4b70c8 PG: minor style change for append_log and read_info
@@ -23263,6 +27073,7 @@ b8aa476 init-ceph: consider sysvinit-tagged dirs as local
 50cbd10 debian: implement purge postrm script for ceph, ceph-mds
 af2372c ceph-disk-prepare: align mkfs, mount config options with mkcephfs
 617bde9 doc: Added many Q/A sections to the FAQ.
+db41f26 schedule_suite.sh: choose s3branch based on teuthology branch
 670a148 qa: fix mon/osd.sh
 7aefe92 specfile:  Add ceph-coverage to list of binaries
 612f65b Objecter.cc: use !empty() instead of size() to check for emptiness
@@ -23283,6 +27094,8 @@ d7705edf OSDMonitor.h: use !reporters.empty() instead of size()
 180cae5 AuthMonitor.cc: use !pending_auth.empty() instead of 'size() > 0'
 6b8458d CInode.h: use !old_inodes.empty() instead of size()
 a3970e5 CephxProtocol.h: pass CryptoKey by reference to decode_decrypt()
+7309bcc schedule_suite.sh: take option teuthology branch arg
+0c663ca schedule_suite.sh: ensure ceph and kernel branches exist
 9d6eccb osd/PG: initialize info_struct_v in ctor
 7c2e2b9 java: make CephMountTest use user.* xattr names
 977ba79 doc: clarified ceph id vs. ceph name, and fixed a typo.
@@ -23318,6 +27131,7 @@ f0b2e32 Makefile: name binary ceph-filestore-dump
 1b7fc30 .gitignore: fix typo and missing vstart.sh output
 2783fce work unit for rbd cli tests Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
 f923c8c doc: document hadoop replication config
+d4d11a5 adding task for rbd cli tests Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
 7c8d3d0 added new cli tests Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
 a62d519 Unit tests for chain_xattr.cc
 31e911b osd: update snap collections for sub_op_modify log records conditionally
@@ -23332,6 +27146,9 @@ b26dc4e test: fix run-rbd-tests
 c52dbd5 .gitignore: re-add vstart dirs and tags, fix typo
 ebe70d7 Mutex.cc: fix implicitly-defined namespace 'std'
 c7970bb ceph_authtool.cc: fix implicitly-defined namespace 'std'
+d5c771a rados/monthrash: run some mon tests
+8d5ca94 rados: separate out mon thrasher from thrasher collection
+6e3c2d9 peer: add recovery delay to make test behave
 d6b205f librados/librados.cc: fix implicitly-defined namespace 'std'
 69552ff test_mon_workloadgen.cc: fix -Wgnu
 1f0fd50 src/osd/PG.h: use empty() instead of size()
@@ -23425,12 +27242,14 @@ ff63530 mds: Capability (and sub-structs) now uses modern encoding
 3383618 throttle: optional non perf counter mode
 ed2bb38 OSD: check pg snap collections on start up
 55f8579 OSD::load_pgs: first scan colls before initing PGs
+75d86e4 Made teuthology-lock --summary machine type aware.
 70532d1 rgw: get bucket_owner from policy
 f6af1e7 rgw: fix bucket_owner assignment
 e345dfe Feature 3667: Support extra canned acls.
 fa47e77 ReplicatedPG: check store for temp collection in have_temp_coll
 a18045f rgw: a tool to fix clobbered bucket info in user's bucket list
 a00c77a rgw: bucket recreation should not clobber bucket info
+030bc7c Added support for multiple types of machines.
 36cf4d0 ceph: fix 'pg' error message to direct user toward better input
 1042060 mds: error messages for export_dir said 'migrate_dir'
 c44846e ceph: ceph mon delete doesn't exist; ceph mon remove is the command. Fix up cli test as well (doc is already correct)
@@ -23440,12 +27259,17 @@ eb9d6ca osd: fix name of setomapval admin-daemon command
 dbce1d0 PG: dirty_info on handle_activate_map
 9432353 mds: rename mds_traceless_replies to mds_inject_traceless_reply_probability
 af95d93 osd: flush peering queue (consume maps) prior to boot
+ed9103a rgw: parse testdir into apache.conf
 75c40fa qa: fix iogen script
+67bbb9c osd_recovery: add missing testdir arg
+561ea14 ceph_manager: take int or string to osd_admin_socket
 46d7dbd client: trigger the completion in _flush when short-cutting
 08b82b3 mds: add "mds traceless replies" debug option
 9871cf2 logrotate.conf: Silence rgw logrotate some more
 d02340d silence logrotate some more
 c0e1070 test: fix Throttle unit test.
+3fbb552 radosbench: fix missing format value
+936f314 rgw: fix testdir format on f
 1948a02 osd: do not spam system log on successful read_log
 3acc4d2 rbd-fuse: fix for loop in open_rbd_image()
 db0dbe5 msg/Message.h: fix C-style pointer casting
@@ -23456,6 +27280,10 @@ d54bd17 include/types.h: change operator<< function parameter
 ad526c0 obj_bencher.cc: use vector instead of VLA's
 a4042cc ceph_crypto.cc: remove unused shutdown() outside crypto ifdef's
 4268296 WorkQueue.h: fix cast
+ed3c361 nuke: don't try unmount if we're rebooting everything anyway
+c6504ba nuke: make tmpfs check only umount tmpfs
+82273e9 rbd: fix rbd image unmount
+6099045 rbd: set env before running sudo
 27fb0e6 rgw: a tool to fix buckets with leaked multipart references
 50c1775 rgw: radosgw-admin object unlink
 3b63542 mon: move list_rules into CrushWrapper method
@@ -23476,6 +27304,7 @@ a04d3f0 mon: 'osd crush rule create-simple <name> <root> <failure_domain_type>'
 d7ada58 crush: fix get_rule_id() return value
 4f992ea crush: add rule_exists()
 3105700 mon: 'osd find <osd-id>' command
+100e905 misc:  Close connections on reboot
 c058285 mds: uninline Capability encoders
 90d93d9 mds: build dencoder with more stuff
 ad40bdd MDSMap: mds_info_t now uses modern encoding
@@ -23509,6 +27338,7 @@ ccba2ce mds: add CEPH_FEATURE_MDSENC feature bit
 ece1c0f mon: check correct length of command
 64ded02 Relax Throttle::_reset_max conditions and associated unit tests
 ca2d645 os: default to 'journal aio = true'
+da10b58 task/ceph_manager:  Fix NoneType config issue
 d41b541 Edit endpoint-create in ./doc/radosgw/config.rst
 6e60330 Edit rgw keystone url in ./doc/radosgw/config.rst
 af8cac1 Note on host in ./doc/radosgw/config.rst
@@ -23518,6 +27348,7 @@ eba8697 cli test: add pg deep-scrub option to test
 4a6924a install:  remove perl dependency
 804ffc6 Add "pg deep-scrub..." missing from ceph usage output
 9019fbb rgw: fix setting of NULL to string
+2f41f81 misc: don't use colon in default run name
 e0acc33 xattr_bench.cc: remove twice included <time.h>
 c81a9d4 ceph-filestore-dump.cc: remove twice included <iostream>
 558b238 testmsgr.cc: remove twice included <sys/stat.h>
@@ -23529,9 +27360,27 @@ d141f79 tp_bench.cc: remove twice included <iostream>
 c8aeb93 small_io_bench*.cc: remove twice included <iostream>
 8197990 MDS.cc: remove twice included common/errno.h
 4e29c95 mon: enforce reweight be between 0..1
+55c1bcf Add testdir param to get_valgrind_args() calls
 b970d05 qa: smalliobenchrbd workunit
+887e93e nuke.py:  Allow name of job/run to be specified
+46d3ff9 run.py: Add target name to logging info
+ada803d rbd: fix .format() call with {1} syntax
+fe9fb49 ceph_manager: use get() for self.config powercycle checks
+7280980 Fixup latest commits that use /tmp/cephtest.
+bd4f1d5 adding task for iogen
+d9fff40f task/chdir-coredump:  Use readlink -e
+9a9fe73 task/ceph: Fix typo in previous commit
 d050fe1 doc: Minor edits.
+9de9ebc nuke: get_testdir_base needs to be imported
 0797be3 rgw: key indexes are only link to user info
+edfe5ee nuke: Fix cleanup of test dir
+4ebd90e task/ceph: Initialize disk_config maps
+150a3d7 misc: Don't include existing partitions in devs
+3806dc5 task/ceph: Fix device list
+64e3966 misc: get_wwn_id_map() needs to return dict
+dcf99e4 nuke:  Optionally check console status
+ac4ba69 misc: Fix get_wwn_id_map() to be optional
+933cc3c run.py: Fix argument parsing for --name
 fd1512f Build:  Add -n to files and description for rbd-fuse in ceph.spec.in
 de01bdd Makefile:  Install new rdb-fuse.8 man page
 16cf9dc build:  Add new rbd-fuse package
@@ -23547,13 +27396,26 @@ de01bdd Makefile:  Install new rdb-fuse.8 man page
 4c1d8d0 ceph.spec.in: don't move libcephfs_jni files around
 9b16036 ceph.spec.in: move libcephfs_jni.so to ceph-devel
 3f53c3f Validate format strings for CLS_ERR/CLS_LOG
+fadc22c ceph_manager: wait for admin socket on restart, use for set_config
 97c6619 qa: update the rbd/concurrent.sh workunit
+8f9267c thrashosds: note assumption for powercycling
+8e566f6 marginal/osd_powercycle: OSD powercycle thrashing
+77e8d80 Remove console.py
+8f72045 Assign devices to osds using the device wwn
+5811159 Support power cycling osds/nodes through ipmi
+87b9849 add --name option to teuthology
+ace4cb0 Replace /tmp/cephtest/ with configurable path
+1473027 Fixes for syntax errors found by pyflakes.
+3390cc3 Scripts to use pyflakes to check python syntax.
 0758fab Add ceph-filestore-dump to the packaging
 ab778cb doc: v0.56.2 release notes
 3c8d7d7 osd: create tool to extract pg info and pg log from filestore
 4a950aa Move read_log() function to prep for next commit
+a63fac3 task: mon_clock_skew_check: use absolute value when comparing mon_skew
+89e09fa task: mon_clock_skew_check: mark as ran once if an expected skew was found
 b571f8e PGMap: fix -Wsign-compare warning
 b0d4dd2 test_libcephfs: fix xattr test
+0c87249 rbd: add rbd_cli_misc with map-snapshot-io.sh
 c782d2a qa: add test for rbd map and snapshots
 e253830 cls_rbd, cls_rgw: use PRI*64 when printing/logging 64-bit values
 77f5741 mds: move lexical_cast and assert re-#include to the top
@@ -23574,12 +27436,18 @@ ad7ebad client: allow ceph.* xattrs
 e51299f mds: open mydir after replay
 3bc2114 ObjectCacher: fix flush_set when no flushing is needed
 59ac4d3 qa: add rbd/concurrent workunit
+19f4273 peer: fix filtering out of scrub from pg state
 7cd4e50 client: Wait for caps to flush when flushing metadata.
 907c709 mds: Send created ino in journaled_reply
 cf7c3f7 client: Don't use geteuid/gid for fuse ll_create
+e805b7d admin_socket: don't bother remote executing if there is no test
 0b66994 ceph.spec.in:	package rbd udev rule
+e33b425 osd_recovery: use --no-cleanup for rados bench
+1c31194 osd_recovery: inject a recovery delay
+3b27c9e osd_backfill: --no-cleanup for rados bench
 a7d15af mon: smooth pg stat rates over last N pgmaps
 ecda120 doc: fix overly-big fixed-width text in Firefox
+d500860 btrfs.yaml: increase osd op thread timeout
 3f6837e mon/PGMap: report IO rates
 208b02a mon/PGMap: report recovery rates
 76e9fe5 mon/PGMap: include timestamp
@@ -23613,6 +27481,7 @@ c9ff21a mds: fix "had dentry linked to wrong inode" warning
 cd8d910 mds: don't set xlocks on dentries done when early reply rename
 b429a3a doc: Updated to add indep and first n to chooseleaf. Num only used with firstn.
 f41010c rgw: fix crash when missing content-type in POST object
+81ed1bc rados: add pool_ops workunit to cephtool test
 09522e5 rgw: fix crash when missing content-type in POST object
 b955a59 mon: set limit so that we do not an entire down subtree out
 2b8ba7c osdmap: implement subtree_is_down() and containing_subtree_is_down()
@@ -23620,6 +27489,7 @@ b955a59 mon: set limit so that we do not an entire down subtree out
 97b7892 doc: update ceph man page link
 91a0bc8 ceph, rados: update pool delete docs and usage
 1a6197a qa: fix mon pool_ops workunit
+826e586 cram: fix for runs with coverage enabled
 818e9a2 rbd-fuse: fix printf format for off_t and size_t
 21673e8 rbd-fuse: fix usage of conn->want
 f74265b configure: fix check for fuse_getgroups()
@@ -23643,12 +27513,15 @@ fa421cf configure: remove -m4_include(m4/acx_pthread.m4)
 bbb86ec mon: safety interlock for pool deletion
 700bced Revert "mon: implement safety interlock for deleting pools"
 6c40794 Added libexpat dependency
+b5f8163 osdthrasher: inject pause on a live (on in) osd
 2a6dcab rbd-fuse: add simple RBD FUSE client
 7daf372 rbd-fuse: Original code from Andreas Bluemle
 aec2a47 s3/php: update to 1.5? version of API
 b2a473b workunit for iogen
 17cd549 mon: Monitor: timecheck: only output report to dout once
 13fb172 mon: Monitor: track timecheck round state and report on health
+aa85d91 task: mon_clock_skew_check: increase timeout and kick it off only on stop
+673101c task: mon_clock_skew_check: distinguish between on-going and finished check
 b49440b doc: Added new, more comprehensive OSD/PG monitoring doc.
 5f21050 doc: Trimmed some detail and added a x-ref to detailed osd/pg monitoring doc.
 95cfdd4 doc: Added osd/pg monitoring section to the index.
@@ -23669,16 +27542,21 @@ a6ed62e common: fix cli tests on usage
 d95b431 adminops.rst: revert changes for as-yet-unimplemented features
 bb860e4 rados: remove unused "check_stdio" parameter
 234becd rados: obey op_size for 'get'
+3a5c70b ceph_manager: turn long stall injection off by default
 0c1cc68 FileStore: ping TPHandle after each operation in _do_transactions
 e0511f4 OSD: use TPHandle in peering_wq
 4f653d2 WorkQueue: add TPHandle to allow _process to ping the hb map
+006e706 osd_recovery: fix up incomplete test
+20af01f ceph_manager: fix get_num_active_recovered()
 79d599c java: remove extra whitespace
+b150e8e workunit: pass java path as env variable
 6f0e113 libcephfs-java test: use provided environment
 40ae8ce common: only show -d, -f options for daemons
 7e7130d doc: Syntax fixes.
 b51bfdf doc: Updated usage for Bobtail.
 1d71d05 doc: Updated usage for Bobtail.
 b0a5fe9 java: support ceph_get_file_pool_name
+6a859bc ceph_manager: use 80/70 as pause_long, pause_check_after defaults
 42d92b7 doc: Added example of ext4 user_xattr mount option.
 b3a2e7e rgw_rest: Make fallback uri configurable.
 352652b libcephfs: document ERANGE rv for get_file_pool_name
@@ -23690,7 +27568,12 @@ b3a2e7e rgw_rest: Make fallback uri configurable.
 b9f58ba libcephfs-java test: jar files are in /usr/local/share/java, it seems
 f9f31aa wireshark: fix indention
 3e9cc0d wireshark: fix guint64 print format handling
+0f24dca ceph_manager: use do_rados for rmpool
+2f192ea TestRados expects rollback, not snap_rollback
 67c7757 PendingReleaseNotes: pool removal cli changes
+ec5a145 ceph_manager: default chance_down to 0.4
+566ae53 ceph_manager: add filestore and heartbeat stalls
+5d66c9a Use ceph git repo instead of github.
 8a97eef ReplicatedPG: handle omap > max_recovery_chunk
 c3dec3e ReplicatedPG: correctly handle omap key larger than max chunk
 09c71f2 ReplicatedPG: start scanning omap at omap_recovered_to
@@ -23711,6 +27594,7 @@ ad6b231 osd: do not reply to ping if internal heartbeat is not healthy
 db48caf osd: debug support for omap deep-scrub
 509a93e osd: Add digest of omap for deep-scrub
 cfb1aa8 osd: Add missing unregister_command() in OSD::shutdown()
+e714c77 osd: Testing of deep-scrub omap changes
 e328fa6 test/bench: add rbd backend to smalliobench
 c3266ad config: helper to identify internal fields we should be quiet about
 0ee5ec7 common/Throttle: fix modeline, whitespace
@@ -23736,11 +27620,14 @@ bec96a2 osd: debug msg prio, cost, latency
 cfe4b85 os/FileStore: allow filestore_queue_max_{ops,bytes} to be adjusted at runtime
 101955a osd: make osd_max_backfills dynamically adjustable
 9230c86 osd: make OSD a config observer
+b6e3edc test: create /tmp/cephtest/mnt.{id}
 6401abf qa/workunit: Add iozone test script for sync
 72147fd objectcacher: Remove commit_set, use flush_set
 00b1186 testing: add workunit to run hadoop internal tests.
 359d0e9 config: report on log level changes
 c5e0951 config: clean up output
+98cc1b8 task: mon_clock_skew_check: add option to run at least one timecheck
+b7cb1b1 rados/thrash: 3 monitors, so that we can thrash them
 d7d8192 config: don't make noise about 'internal_safe_to_start_threads'
 2e39dd5 mds: fix default_file_layout constructor
 e461f09 mds: fix byte_range_t ctor
@@ -23749,17 +27636,20 @@ e461f09 mds: fix byte_range_t ctor
 4830895 Clarify journal size based on filestore max sync
 aea898d ceph: reject negative weights at ceph osd <n> reweight
 7d9d765 workunit/cephtool: Use '! cmd' when expecting failure
+ee4a9f2 marginal/mds_thrasher:  Add tests for mds thrasher
 0cb760f OSD: do deep_scrub for repair
 5e00af4 osd: set pg removal transactions based on configurable
 4712e98 osd: make pg removal thread more friendly
 bc99404 os: move apply_transactions() sync wrapper into ObjectStore
 f6c69c3 os: add apply_transaction() variant that takes a sequencer
+53f22d9 task/mds_thrasher:  New task for thrashing the mds
 4bdcfbf client: Respect O_SYNC, O_DSYNC, and O_RSYNC
 045af95 qa: remove xfstest 068 from qemu testing
 1f911fd ceph: allow osd pool get to get everything you can set
 49726dc os/FileStore: only flush inline if write is sufficiently large
 8ddb55d os/FileStore: fix compile when sync_file_range is missing;
 b8d5e28 doc/rados/operations/crush: need kernel v3.6 for first round of tunables
+dbc38ef rbd.py: update scratch and test image sizes
 736966f java: support get pool id/replication interface
 40415d1 libcephfs: add pool id/size lookup interface
 76e715b doc: Added link to rotation section.
@@ -23774,6 +27664,9 @@ dd7caf5 mds: gracefully exit if newer gid replaces us by name
 2e11233 mon: enforce unique name in mdsmap
 ca2d9ac doc: Updated OSD configuration reference with backfill config options.
 e330b7e mon: create fail_mds_gid() helper; make 'ceph mds rm ...' more generic
+f41b542 add mon_thrash task to kernel and rados thrashers collections
+626f610 Add a test for the truncate/osd-commit-reply race
+cc7bf1b rados: add osd reply delay injection
 d81ac84 rbd: fix bench-write infinite loop
 60db6e3 crushtool: warn usefully about missing output spec
 e776b63 crushtool: consolidate_whitespace() should eat everything except \n
@@ -23782,13 +27675,16 @@ efa595f doc/rados/operations/authentication: update for cephx sig requirement op
 50db10d msg/Pipe: require MSG_AUTH feature on server if option is enabled
 4a49a09 cephx: control signatures for service vs cluster
 c236a51 osdmap: make replica separate in default crush map configurable
+cd09be6 ceph: pass ceph.conf to osdmaptool
 c6f8010 mon: Monitor: drop messages from old timecheck epochs
 b0162fa osdmaptool: more fix cli test
 5bd8765 osdmaptool: fix cli test
 98a7631 osd: leave osd_lock locked in shutdown()
+72db1a5 When running teuthology with targets provisioned on OpenStack and kvm, the disks will show under /dev/vda, /dev/vdb etc. Add them to the list of devices to inspect and use for tests.
 faa62fa radosgw: increase nofile ulimit in upstart
 19ee231 ceph: adjust crush tunables via 'ceph osd crush tunables <profile>'
 85eb8e3 osdmaptool: allow user to specify pool for test-map-object
+0946a78 fix mon clock queue test syntax
 37dbf7d rgw: copy object should not copy source acls
 70c3512 ReplicatedPG: ignore snap link info in scrub if nlinks==0
 381e258 osd/PG: fix osd id in error message on snap collection errors
@@ -23803,6 +27699,7 @@ b856874 osd_types: add nlink and snapcolls fields to ScrubMap::object
 3f0ad49 librados.hpp: fix omap_get_vals and omap_get_keys comments
 cb5e2be rados.cc: use omap_get_vals_by_keys in getomapval
 44c45e5 rados.cc: fix listomapvals usage: key,val are not needed
+3c67ee3 rbd: add test for formatted output from rbd cli
 fb4bb5d osd: better error message for request on pool that dne
 9a1f574 osd: drop newlines from event descriptions
 0efb9c5 test: add cram integration test for formatted output
@@ -23818,6 +27715,7 @@ a586966 osd: fix rescrub after repair
 c1a86ab configure.ac: fix problem with --enable-cephfs-java
 1d50aff mds: fix usage typo for ceph-mds
 2dc2b48 mds: use #defines for bits per cap
+c8a9a9a Add cram task
 d56af79 osd: note must_scrub* flags in PG operator<<
 2baf125 osd: based INCONSISTENT pg state on persistent scrub errors
 26a63df osd: fix scrub scheduling for 0.0
@@ -23831,12 +27729,15 @@ a148120 osd: move scrub schedule random backoff to seperate helper
 62ee6e0 osd/PG: trigger scrub via scrub schedule, must_ flags
 1441095 osd/PG: introduce flags to indicate explicitly requested scrubs
 796907e osd/PG: move scrub schedule registration into a helper
+123a2dc rados: adjust socket injection rate down
+71097b7 Revert "task/kclient: chmod root to 1777."
 be0c4b3 ac_prog_javah.m4:  Use AC_CANONICAL_TARGET instead of AC_CANONICAL_SYSTEM.
 13cb196 java: add fine grained synchronization
 85c1035 java: remove all intrinsic locks
 2b9da45 java: remove unnecessary synchronization
 fb8a488 java: remove create/release synchronization
 017b6d6 Revert "osdmap: spread replicas across hosts with default crush map"
+92a9d9c ceph.conf: separate replicas across osds
 410906e mon: OSDMonitor: don't output to stdout in plain text if json is specified
 7ea5d84 osdmap: spread replicas across hosts with default crush map
 3610e72 mon: OSDMonitor: only share osdmap with up OSDs
@@ -23849,6 +27750,8 @@ e94b06a rbd: make 'add' modprobe rbd so it has a chance of success
 0f161f1 Correct typo in mon docs 'ceph.com' to 'ceph.conf'
 aeb0206 qa/run_xfstests.sh: use cloned xfstests repository
 8d0fa15 mon: Monitor: only schedule a timecheck after election if we are not alone
+305cb54 suites: rados: multimon: add mon clock skews task yaml files
+2fa5d23 test: Hadoop cluster and task config.
 58e03ec mon: Monitor: unify 'ceph health' and 'ceph status'; add json output
 bc57c7a mon: Monitor: use 'else if' on handle_command instead of bunches of 'if'
 7a7fff5 mon: Monitor: move a couple of if's together on handle_command()
@@ -23861,6 +27764,7 @@ de6633f doc: Normalized to term "drive" rather than disk. Changed "(Manual)" ent
 44625d4 config_opts.h: default osd_recovery_delay_start to 0
 0f42c37 ReplicatedPG: fix snapdir trimming
 035caac Revert "rgw: fix handler leak in handle_request"
+c8f3fd6 marginal: Remove broken symlinks
 797b3db Added python wrapper to rados_cluster_stat
 59aad34 configure.ac: check for org.junit.rules.ExternalResource
 61437ee configure.ac: change junit4 handling
@@ -23879,6 +27783,19 @@ e1da85f rgw: Fix crash when FastCGI frontend doesn't set SCRIPT_URI
 eba314a rgw: fix handler leak in handle_request
 4483285 librbd: Allow get_lock_info to fail
 77ddf27 doc/release-notes: v0.48.3argonaut
+573f531 marginal/multiclient: Matching tests for kclient
+14385a6 marginal/multiclient:  Add three client cluster
+a4df523 marginal/multiclient:  Adding ior test to marginal
+1e03fe1 marginal/multiclient:  Add a test for fsx-mpi
+c07a4cb marginal/multiclient:  New task to run mdtest
+f17847e task/kclient: chmod root to 1777.
+f895846 task/mpi:  Allow working directory to be specified
+1624812 task: A task to setup mpi
+e88c0fc task/ceph-fuse: chmod root to 1777
+4ed20ae task/pexec: Add barrier capability
+3532008 task/pexec: More fixes for all case, exec on hosts
+081a80f task/pexec: Fix when 'all' is used
+d44fb14 radosgw-admin.py: Increase test coverage to current admin feature set.
 f07921b doc/install: new URLs for argonaut vs bobtail
 72674ad doc/release-notes: v0.56.1
 26e8438 test: enforce -ENOTCONN contract in libcephfs
@@ -23903,6 +27820,10 @@ d3abd0f Revert "OSD: remove RD flag from CALL ops"
 3a94087 libcephfs: delete client after messenger shutdown
 0978dc4 rbd: Don't call ProgressContext's finish() if there's an error.
 e89b6ad ReplicatedPG: remove old-head optimization from push_to_replica
+d6496ab remove rbd_header_race test
+620dd55 task: mon_clock_skew_check.py: Check for clock skews on the monitors
+1a87861 regression: include nfs suite
+e88b909 task: ceph_manager: add 'get_mon_health' function
 acfa0c9 mds: optimize C_MDC_RetryOpenRemoteIno
 acbe6d9 mds: don't issue caps while inode is exporting caps
 ca4dc4d mds: check if stray dentry is needed
@@ -23916,18 +27837,22 @@ b03eab2 mds: forbid creating file in deleted directory
 d379ac8 mds: disable concurrent remote locking
 28d59d3 os/FileStore: fix non-btrfs op_seq commit order
 f1e0305 doc: Removed the --without-tcmalloc flag until further advised.
+5ce47c2 ssh_keys.py: pull the keys out of targets entry rather than the hosts known hosts file.
 88af7d1 doc: Added defaults for PGs, links to recommended settings, and updated note on splitting.
 4ae4dce OSD: for old osds, dispatch peering messages immediately
 73bc8ff doc: Added comments on --without-tcmalloc option when building Ceph.
 37b57cd Update doc/rados/configuration/filesystem-recommendations.rst
 43ef677 doc: Added some packages to the copyable line.
 333ae82 doc: Fixed syntax error.
+aaa03bb qa:  Add knfsd reexport suite
 224a33b qa/workunit:  Add dbench-short.sh for nfs suite
+eee795c rbd_xfstests.yaml: drop test 186
 a32d6c5 osd: move common active vs booting code into consume_map
 0bfad8e osd: let pgs process map advances before booting
 5fc94e8 osd: drop oldest_last_clean from activate_map
 67f7ee6 osd: drop unused variables from activate_map
 a14a36e OSDMap: fix modifed -> modified typo
+9ca69e7 ceph: malloc check =3 means we hear on stderr too
 43cba61 log: fix locking typo/stupid for dump_recent()
 64d2760 doc: Added a memory profiling section. Ported from the wiki.
 5066abf doc: Added memory profiling to the index.
@@ -23938,6 +27863,8 @@ c4370ff librbd: establish watch before reading header
 c461e7f test_filejournal: test journaling bl with >IOV_MAX segments
 dda7b65 os/FileJournal: limit size of aio submission
 e0858fa Revert "librbd: ensure header is up to date after initial read"
+0631099 ceph: enable malloc debugging for ceph-osd
+3686371 rados: add test_filejournal
 8229770 doc: Minor edits.
 d3b9803 doc: Fixed typo, clarified usage.
 8422474 mds: fix rename inode exportor check
@@ -23962,6 +27889,8 @@ e10267b mds: fix Locker::simple_eval()
 49ebe1e client: fix _create created ino condition
 a10054b libcephfs: choose more unique nonce
 e2fef38 client: fix _create
+7d70dd1 Revert "kernel: move fsync test to marginal suite until it works"
+ed586c1 task: ceph: don't wait for 'healthy' if 'wait-for-healthy' is false.
 82cec48 doc: add-or-rm-mons.rst: Add 'Changing Monitor's IPs' section
 379f079 doc: add-or-rm-mons.rst: Clarify what the monitor name/id is.
 8bbb4a3 doc: fix rbd permissions for unprotect
@@ -23974,6 +27903,7 @@ c67c789 librbd: add {rbd_}open_read_only()
 4aa6af7 doc/release-notes: link to upgrade doc
 7b0dbeb doc/install/upgrading: edits to upgrade document
 6711a4c Revert "mds: replace closed sessions on connect"
+bb4a2c5 rgw: enable logging in ceph.conf
 82f8bcd msg/Pipe: use state_closed atomic_t for _lookup_pipe
 a5d692a msgr: inject delays at inconvenient times
 e99b4a3 msgr: fix race on Pipe removal from hash
@@ -24000,6 +27930,8 @@ ea13ecc osd: less noise about inefficient tmap updates
 672c56b init-ceph: default to 16K max_open_files
 948e752 ceph-fuse: Avoid doing handle cleanup in dtor
 ff2d4ab ceph-fuse:  Pass client handle as userdata
+acb91f7 kernel: move fsync test to marginal suite until it works
+02e4eef kernel: move fsx to marginal suite until it passes
 9967cf2 release-notes: rgw logging now off by default
 1c3e12a doc: warn about using caching without QEMU knowing
 f6ce5dd rgw: disable ops and usage logging by default
@@ -24017,6 +27949,7 @@ f1dfd64 messages/MOSDOpReply: remove misleading may_read/may_write
 82c7171 osd: drop 'osd recovery max active' back to previous default (5)
 6f1f03c journal: reduce journal max queue size
 0d2ad2f mds: use set to store MDSMap data pools
+80bcaa2 rados: add filestore_idempotent test with journal aio = true
 2137d5c mds: wait for client's mdsmap when specifying data pool
 9da6d88 doc: document mds config options
 916d1cf doc: journaler config options
@@ -24077,6 +28010,7 @@ b564fdb release-notes: remove warning about osd caps
 085992f doc: minor fix to syntax.
 206ffcd mkcephfs: error out if 'devs' defined but 'osd fs type' not defined
 4a40067 doc: update ceph.conf examples about btrfs default
+677a7a5 rgw: add swift tasks
 999ba1b monc: only warn about missing keyring if we fail to authenticate
 5d5a42b osd: clear CLEAN on exit from Clean state
 b3e62ad auth: use none auth if keyring not found
@@ -24085,6 +28019,7 @@ b3e62ad auth: use none auth if keyring not found
 ae044e6 osd: allow transition from Clean -> WaitLocalRecoveryReserved for repair
 670afc6 PG: in sched_scrub() set PG_STATE_DEEP_SCRUB not scrubber.deep
 19e44bf osd: clear scrub state if queued scrub doesn't start
+c02d34d task/swift: change upstream repository url
 feb0aad doc: Moved path to individual OSD entires.
 e765dcb osd: only dec_scrubs_active if we were active
 ada3e27 osd: reintroduce inc_scrubs_active helper
@@ -24098,6 +28033,7 @@ b5031a2 OSD,ReplicatedPG: do not track notifies on the session
 719679e doc: Added package and repo links for Apache and FastCGI. Added SSL enable too.
 04eb1e7 doc: Fixed restructuredText usage.
 ea9fc87 doc: Removed foo. Apparently myimage was added and foo not removed.
+0953ce5 rados: add cephtool test
 a803159 rgw: configurable exit timeout
 92b59e9 rgw: don't try to assign content type if not found
 08c6424 rgw: don't initialize keystone if not set up
@@ -24138,6 +28074,8 @@ bc9d9d8 Refactor rule file to separate arch/indep builds.
 4bf9078 osdc/Objecter: prevent pool dne check from invalidating scan_requests iterator
 6f978aa doc: draft bobtail release notes
 5061481 doc: correct meaning of 'pool' in crush
+c9b8151 add an fsync-tester workunit to the fuse and kclient suites
+673b682 put fsx back in the kernel suite. Looks like this was lost accidentally?
 1ec70aa qa: add a workunit for fsync-tester
 286dcbe test: remove underscores from cephfs test names
 a7de975 lockdep:  Decrease lockdep backtrace skip by 1
@@ -24185,7 +28123,11 @@ caea0cb os/JournalingObjectStore: un-break op quiescing during journal replay
 6a8a58d doc: document swift compatibility
 cf28e78 docs: add rgw POST object as supported feature
 3950182 st_rados_watch: tolerate extra notifies
+f2dbe5e CephManager: add ability to test split
 29307d3 mds: shutdown cleanly if can't authenticate
+b916f67 pexec.py: Parse out role ID from the back. Also, do not assume that the command needs to run from a specific directory.
+0890d48 Adding a Hadoop task. This task configures and starts a Hadoop cluster. It does not run any jobs; that must be done after this task runs. Can run on either Ceph or HDFS.
+0cd84b3 New ssh task that adds keys for node -> node ssh. This generates a new keypair, pushes it to all nodes in the context and adds all hosts to all other hosts .ssh/authorized_keys file. Cleans up all keys and authorized_keys entries afterwards.
 c310700 objecter: don't use new tid when retrying notifies
 9a40ef0 mds: fix journaling issue regarding rstat accounting
 b9d717c fix build of unittest_formatter
@@ -24198,8 +28140,10 @@ fdcdca7 HashIndex: fix typo in reset_attr documentation
 80cca21 PrioritizedQueue: move if check out of loop in filter_list_pairs
 a50c7d3 config: do not always print config file missing errors
 6fb9a55 config: always complain about config parse errors
+a5b9939 ceph.conf: default to smaller recovery chunk
 2e7cba7 doc: fixed indent in python example.
 788992b config_opts.h: adjust recovery defaults
+1bdd5c3 Fix qemu options for xfstests
 f4be3c8 doc: Added sudo to ceph -k command.
 3709519 doc: Fixed typo.
 47c81a3 Makefile.am: add missing flags to some tests targets
@@ -24259,6 +28203,8 @@ b2ccf11 librbd: handle parent change while async I/Os are in flight
 a55700c librbd: hold AioCompletion lock while modifying global state
 41e16a3 librbd: handle parent change while async I/Os are in flight
 917a6f2 Striper: use local variable inside if() that tested it
+90d8156 qemu: set qemu cache mode based on rbd cache setting
+50af473 Add xfstests on rbd inside qemu
 2a5549c qa: add script for running xfstests in a vm
 2779325 rgw: fix rgw_tools get_obj()
 cb19e99 doc: ceph osd create takes a uuid, not an osd id
@@ -24274,6 +28220,9 @@ a48dee5 os/: Add failure CollectionIndex failure injection
 bd46386 test/store_test: add simple tests for collection_split
 f2a2391 os/: add filestore collection_split
 a83d13a OSD: ignore queries on now deleted pools
+8e78cd8 dropping xfs test 183 for now as it causes nightly failure
+cea082c dropping test 183 as it is causing nightly failures
+307d291 kernel: add kclient ffsb back in
 3986564 Dropping xfs tests 179 and 183 as they are causing nightly failures
 727c37a mds: journal remote inode's projected parent
 3f69f72 mds: don't create bloom filter for incomplete dir
@@ -24364,12 +28313,15 @@ bd03234 osd: simplify active_committed
 e165330 osd: use safe OSDService msgr helpers for heartbeats
 ea65dff osd: helpers to blacklist messages to down osds
 4c3d5dc test_cls_rgw.c:  Call to cls_rgw_bucket_complete_op() needs new parameter.
+9aacaf7 add mon-thrasher singleton
 24d61fa mon: add WARN_UNUSED_RESULT to the MonitorStore functions that return error codes
 c762f91 mon: remove the silly write_bl_ss write_bl_ss_impl distinction
 399f269 mon: convert store users with unchecked return codes to just assert on issues
 ab312f8 mon: update Paxos::read()'s successful read check
 3fe7c6c mon: add new get_bl_[sn|ss]_safe functions
 60f60ff mon: In MonitorStore, wrap all uses of ::close and assert success
+f525359 task: mon_thrash: thrash monitors while running other tests
+71361ac kernel: add rbd map/unmap test
 dd3a24a create qa/workunits/rbd/map-unmap.sh
 5d6da25 rgw: bucket check --check-objects
 e5dc46f rgw: check_disk_state() removes multipart parts from index
@@ -24414,6 +28366,7 @@ f65307a ceph_mon.cc: remove twice included sys/stat.h
 c99d9c3 rbd: fix import from stdin, add test
 a738f44 rbd: allow export to stdout, add tests
 cf2a045 config: make $pid a metavariable
+e1a9a44 fix objectcacher stress test
 aa37fe7 test_rados_api_misc: fix TMAPPUT sorting test
 8850b3c ceph.spec.in:  Fix typo.
 a1e0868 README: add the libboost-thread-dev dep to the list
@@ -24465,7 +28418,7 @@ a3aad3c mds: fix anchor table update
 6efe977 mon, osd: adjust msgr requires for CRUSH_TUNABLES2 feature
 0cc47ff crush: introduce CRUSH_TUNABLES2 feature
 88f2181 crush: for chooseleaf rules, retry CRUSH map descent from root if leaf is failed
-0beeb47 rgw: document ops logging setup
+0beeb47c rgw: document ops logging setup
 6bc32b2 rgw: usage REST api handles categories
 94423ac perfcounters: fl -> time, use u64 nsec instead of double
 3a0ee8e perfcounters: add 'perf' option to disable perf counters
@@ -24475,10 +28428,14 @@ d72c2c8 Removing ceph dirs while scrubbing ceph off the system
 74b2a2d rgw: POST requests not default to init multipart upload
 1f8c323 java: add ceph_open_layout interface
 f0c608c client: add ceph_open_layout interface
+a4c41e2 Add objectcacher stress testing
 365ba06 qa: add script to run objectcacher tests
 525f942 init-ceph: do not make noise about missing devs
+9f7ab6e kernel: remove serial rbd_xfstests job
+7ff17ff xfstests: remove 179 and 182 from parallel test
 bc32fc4 syncfs: check for __NR_syncfs too
 6890675 monmap: fix crash from dup initial seed mons
+d07d728 run: save original config, too
 7602a05 osdc/ObjectCacher: fix BufferHead leak on ENOENT
 df550c9 make mkcephfs and init-ceph osd filesystem handling more flexible
 96b82eb mon: Monitor: wake up contexts based on paxos machine's state
@@ -24487,6 +28444,7 @@ df550c9 make mkcephfs and init-ceph osd filesystem handling more flexible
 deabdc8 auth: cephx: increase log levels when logging secrets
 d6cf77d crush: CrushWrapper: don't add item to a bucket with != type than wanted
 95e1fe8 mon: PGMonitor: check if pg exists when handling 'pg map <PG>'
+b2f8035 s3tests: fix typo
 1c715a1 mds: child directory inherits SGID bit
 55081c2 crush: prevent loops from insert_item
 b706945 Try using syscall() for syncfs if not supported directly by glibc
@@ -24503,17 +28461,27 @@ c31f94d set the nonce unconditionally on bind
 afd4da7 doc: Added FAQ back into toc tree.
 0dbf6e8 test_librbd_fsx: Add OP_FLATTEN
 7021f1a test_librbd_fsx: consume saved-image files as test runs
+ddcf208 workunit: fix indentation
+ffd19b6 run xfstests on 3 parallel clients
 b35e37f osdc/Striper: fix handling for sparse reads in add_partial_sparse_result()
 328d72d rgw: signal shuts down fcgi socket
+ca08626 xfstests: run in parallel on multiple machines
 4eb50e6 crypto: fix nss related leak
+1c50db6 rgw-logsocket: a task to verify opslog socket works
 436baa0 java: add Java exception for ENOTDIR
 700b5c0 qa/run_xfstests.sh: drop tests 174 and 181
+8bb3a15 rbd_xfstests: fix the test list, drop 181
 57c8116 doc: filename change to fix a link.
 15f7713 doc: fixed links that broke due to new IA.
+df3b1b8 task/pexec: Output stderr to teuthology log
 f86522c rgw: fix xml parser leak
 98a04d7 rgw: fix memory leaks
 394768b doc: Removed "deprecated" from toctree. Confused some users.
 739bca1 doc: Removing old/unused images.
+d516307 task/ceph-fuse: Add log messages for abort
+837ab3c xfstests: disable 174
+841a289 xfstests: explicitly enumerate test list
+7a602fa workunit: fix default subdir
 288db95 mon: shutdown async signal handler sooner
 45c652d mon/AuthMonitor: refactor assign_global_id
 92d6b8e mon/AuthMonitor: reorder session->put()
@@ -24532,12 +28500,14 @@ bbe2e1a mon: Paxos{,Service}: finish contexts and put messages on shutdown
 988f92a mon: remove all sessions on shutdown
 5cf6c7e ceph_mon: cleanup on shutdown
 68491af rgw: add -lresolv flags to Makefile.am
+fa63dd4 valgrind: enumerate warnings in log; check leaks from client, mon only
 7903aab mon/MonClient: use thread-safe RNG for picking monitors
 07c831a upstart: fix limit lines
 b4a769d upstart: add ceph-osd-all-starter.conf
 ff0a44b upstart: make ceph-osd-all, ceph jobs
 3610754 Makefile.am: fix LDADD for test_objectcacher_stress
 12eb797 client: fix lock leak in lazio_*() failure paths
+2ab8b38 task: benchmark recovery
 be11c31 upstart: set high open file limits
 25f003b msg/Accepter: only close socket if >= 0
 30373ce osd: default journal size to 5GB
@@ -24569,6 +28539,7 @@ fd928b9 ObjectCacher: more debugging for BufferHeads
 0b28ef6 mon: OSDMonitor: fix spacing when outputting items on command reply
 f0c7bb3 build: update for boost_thread library.
 c707568 doc: fix crush set syntax
+43b57de check ceph-mon for leaks
 71cfaf1 os/FileStore: only try BTRFS_IOC_SUBVOL_CREATE on btrfs
 3ca947e mon: clean up 'ceph osd ...' list output
 344c4fd mon: correctly identify crush names
@@ -24579,16 +28550,23 @@ b53e06c DispatchQueue: lock DispatchQueue when for get_queue_len()
 22cb135 doc: add Hadoop configuration parameters
 63c5128 smalliobench: fix init-only, add don-not-init
 b40387d msg/Pipe: fix leak of Authorizer
+6ebd6ba Verifying check-in capability
+69e613f Starting to auto-document this code.
 12c2b7f msg/DispatchQueue: release throttle on messages when dropping an id
 5f214b2 PrioritizedQueue: allow remove_by_class to return removed items
 98b93b5 librbd: use delete[] properly
 4a7a81b objecter: fix leak of out_handlers
 ef4e4c8 mon: calculate failed_since relative to message receive time
 9267d8a rgw: update post policy parser
+87e1fc1 Added comment explaining how setup is tied to actual code.
 f6cb078 mon: set default port when binding to random local ip
 ce28455 rgw: relax date format check
 0a2a0c0 doc: config-cluster move to new IA.
 5c302eb doc: Move of cluster ops for new IA.
+652c429 workunit: Fix indentation
+05065df task/ceph-fuse: If umount fails, abort and cleanup
+cfa2883 pexec: Logging each command isn't useful
+2596404 Add task pexec to run bash commands in parallel
 8430210 doc: add-or-rm-mons.rst: fix typo
 4a34965 client: register admin socket commands without lock held
 4db9442 objecter: separate locked and unlocked init/shutdown
@@ -24597,16 +28575,19 @@ d5bc66a doc/release-notes: fix heading
 74f7607 doc: release-notes for v0.54
 0d42e97 doc: update crush weight ramping process
 131d15a rgw: fix warning
+1a531e5 schedule_suite.sh: less noise
 a0eb891 osd: default pool min_size to 0 (which gives us size-size/2)
 1d00f3a mon: default min_size to size-size/2 if min_size default is 0
 9d979d7 osd: default min_size to size - size/2
 735df02 mon: helpful warning in 'health detail' output about incomplete pgs
 1679a55 osd: start_boot() after init()
 65961ca vstart.sh: support -X by adding 'auth required = none' entries
+5687555 schedule_suite.sh: less noise
 60b84b0 (tag: v0.54) v0.54
 5d27f3d rgw: compile with -Woverloaded-virtual
 1be9923 rgw: fix RGWCache api
 e0e33d2 rgw: fix RGWCache api
+2770ef7 peer.yaml: assumes osd_pool_default_min_size is 1
 9a38059 osd: remove dead rotating key code from init
 eee0982 osd: defer boot until we have rotating keys
 193e2ea PG: persist divergent_priors in ondisklog
@@ -24648,10 +28629,16 @@ e43f9d7 mon: process failures when osds go down
 763d348 mon: ignore failure messages if already pending a failure
 23531c2 osd: add 'osd debug drop op probability'
 efa03ce mon: require pgnum in 'ceph osd pool create <poolname> <pgnum> [<pgp_num>]' command
+25d4f56 misc: Show url on get failure
 cda9e51 librbd: return actual error when detecting format fails during creation
 3d76e67 java: add symlink/readlink tests
 6dd7925 test_libcephfs: fix, add symlink loop tests
+bb2924d libcephfs_java.yaml: Adding the libcephfs-java test to the suite.
 3902a01 debug: adjust default debug levels
+aabca84 fs/verify: check for ceph-fuse leaks
+a46dd6b ceph-fuse: apply overrides[ceph-fuse] to config
+f9b4efe valgrind.supp: deliberate onexit leak
+02d62d7 valgrind.supp: ceph-fuse leak from libfuse
 6c0be02 client: simplify/fix symlink loop check
 d037ff4 client: fix path_walk for directory symlinks
 cd14453 OSDMonitor: remove max_devices and max_osd interdependency
@@ -24668,6 +28655,8 @@ de2cd18 test: add cli test for missing args to rbd
 9aae0ee rbd: check for second argument to mv/rename
 5ef16ed test.sh: Adding LD_LIBRARY_PATH so JUnit can find .so files. Removing useless java.library.path references.
 9e9feff doc/cluster-ops/pools.rst: Added documentation for min_size
+f309c33 Clean up string interpolation operator spacing ceph_manager.py
+f82d4a7 Add divergent_priors test
 b5ce4d0 client: fix SnapRealm leak
 56a152b client: debug SnapRealm reference counting
 a34a9af vstart: allow minimum pool size of one
@@ -24713,6 +28702,9 @@ c3129a2 Changing build.xml to produce a jar instead of class files Adding a Test
 791a822 Adding a simple workunit that executes one libcephfs-java test
 5dec917 Moving test java files into a proper hierarchy. Moving the compilation of test classes from build.xml to Makefile and editing configure.ac to look for the junit4 jar in the default location of /usr/share/java. It is still possible to build and run tests from build.xml as well as Makefile.
 08ee736 java: fix return type on JNI function
+9645838 workunit: Move cleanup to separate run
+f0080b0 workunit: Allow scratch dir to already exist
+ea02fb7 workunit: Add option to use specified subdir
 b1b9f0a vstart.sh: allow shorter MON, OSD, MDS count env vars
 8ccccbb osdmap: make flag names match
 f2bf7aa mon: make osdmap flags incur a health warning
@@ -24749,6 +28741,7 @@ e1195fd doc: Minor edits for admonitions.
 0a939d0 doc: Removed legacy OpenStack installation.
 5ef58d3 doc: Added topic for adding an OSD and adjusting CRUSH weights.
 50d3598 rgw: don't convert object mtime to UTC
+bd83ed7 ceph_manager: add test_min_size action
 c357474 ceph.spec.in:  Remove ceph version requirement from ceph-fuse package.
 cca606f Update log file when rgw is not running
 1348685 osd/: add pool min_size parameter for min acting set size
@@ -24835,7 +28828,9 @@ bcefc0e msg/Pipe: fix leak of AuthSessionHandler
 4bff87d doc: Added qemu caching section.
 f171aba doc: Added Commandline Usage and a few fixes.
 f7412fe crypto: add cms utility function
+6c9d45e schedule: fix var name
 c51e1f9 test script for s3 tests Signed-off-by: tamil <tamil.muthamizhan at inktank.com>
+5f4414e schedule: add option to display jobs in the queue
 a7e1368 test: Add symlink test for absolute paths
 4cd47c4 client: Fix state of symlink with absolute path
 bc4e6b9 doc: Changed term "kernel object" to "kernel module". Added hyperlink to cephx.
@@ -24844,6 +28839,7 @@ bc4e6b9 doc: Changed term "kernel object" to "kernel module". Added hyperlink to
 2b2ac13 doc: Changed RBD to librbd cache for accuracy.
 a01b112 doc: changed --user to --id, and fixed a typo.
 a7551e4 run_xfstests.sh: add optional iteration count
+f88a2f7 rbd task: support xfstests repeat count
 dfc57c4 doc: minor edits to the index.
 968b315 doc: Added layering to the snapshot section.
 6c5de99 osd: add PG state recovery_wait
@@ -24923,6 +28919,8 @@ f0c2e12 mds: Send mdsdir as base inode for rejoins
 ceeebaf mds: Fix stray check in Migrator::export_dir()
 d2ac024 mds: fix stray migration/reintegration check in handle_client_rename
 2f09d47 mon: fix leading error string from 'ceph report'
+0de0772 radosgw-admin.py: add test of deleting user with data.
+818ea4e radosgw-admin.py: add test of deleting bucket with objects.
 9cea181 doc: updated front page graphic.
 65ed99b PG: Do not discard op data too early
 047f58d java: use unique directory in test
@@ -24969,11 +28967,15 @@ c9ca3c9 client: do not reset session state on reopened sessions
 4ac4520 mds: do not mark closed connections disposable
 ad839c7 mds: use connection on closed sessions in force_open_sessions
 53b354d doc: update copyright to include contributors, CC
+53ff33a Use the configured username for _make_scratch_dir
 c33f93d rgw: abort multipart upload uses part manifest
 c0df832 osd: fix populate_obc_watchers() assert
+e10b99a Add exit to kcon_most script
 3eda37a mds: check getcwd() return value
 1afa8ff leveldb: fix accidental submodule revert
+fa1e434 Add fsstress with btrfs and ext4 on krbd
 a5eb72b doc: note raw format required for openstack/rbd
+afcf2ea coverage: note db table structure
 ee2ce73 rgw: multipart manifest uses parts built manifest
 43caeeb rgw: multipart uses striping
 f085708 rgw: remove objects on failed upload
@@ -25013,6 +29015,7 @@ e609c8a librados: add assert_exists guard operation
 eed28da osd: return EOPNOTSUPP on bad class or method name
 31260a3 leveldb: fix accidental revert
 b85cde5 rgw: dump an error message if FCGX_Accept fails
+b4bf14e add exec task
 d4f0a88 workqueue: make debug output include active threads
 4d20b60 msg/Pipe: fix tight reconnect loop on connect failure
 54cab4d throttle: less default log noise
@@ -25020,6 +29023,8 @@ d4f0a88 workqueue: make debug output include active threads
 ac07afa test: Dont check initial permissions
 84a35f8 Makefile: include java in dist tarball
 3cc39f5 rgw: check client write status on swift get_obj
+a8f4f88 fixing the tense of my README change
+41d1257 Edit to README.rst to note that the ssh keys needed in the targets entries are the host's public ssh key and not the user's defined in the same entry
 a755674 rgw: don't continue processing of GET request on error
 2248822 osd: drop conditional check in populate_obc_watchers
 4156b98 osd: populate obc watchers even when degraded
@@ -25179,6 +29184,7 @@ d28ba52 autogen.sh:  Create m4 directory for leveldb submodule.
 151d940 Makefiles:  ignore the m4 macro directory
 3658157 Makefile:  Updates to eliminates warnings, add test for boost system lib.
 a1d8267 cls_rgw: init var in ctor
+ea2182b fix kernel symlink for fs
 8d7c8e3 rgw: don't add port to url if already has one
 662c69e ceph-disk-prepare, debian/control: Support external journals.
 4db1251 logrotate: fix bash syntax
@@ -25187,6 +29193,11 @@ a1d8267 cls_rgw: init var in ctor
 e0bc555 cephx: simplify signature and check code
 d123e31 msg/Pipe: avoid duplicated code to randomize out_seq
 389fac7 rgw: replace bucket creation with explicit pool creation
+ce7d997 New nfs task that performs NFS client mount of export (see knfsd)
+cac4a6a New knfsd task that does an nfs server export. Ability to specify options. By default only export to current hosts
+939c3ae New kcon_most task that enables most ceph kernel logging
+558590c Fix ceph-fuse example
+3ca2e22 Fix typo in README
 0ec4520 rbd: don't issue usage on errors
 7477a08 doc: Added radosgw temp remove.
 041081c doc: Minor clean-up.
@@ -25194,6 +29205,7 @@ d123e31 msg/Pipe: avoid duplicated code to randomize out_seq
 fc34f16 rgw: fix compilation
 15775f8 mon: drop command replies on paxos reset
 06a8d95 doc: Editing and cleanup.
+2e12967 regression: update for new kernel collections
 410ebac test: Allow randomized tests to be repeatable
 baf54b2 client: Reset cache_name pos on dirp
 8518115 rbd cli tests: copy.sh was looking for old version of rbd ls -l output
@@ -25201,6 +29213,8 @@ baf54b2 client: Reset cache_name pos on dirp
 73462f0 cleaned up the perl module
 bd47169 cleaned up the script
 11c51dc radosgw-admin: don't try to parse date if wasn't specified
+fe313a3 kernel: add msgr failure facet to rbd collection
+bc6244a kernel: break basic collection into rbd, kclient
 f268d4e ceph-debugpack: updates
 9810517 doc: separate config from I/O in openstack diagram
 4ebe8c2 doc: clarify rbd openstack docs
@@ -25262,6 +29276,8 @@ e153bce rgw: start moving usage management functions
 a9c9f96 rgw: hide fcgi details from client code
 c038c3f doc: Added admonishments to use separate host for clients.
 fa66eaa mds: Reset session stale timer at end of open
+d948212 Printing the number of tests passed when 'all' tests are successful
+6a0979d radosgw-admin: bump up logging
 48fc340 upstart: OSD journal can be a symlink; if it's dangling, don't start.
 8d718ca osd: Make --get-journal-fsid not really start the osd.
 435a5c1 osd: Make --get-journal-fsid not attempt aio or direct_io.
@@ -25336,9 +29352,11 @@ b8aeb76 mds: Don't drop client request from MDS
 60a5d78 doc: first draft of full OpenStack integration
 e2f6ae0 filejournal: check lseek() return value on darwin
 13b841c osdmap, mon: optional dump 'osd tree' in json
+f868718 rbd: reduce fsx runtimes
 14de7a5 client: Resolves coverity NULL dereference issue
 d250bb8 libcephfs: destroy cmount on ceph_shutdown()
 965ab90 filestore: check ioctl SYNC return code
+7c9dc93 radosgw-admin: usage should time out after 20 minutes
 8237f68 obj_bencher: fix leak in error path
 709ff14 PG: assert auth is not end in _compare_scrubmaps
 7ca754b FileStore: copy paste error _do_copy_range
@@ -25357,6 +29375,8 @@ cc5d05b Changes to existing None and Unknown authentication protocols to handle
 12bba4a OSDCap: parse spaces explicitly
 b57b86a test: remove commented out test cases
 32a6394 logrotate: check for executables to avoid cron errors
+7a593a0 console: add console task
+b22e3ea internal: stop warning about lockdep circular dependency
 2bf3f8c filestore: check lseek64 return value
 409fbb7 filestore: check ioctl WAIT_SYNC return value
 15ebc0f filejournal: check lseek64 return value
@@ -25368,6 +29388,7 @@ ef393b5 mon: fix large pass by value
 238b497 mon: debug recovered_{peon,leader}
 b8cbe26 mon: fix recovered_peon assert
 1708cf8 mon: debug recovered_{peon,leader}
+ee3407f include newpool in osd cap for client.0
 6f7067f mon: avoid large pass by value in MForward
 4878fdc mds: fix MDSCacheObject pin printer * logic
 a3e42d8 mdsmap: init fields in ctor
@@ -25454,6 +29475,7 @@ bdadc4e MemoryModel: init in ctor
 6984037 doc: Added commentary for BMC and VLAN.
 3223b3d ceph.spec.in:  Build rpms with nss instead of cryptopp.
 53c1db9 vstart.sh: let you specific specific ip to bind to
+13c91db misc: use new syntax for osd caps
 0ee6a95 doc: Added commentary for BMC and VLAN.
 cc4dcf6 test: add more OSDCap unit tests
 de6e0d8 OSDCaps: fix allow_all()
@@ -25468,8 +29490,9 @@ a1cfe74 client: Mods to fix #3184 for messenger shutdown
 3c1e2e1 manpage-howto.txt: note that man/Makefile.am may need update too
 eb27f9a Add howto for changing man pages
 d37ca79 mon: update 'auth' help/usage
-1d552a4 rados: fix man page
+1d552a4b rados: fix man page
 8740ddf9 doc: fix rpm url (part deux)
+38f7f3d rbd: add test for reading an up to date header
 6c5c939 librbd: fix includes for portability
 c9266d6 rgw: check that realloc succeeded
 4513397 ReplicatedPG: track incoming pushes with perf counters
@@ -25598,6 +29621,7 @@ abf1bf4 doc: Clean up and added placeholders for new definitions.
 873ccdf doc: Fixed hyperlink.
 441ae00 doc: Restored "Differences from Posix" to /doc/dev
 153fb3b doc: Adds package deps to list in README
+30748f3 fix 'lock held when returning to user space' typo
 e32ee37 librados: cleanup: use Mutex::Locker
 d1de1f1 librados: protect lookup_pool, get_pool_name with lock
 720a301 mon: tolerate no session on no_reply()
@@ -25605,11 +29629,18 @@ a794c93 ReplicatedPG: set op return version to pg version on ENOENT
 53b18e3 osd: catch decoding errors from client ops
 ef29d90 osd: some whitespace
 a73777a osd: return -EPERM on insufficient caps
+a09153b Allow scheduled jobs to use different teuthology branches
+57bb434 Fix errors found by pyflakes
 3b4f754 msg/Accepter: assert that listen_sd always >= 0
 f13eaf4 msg/Accepter: fix race in accepter shutdown
 0c7637d rgw: prepare_update_index should not error on system bucket
+6a2be38 smoke: add cls unit tests in validator
+8eaaddc rados: run class unit tests through validators
+31a2a83 move rgw tasks to separate suite
 9aa467d ceph-object-corpus: update with v0.52 objects
+675e9f3 rados: test all rados classes
 58ad4dd uuid: include unistd to make encode testing work
+cbfb056 test_cls_rbd has moved
 0c07607 qa: move all rados class unit tests into qa/workunits/cls
 420baa9 Makefile: rename test_rados_api_cls_lock -> test_cls_lock
 69743c1 vstart.sh: Alternative fix for vstart.sh -n
@@ -25620,6 +29651,7 @@ c9e81fd Makefile.am: librdb_fsx test needs math library on some platforms
 671c4c3 ceph_common.sh: 'hostname -s' (instead of cut ...)
 51d5815 doc: v0.48.2argonaut release notes
 fd205da cfuse: Add the parent entry (..) for a top-level readdir
+0395df3 ignore 'lock held when returning to user space' from btrfs sb_internal crap
 d5a048c doc: Removed legacy doc. Not needed.
 9fa5720 doc: Removed legacy doc. Not needed.
 64198f8 doc: Removed legacy doc. Not needed.
@@ -25643,6 +29675,8 @@ faddb80 Swap current dir (.) with CEPH_BIN for OOT builds
 db04ce4 mon: make MRoute encoding backwards-compatible
 e068bd7 rbd/copy.sh: fix typo
 ed43d4d rbd/copy.sh: fix typo
+a101e49 replace tab with spaces
+5ce4d70 fix error on teardown failing to unmount /mnt
 7a3d1e6 librbd: bump version
 855dff6 cls_rbd: remove locking methods
 8f2a0d9 rbd: add locking commands
@@ -25690,6 +29724,7 @@ b64641c osd: include boot_epoch in MOSDBoot
 ed18eea :doc: Changed rados.gateway to radosgw.gateway. Start with /etc/init.d.
 900e4ce workqueue: allow thread pool size to be adjusted dynamically
 5e095ec :doc: Trimmed up the stack diagram and fixed a few hyperlink refs.
+808e2b2 update doc to be ceph-fuse task (instead of cfuse)
 5350830 :doc: Modified hostname to hostname -s.
 7c178be :doc: Modified hostname to hostname -s.
 5936ded :doc: Modified hostname to hostname -s.
@@ -25742,7 +29777,10 @@ cfe4830 rgw: set atomic context for copy operation src and dest
 20c7852 rgw: use refcount put instead of obj delete where needed
 78d6a60 qa: test args for rbd import
 fd4b294 rbd: make --pool/--image args easier to understand for import
+78b7b02 imported subprocess module in nuke script
 0bf7723 doc: fix indentation
+d27806a nuke: add missing import
+c8c7014 rbd: fix typo and cast to int before comparing format
 cf6899d Adding RBD CLI test automation script, RbdLib and cephscrub.sh
 539786a osd: move permissions check into pg op thread
 221fc78 osd: drop unnecessary can_discard_request() in PG::queue_op()
@@ -25762,6 +29800,7 @@ f4210f8 mon: include global versions in paxos, slurp messages
 d697b54 test/rados-api/aio.cc: use read operation for omap cmp test
 c1372f9 osd/OSD.cc: Fix typo in OSD::heartbeat_check()
 e89cab6 osd/ReplicatedPG: set truncate_seq when handling CEPH_OSD_OP_APPEND
+055bf73 rbd: only specify --format if not using the default
 c73c440 Makefile: rename 'core' -> 'base', add a few things
 24c3cae librbd, cls_rbd: close snapshot creation race with old format
 31560ca Rejig the way the shared libraries are processed so that manual postinst/postrm scripts are not required for lib* packages, ensuring that the .so's in the ceph package are not detected
@@ -25787,6 +29826,7 @@ dd9819e doc: make note of crush usage change
 0817b94 mon: make redundant osd.NNN argument optional
 01a8146 ceph tool: add 'osd crush create-or-move ...' to help
 44fa233 :doc: Deleting this. Wrote a new one, but will be revised a bit soon.
+79607ee Don't lose tracebacks of exceptions raised in a greenlet.
 32f30f9 :doc: Removed old ops pool section.
 0313365 :doc: Removed old authentication section.
 d1053d9 :doc: Removed old resize OSD section.
@@ -25805,9 +29845,17 @@ b2409a2 mon: 'osd crush create-or-move <id> <initial-weight> <loc ...>'
 adedd6b crush: create_or_move_item()
 588b263 crush: get_item_weight[f]()
 f8d9f86 osdmap: 4 decimal places for osd tree
+f64cedf rbd: allow xfstests task to specify rbd image formats
+73a29cd rbd: allow image format to be specified
 d51d7b3 rgw: fix rgw_dir_suggest_changes() to set header_changed
+b6f5d12 changed the debug value for mds from 10 to 20
+39efbbc Suppress valgrind error "Invalid write 8"
 f8c365e rgw: add missing ret code check
+d6c2ded radosgw-admin: update task for new usage reporting
+be426d1 schedule_suite.sh: try to use same branch for s3-tests.git
+3473c2e s3tests: run against arbitrary branch/sha1 of s3-tests.git
 0cfac6d librbd: bump version
+db8037d debian ntp servers
 d77205d objecter: remove the now-unused "registering" flag on LingerOps
 33def83 cls_rbd: remove locking methods
 eeaa92c rbd: add locking commands
@@ -25878,6 +29926,7 @@ ef6814d doc: Promoting PG concepts into mainline docs. Redundant version still i
 2a1ac68 doc: Added a new CRUSH map section. Will need to incorporate new tunables info.
 fc093f8 doc: Moving new auth section from configuration to operations.
 e09b265 objecter: fix osdmap wait
+f8e1f5c task: die on ceph error or coredump
 c1e000b doc: Fix leftover "localhost" mention.
 3302a2d doc: Added debug ref to toctree. Trimmed title names a bit.
 4609639 doc: Added "how to" for debug/logging config. Trimmed titles too.
@@ -26011,12 +30060,15 @@ bd534bf mon: make parse_pos_long() error message more helpful
 c7d11cd osd: turn off lockdep during shutdown signal handler
 c03ca95 (tag: v0.51) v0.51
 aa91cf8 mon: require --id
+dc1c247 disable lockdep recursive warnings until #3040 is fixed
 5fd2f10 mon: fix int parsing in monmon
 31c8ccb mon: check for int parsing errors in mdsmon
 304c08e mon: check for int parsing errors in osdmon
 3996076 interval_set: predeclare const_iterator
 ef4ab90 Makefile: update coverity rules
 6b1f23c librbd-dev.install: package new rbd/features.h header file.
+e386fb9 rbd: add msgr failure injection
+d8bc55e avoid doing filestore idempotency tester 2x w/ and w/o msgr failures
 d9bd613 mon: describe how pgs are stuck in 'health detail'
 bcd4b09 osd: fix use-after-free in handle_notify_timeout
 e97f1c5 ceph.spec.in: package new rados library.
@@ -26036,20 +30088,25 @@ fed8aea rbd: force all exiting paths through main()/return This properly destroy
 f0e746a mon: name cluster uuid file 'cluster_uuid'
 cada8a6 objecter: use ordered map<> for tracking tids to preserve order on resend
 91d5c19 Don't package crush header files.
+b6b3028 internal: fix escaping of \b in syslog grep
+82cefa2 suppress this valgrind error
 4905c06 mon: create cluster_fsid on startup if not present
 7fde8e9 mon: create, verify cluster_fsid file in mon_data dir on mkfs
 b207b15 cephfs: add 'map' command to dump file mapping onto objects, osds
 0f9f63a perf-watch: initial version
 1113a6c objecter: use ordered map<> for tracking tids to preserve order on resend
 a5901c6 doc: Either use a backslash and a newline, or neither.
+92d3404 rados: add msgr failure injection
 ec90d3f cls_rgw: add gc commands handling
 e4a78d2 config_opts: add gc configurables
 7dd5d06 cls_lock: specify librados namespace explicitly
 eda5a76 cls_rgw: cleanups
 e7c492b mon: implement 'ceph report <tag ...>' command
 8f95c1f config: remove dead osd options
+14ce35a move kclient + blogbench to marginal
 bfb24a7 Fix compilation warnings on squeeze; can't printf() snapid_t directly
 bb1e65e rgw: use sizeof() for snprintf
+035f49a include mds debugging on ffsb
 4a0704e osd: fix requeue order for waiting_for_ondisk
 1a09423 rgw: dump content_range using 64 bit formatters
 ddbef47 Revert "rgw: dump content_range using 64 bit formatters"
@@ -26060,6 +30117,7 @@ ddbef47 Revert "rgw: dump content_range using 64 bit formatters"
 5642a5e test_rbd.py: remove clone before image it depends on
 cc435e9 rgw: dump content_range using 64 bit formatters
 dd4c1dc osd: fix requeue order of dup ops
+aa4ea5f marginal: remove verify collection (unused)
 6ae216e osd: fix warning
 60fdb6f init-ceph: use SSH in "service ceph status -a" to get version
 5c70392 doc: mkcephfs man page, -c ceph.conf is not optional
@@ -26068,6 +30126,8 @@ dd4c1dc osd: fix requeue order of dup ops
 19ea312 mon: add MonitorStore::sync()
 223d6fa crypto: cache CryptoHandler in CryptoKey
 cfe211a doc: fix key export syntax
+b800496 ceph: fix cpu_profile default
+7d50411 rbd.xfstests: default to 1gb (not 250mb) image
 f965358 Roll up loose ends from a marathon merge/rebase session
 380b047 Review:
 5a295c8 librbd: snap_protect: verify layering is supported
@@ -26106,6 +30166,7 @@ b403db1 rbd: update man page to avoid deprecated --secret, --user
 e0b094b keyring: make --key, --keyfile override loaded keyring
 7b57931 config: make --user a synonym for --id
 b2d6ea7 librbd: add test for discard of nonexistent objects
+5b7ec43 task: run osd/mds/mon with Google CPU profiler via cpu_profile option
 a3ad98a librbd: hide ENOENT on discard
 7141a6c msg/Pipe: log port number
 1daeb2d cpu_profiler: drop start, stop commands
@@ -26118,6 +30179,7 @@ d01c126 crypto: remove old crypto globals
 becf206 osd: avoid dereferencing pg info without lock
 130b559 mutex: add is_locked_by_me()
 da48658 run-cli-tests: Check that virtualenv is found.
+7f6591b ceph: support tmpfs_journal option to put journal on tmpfs
 ef80abe msg/SimpleMessenger: fix leak of local_connection
 34d626d test_librbd_fsx: fix leak
 6e44e9e SyntheticClient: fix warnings
@@ -26149,6 +30211,7 @@ c6ae5e2 objectcacher: fix bh leak on discard
 24a26c62 mkcephfs: fix mon_data check
 3d3d91d osdmap: apply mon_max_osd when generating osdmap from conf
 1a5e12e osdmap: fix pg_num calculation when generating osdmap from conf
+6dbbcf0 queue: fix logging of child return code
 cca85af global: only print banner if type is daemon
 615f85d mon: throttle daemon messages independently from client messages
 9fc7958 filejournal: instrument journal write counts, sizes
@@ -26161,6 +30224,7 @@ b254ba7 mon: require CRUSH_TUNABLES when latest osdmap has tunables set
 da35b4c msgr: make set_policy_throttler safe, act on default
 73218a7 msgr: make set_policy() and set_default_policy() safe to re-use
 5ab4939 doc: v0.48.1argonaut release notes, changelog
+99ac6b0 Disable asynchronous DNS lookups.
 75172c7 msg/Accepter: fix nonce initialization
 294c25b ceph-osd: log journal-creation failure with derr
 8af2cf3 msgr: expose get_policy() through generic Messenger API
@@ -26196,6 +30260,8 @@ cd5d724 librbd: fix memory leak on error in clone
 c77f0fb rbd: add snap [un]protect commands
 8d5f1e9 librbd: add methods for protecting/unprotecting snapshots
 cc8eac2 rgw_admin.cc: Allow for deletion of objects through radosgw-admin.
+273a43e Flush data to temp file before reading it in another process.
+8aaf21d Oops tempfile now gives us file objects not fds.
 f9359f0 doc: New example usage.
 6bc1067 rgw: fix usage trim call encoding
 04a0eac cls_rgw: fix rgw_cls_usage_log_trim_op encode/decode
@@ -26205,6 +30271,9 @@ d39ea1d rgw: complete multipart upload can handle chunked encoding
 03b787e rgw_xml: xml_handle_data() appends data string
 3809e34 rgw: ETag is unquoted in multipart upload complete
 52f03dc doc: Added debug and logging reference.
+99e9975 In teuthology-worker, shuffle the child stdout/stderr into our log.
+05007f7 Minimize scope of try-except.
+4b9e176 Use tempfile.NamedTemporaryFile instead of mkstemp.
 d78dfe5 mkcephfs: use default osd_data, _journal values
 3c90ff4 mkcephfs: use new default keyring locations
 c03f744 keyring: make from_ceph_context() a member
@@ -26233,6 +30302,8 @@ beccac9 librbd: don't open parent again during get_parent_info
 c961a20 doc: Fix toctree structure for man obsync(1).
 5db3a9e rgw_admin.cc: Disallow addition of S3 keys with subuser creation
 4e40a78 ceph-authtool: Fix usage, it's --print-key not --print.
+3b85b23 task: verify scrub detects files whose contents changed
+8665bdc task: scrub OSDs periodically
 25de5e5 Revert "osd: peering: detect when log source osd goes down"
 203dffa doc: cd to repository before running git status.
 48de9b5 doc: Say what to do if submodules are out of date.
@@ -26240,10 +30311,12 @@ c961a20 doc: Fix toctree structure for man obsync(1).
 6af560d doc: Correct Git URL for clone
 0d3d75e osd: peering: detect when log source osd goes down
 ca2c381 osd: peering: detect when log source osd goes down
+c2ff66e crank up pjd debugging
 bb6e0d0 wireshark: update patch
 deec81b ReplicatedPG: clear waiting_for_ack when we send the commit
 f22b95d rbd: fix off-by-one error in key name
 e775ce5 secret: return error on empty secret
+cd0b527 separate regression suite into topical categories rados, rbd, fs
 cda5e8e PG,ReplicatedPG: clarify scrub state clearing
 6d464a2 PG::mark_clean(): queue_snap_trim if snap_trimq is not empty
 1041b92 ReplicatedPG::snap_trimmer: requeue if scrub_block_writes
@@ -26267,11 +30340,19 @@ fb1d549 os: KeyValueDB: re-implement (prefix) iter in terms of whole-space iter
 effdec9 auth: introduce cluster, service, and client auth settings
 ec6ecc1 auth: AuthSupported -> AuthMethodList
 4a0a7e2 auth: (ordered) list of auth methods, not a set
+e4e239e kernel: push a local .deb instead of using gitbuilder
+1c93d5a syslog check: fix false-positive BUG matches in random strings
 9d43c8a test: workloadgen: Don't linearly iterate over a map to obtain a collection
+85187ed add osd-recovery-incomplete
+a084769 osd_recovery: also test unfound discovery
+8dd09cb osd_recovery: test incomplete pg recovery
+a9f2bf6 ceph_manager: wait_for_active
+731d520 ceph_manager: count 'incomplete' as 'down'
 bae8370 osd: peering: make Incomplete a Peering substate
 d1602ee osd: peering: move to Incomplete when.. incomplete
 d612694 config: send warnings to a ostream* argument
 de4474a vstart.sh: apply extra conf after the defaults
+0b8b58f fix adminsocket test
 bbc4917 msg/Pipe: if we send a wait, make sure we follow through
 6c01d46 client: handle fault during session teardown
 a879425 msg/Pipe: make STANDBY behavior optional
@@ -26297,15 +30378,19 @@ c2e1c62 mutex: assert we are unlocked by the same thread that locked
 aef10e7 librbd: fix id initialization in new format
 17bb78a librbd: fix id initialization in new format
 5601ae2 mon: set a configurable max osd cap
+0d6ce42 Fixed the code to pass 'yes' during mkfs
+2b75dde Added '-y' option for mkfs.ext4
 bcb9ab8 doc: updates to fix problem with ceph-cookbooks appearing in chef-server.
 9767146 osd: generate past intervals in parallel on boot
 d45929f osd: move calculation of past_interval range into helper
 18d5fc4 osd: fix map epoch boot condition
+61ff894 admin-socket: test generic admin socket commands
 11b275a osd: avoid misc work before we're active
 278b5f5 mon: ignore pgtemp messages from down osds
 08e2eca mon: ignore osd_alive messages from down osds
 404a7f5 admin_socket: json output, always
 0133392 admin_socket: dump config in json; add test
+9bc8617 admin_socket: make test optional
 0ef8cd3 config: fix 'config set' admin socket command
 f565ace osd: fix pg log zeroing
 d67ad0d Wireshark dissector updated to work with the current development tree of wireshark. The way I patched it is not really clean, but it can be useful if some people quickly need to inspect ceph network flows.
@@ -26331,6 +30416,12 @@ df71c2d librbd: store parent info in snapshot metadata
 d5e4541 cls_rbd: return negative pool id if parent does not exist
 9b9efe7 cls_rbd_client: fix locking function indentation
 5fcb22f mkcephfs: add sync between btrfs scan and mount
+d0698f9 schedule_suite.sh: put coverage option in ceph section
+deb7a54 marginal kclient+ffsb: enable mds logging to catch badness
+32353f1 move misc, blogbench back into active kernel suite
+000fec3 move all kernel tests to kernel suite; symlink collections from regression
+17e4f75 this fails reliably
+f70b825 ceph: fix mkfs/mount option defaults
 2d7e2cb crush: fix name map encoding
 b497bda osd/OpTracker: fix use-after-free
 7cf1f1f msg/Pipe: go to STANDBY on lossless accept fault
@@ -26355,13 +30446,15 @@ cef8510 msg/Connection: add failed flag for lossy Connections
 5ecc5bc msg/DispatchQueue: cleanup debug prefix
 89b07f4 msg/Pipe: move tcp_* functions into Pipe class
 d034e46 msgr: move Accepter into separate .cc
-3e98617 msg/Pipe: get_state_name()
+3e98617c msg/Pipe: get_state_name()
 f78a401 msgr: rework accept() connect_seq/race handling
+ec4ce8b regression: do some tests on ext4
 a6735ab OpRequest,OSD: track recent slow ops
 9e207aa test/store_test.cc: verify collection_list_partial results are sorted
 49877cd cls_lock: cls_lock_id_t -> cls_lock_locker_id_t
 315bbea cls_lock: document lock properties
 056d42c cls_log: update a comment
+da77014 move cfuse+dbench back to regression for verify, too
 2c7d782 rados: lock info keeps expiration, not duration
 d16844c rados tool: add advisory lock control commands
 2f8de89 cls_lock: objclass for advisory locking
@@ -26374,19 +30467,23 @@ dec9369 osd/mon: subscribe (onetime) to pg creations on connect
 7f58b9b mon: track pg creations by osd
 4c6c927 Revert "rbd: fix usage for snap commands"
 42de687 rbd: fix usage for snap commands
+e1c98e7 tasks: add multibench task for testing pool creation
 58cd27f doc: add missing dependencies to README
 6f381af add CRUSH_TUNABLES feature bit
 e3349a2 OSD::handle_osd_map: don't lock pgs while advancing maps
 c8ee301 osd: add osd_debug_drop_pg_create_{probability,duration} options
 8f5562f OSD: write_if_dirty during get_or_create_pg after handle_create
 ca9f713 OSD: actually send queries during handle_pg_create
+c49daec clock: print skew with ntp servers to log to help debug time issues
 5dd68b9 objecter: always resend linger registrations
+55847fc nuke: log what pid we are killing when we kill it
 76efd97 OSD: publish_map in init to initialize OSDService map
 7586cde qa/workunits/suites/pjd.sh: bash -x
 675d630 ObjectCacher: fix cache_bytes_hit accounting
 4e1d973 doc: Fixed heading text.
 ebc5773 doc: favicon.ico should be new Ceph icon.
 3a377c4 doc: Overhauled Swift API documentation.
+5c5ca4b move cfuse + dbench from marginal to regression
 d78235b client: fix readdir locking
 82a575c client: fix leak of client_lock when not initialized
 90ddc5a OSD: use service.get_osdmap() in heartbeat(), don't grab map_lock
@@ -26394,6 +30491,7 @@ d78235b client: fix readdir locking
 32892c1 doc/dev/osd_internals: add newlines before numbered lists
 fe4c658 librados: simplify locking slightly
 199397d osd: default 'osd_preserve_trimmed_log = false'
+12dc0ad ceph: archive mon data to a .tgz
 24df8b1 doc/dev: add osd_internals to toc
 5a27f07 doc/internals/osd_internals: fix indentation errors
 6490c84 doc: discuss choice of pg_num
@@ -26418,6 +30516,9 @@ c7fb964 PG::RecoveryState::Stray::react(LogEvt&): reset last_pg_scrub
 470796b CompatSet: users pass bit indices rather than masks
 b7814db osd: based misdirected op role calc on acting set
 14d2efc mon/MonitorStore: always O_TRUNC when writing states
+ff0f474 set machine description to ctx.archive when auto-locking machines for a run
+811665a move cfuse + ffsb from marginal to regression
+88e2ad3 move cfuse + fsx back into regression suite
 f94c764 mon: remove osds from [near]full sets when their stats are removed from pgmap
 fe57681 mon/MonitorStore: always O_TRUNC when writing states
 bf9a85a filestore: dump open fds when we hit EMFILE
@@ -26425,18 +30526,29 @@ a278ea1 osdmap: drop useless and unused get_pg_role() method
 38962ab osd: based misdirected op role calc on acting set
 6faeeda osd: simplify helper usage for misdirected ops
 ed4f80f vstart: use absolute path for keyring
+042edcb schedule/suite: schedule job, suite N times
 117b286 OSD: add config options to fake missed pings
 ce20e02 crushtool: allow information generated during testing to be dumped to a set of CSV files for off-line analysis.
 8a89d40 doc: remove last reference to ceph-cookbooks.
 2011956 doc: cookbooks issue resolved, so changed 'ceph-cookbooks' back to 'ceph.'
+f37214e kernel: fix kernel installation when kdb: is specified
 5a5597f qa: download tests from specified branch
+f5d1a32 schedule_suite.sh: use workunits from ceph commit
 5360079 OSD: send_still_alive when we get a reply if we reported failure
+b0985e4 ceph: add default btrfs mkfs options
+014fb97 ceph: cleanup/simplify mount/mkfs options
+d0c9e49 workunit: allow overrides
+a50bf67 workunit: allow branch/sha1/tag to be specified
 5924f8e PG: merge_log always use stats from authoritative replica
+58126b0 workunit: pass branch/sha1 to test
 3dd65a8 qa: download tests from specified branch
 ce7e0be mon: use single helper for [near]full sets
 30b3dd1 mon: purge removed osds from [near]full sets
+1741cb6 Added functionality to get mkfs and mount options for file systems from the config file, if present. Otherwise, default options are used.
 bcfa573 ReplicatedPG: don't mark repop done until apply completes
+353d9cc fixed typo
 10ec592 test_librbd: fix warnings
+f6a16f7 fix wrongly marked down whitelist
 5450567 ReplicatedPG,PG: dump recovery/backfill state on pg query
 508bf3f rbd: enable layering when using the new format
 dfe29af doc: reverted file and role names.
@@ -26447,6 +30559,7 @@ f33c0be rgw: don't override subuser perm mask if perm not specified
 09c60b4 doc: added :: to code example.
 ad8beeb doc: minor edits.
 63a1799 doc: cookbook name change broke some things in doc. Fixed.
+51148b8 radosgw-admin: use --bucket instead of old --bucket-id
 65c43e3 debian: fix ceph-fs-common-dbg depends
 cc8df29e rados tool: bulk objects removal
 99a048d rados: more usage cleanup
@@ -26458,10 +30571,19 @@ e5997f4 doc: added DHO config.
 675a1b7 crush: Set maximum device/bucket weights.
 c9fc5a2 crush: prevent integer overflow on reweight
 d29ec1e rados: usage message: bad linebreaks, wrapping, stringification, missing doc for bench args
+9b28948 nuke: honor 'check-locks: ...' field in targets file
+3abc412 internal: archive mon data dirs
+cff2cfa internal: move pulling archive w/ tar to helper
+986c0ef rbd: test with layering enabled
 2c001b2 Makefile: don't install crush headers
 22d0648 librados: simplify cct refcounting
 c5bcb04 lockdep: stop lockdep when its cct goes away
+fe6ed3e ffsb is marginal, remove from smoke suite
 7adc6c0 mon: simplify logmonitor check_subs; less noise
+19742ec Revert "smoke: add msgr failures"
+657c9db move cfuse fsx into marginal suite
+c3e1ab4 remove suites/stress/basic
+392a659 move some old flaky tasks into marginal suite
 a542d89 mds: fix race in connection accept; fix con replacement
 0f917c2 osd: guard class call decoding
 0ff6c97 test_stress_watch: just one librados instance
@@ -26510,6 +30632,8 @@ ec87a1e cls_rbd: add logging for failed set_parent steps
 b23e4e5 doc: Removed legacy paths and keyname settings from examples.
 b387077 debian: include librados-config in librados-dev
 8e5fe62 doc: remove reference to 'ceph stop' command
+9ea2213 use sudo to kill teuthology proc
+98a21cc move qemu_iozone test to marginal suite
 03c2dc2 lockdep: increase max locks
 b554d11 config: add unlocked version of get_my_sections; use it internally
 01da287 config: fix lock recursion in get_val_from_conf_file()
@@ -26536,6 +30660,7 @@ b486f2f client: fix locking for SafeCond users
 d751006 rados tool: copy object in chunks
 16ea64f rados tool: copy entire pool
 960c212 rados tool: copy object
+ed3bd21 increase thrashosds timeout
 23d31d3 ceph.spec.in: add ceph-disk-{activate,prepare}
 668ce00 osd: make on_removal() pure virtual
 3d00130 osd: fix PG dtor compile error
@@ -26553,6 +30678,7 @@ c70392a doc: minor typo
 57bc8da doc: minor updates to the reStructuredText file.
 0659f7c doc: minor cleanup.
 1c9e1c6 doc: Publishing as described. Still requires some verification and QA.
+e5fb499 run: make -a short for --archive
 7e26d6d PG: C_PG_MarkUnfoundLost put pg in finish
 31db8ed OSD::activate_map: don't publish map until pgs in deleted pools have been removed
 7f2354c doc/scripts/gen_state_diagram.py: make parser a bit more forgiving
@@ -26623,21 +30749,30 @@ d015823 OSD,PG: push message checking to pg
 1ffd190 PG: CephPeeringEvt
 18fec69 OSD,PG::scrub() move pg->put() into queue process
 ea11c7f Allow URL-safe base64 cephx keys to be decoded.
+2499bd3 watch-suite: stupid script to watch teuth run progress
+132dc00 nuke: be more careful about kill; simplify
+6dbf53e nuke: nuke based on archive path
+45fcca1 valgrind: add strptime suppressions
 7fa8579 osd: add missing formatter close_section() to scrub status
+12a1f62 move other ffsb workloads to marginal suite
 f67fe4e librados: Bump the version to 0.48
 bcfcf8e librados: add assert_version as an operation on an ObjectOperation
 39eaa23 ReplicatedPG: do not set reply version to last_update
 e6e36c0 rgw: initialize fields of RGWObjEnt
+fb9d39d move locktest to marginal suite
 35b9ec8 rgw-admin: use correct modifier with strptime
 da251fe rgw: send both swift x-storage-token and x-auth-token
 4c19ecb rgw: radosgw-admin date params now also accept time
 6958aeb rgw-admin: fix usage help
+e07b711 Added a debug message
 ad97415 ceph-disk-prepare: Partition and format OSD data disks automatically.
 a1696fe doc: removed /srv/osd.$id.journal  from ceph.conf example.
 8f64647 CrushTester.cc: remove BOOST dependencies.
 7e23aad doc: Updates to 5-minute quick start.
 83c043f radosgw-admin: fix cli test
 c667f5d lockdep: increase max locks
+b99d11c schedule_suite: use the sha1, not branch name
+f3c2451 nuke - optionally kill the process hung
 12ba580 config: add unlocked version of get_my_sections; use it internally
 5674158 ceph: fix cli help test
 fc18cca doc: Clean up of 5-minute quick start.
@@ -26678,13 +30813,17 @@ aa3255b qa: add tests for rbd ls with old and new formats
 0ad14c9 qa: add rbd command rename tests
 6f096b6 librbd: use new class methods to access rbd_directory
 f2d37c5 librbd: handle NULL old_format pointer in detect_format()'s logging
+9278e23 smoke: add msgr failures
 2628530 doc: fixed --cap error and a few additional bits of cleanup.
+b9414b6 fewer hosts for mon tests
 2472034 OSD::do_command: unlock pg only if we had it
 841451f MOSDSubOp: set hobject_incorrect_pool in decode_payload
 b91beca doc: Added sudo and cleaner instruction for unmap.
+38aa344 ceph: fix valgrind error check
 f16a9c4 doc: updated usage on RBD kernel object commands.
 e186013 doc: drop mention of MDS capabilities from radosgw man page
 deceb70 filestore: initialize m_filestore_do_dump
+96ccb06 add rbd_xfstests to kernel suite
 31ad263 doc: add design doc for rbd layering
 398a229 filestore: set min flush size
 0810ab6 osdmap: check new pool name on rename
@@ -26696,13 +30835,16 @@ cc4955a PG: reset_recovery_pointers in activate if missing is empty
 a8d7fd9 mon: 'osd pool rename <oldname> <newname>'
 02f1b0a doc: document new 'osd crush move ...' command
 f5e3a67 mon: fix 'osd crush move <item> <loc ...>'
+2e5853f Now using daemon-helper
 5e454bb mon: add 'osd crush move <name> <loc ...>' command
 a2d0cff crush: add move_bucket() method
 d22529d crush: get_loc -> get_immediate_parent
 d7c18c1 PG::merge_old_entry: handle clone prior_version case
 b907c88 ReplicatedPG: adjust log.complete_to based on rmissing
 143afcf ReplicatedPG: clear peer_(missing|log)_requested in check_recovery_sources
+3321700 qemu_iozone: use a larger image
 090e510 PG: check_recovery_sources on each map
+74b1468 kernel suite
 c3a02ea doc: radosgw: further simplify Apache rewrite rule
 76c657e cls_rbd: add methods for dealing with rbd_directory objects
 de62c4c objclass: add create method
@@ -26713,8 +30855,11 @@ a1d477b librbd: add indirection between name and header object
 147114d doc: minor edits.
 15ebf20 rest-bench: mark request as complete later
 5c23d35 buffer.h: fix operator> and operator>= increment error
+c6b2e93 add cleanup-user.sh script
+f81fead schedule_suite.sh: drop -x
 335b918 DBObjectMap: clones must inherit spos from parent
 cc1da95 filestore: sync object_map object in lfn_remove when nlink > 1
+8c453cc cleaned up commented code
 218dd5a radosgw Apache configuration: simplify rewrite rule
 5a06af7 Fix example radosgw Apache configuration
 6a5c155 objclass: remove unused variable from cls_cxx_map_get_vals
@@ -26724,6 +30869,7 @@ f969b59 cls_rbd: fix signed-ness warning
 0d9b558 debian: arch linux-any
 8949232 debian: build with libnss instead of crypto++
 9d7f048 doc/config-cluster/authentication: keyring default locations, simplify key management
+1a43c34 Added blktrace task
 f366173 pg: report scrub status
 c94583e pg: track who we are waiting for maps from
 ecd7ffe pg: reduce scrub write lock window
@@ -26740,14 +30886,21 @@ efb74a9 doc: converted daemon references from nasty tables to lines.
 df7729e CrushTester: eliminated compiler warning
 88e3154 radosgw-admin: improve man page
 63b562f doc: explain how to configure Ceph for radosgw
+1db84dd include ceph task in librbd collection
 07029a4 rgw: send both swift x-storage-token and x-auth-token
 80a939a rgw: radosgw-admin date params now also accept time
 c87c83f rgw-admin: fix usage help
+aa89e6a move kclient_workunit_suites_ffsb to marginal suite
+cc380de ignore DEADLOCK line inside lockdep splat
 77fcf06 upstart: fix regex
 840ae24 mon: don't tick the PaxosServices if we are currently slurping.
 ef6beec objecter: do not feed session to op_submit()
 4e45d60 ObjectStore::Transaction: initialize pool_override in all constructors
 ff67210 objecter: do not feed session to op_submit()
+48e8e0a Add script to create a vm image with extra packages
+38f6a78 Add a task to run a test against rbd inside of qemu.
+94a6ab8 Add some tests inside qemu for the librbd suite
+a92306a Move librbd tests to rbd suite
 9fcc3de osd_types.cc: remove hobject_t decode asserts
 80649d0 mon: note that monmap may be reencoded later
 77d836c mon: encoding new monmap using quorum feature set
@@ -26757,7 +30910,9 @@ c399d90 mon: conditionally encode PGMap[::Incremental] with quorum features
 06288a9 mon: track intersection of quorum member features
 2355b23 mon: conditionally encode old monmap when peer lacks feature
 2fe9816 OSD,PG,ObjectStore: handle messages with old hobject_t encoding
+03597ca Check for machine args based on local, not ctx.machines. Signed-off-by: Dan Mick <dan.mick at inktank.com>
 ddf7e83 doc: ceph osd crush add is now ceph osd crush set
+7773a93 whitelist current lockdep warnings in syslog
 448f5b0 logrotate: reload all upstart instances
 58db045 docs: clarify example in radosgw-admin
 7044192 filestore: remove btrfs start/end transaction ioctl code
@@ -26766,9 +30921,14 @@ ddf7e83 doc: ceph osd crush add is now ceph osd crush set
 1e539da doc: Normalized shell script syntax. Added generic cookbook path.
 7d38758 doc: Changed libvirt-dev to libvirt-bin, and cleaned up ./autogen.sh
 3e32dd0 doc: Typo.
+c8e1ec6 record owner at start of run
+845e6c2 move cfuse + dbench task that triggers #1737 to marginal suite
 c467d9d (tag: v0.47.3) v0.47.3
+218b692 teuthology-ls: tolerate non-existent 'success' key in summary file
 17dcf60 filestore: disable 'filestore fiemap' by default
 88c7629 OSD: clear_temp: split delete into many transactions
+233bc8d schedule_suite: enable kdb
+286e639 kernel: enable/disable kdb
 b84e1ed doc: document usage log
 25311e9 cls_rbd: note overlap units
 156879f cls_rbd: fix uninitialized var in cls_rbd_parent
@@ -26780,6 +30940,7 @@ e8b36ed cls_rbd: make snapshots inherit the head's parent
 f87c441 cls_rbd: check for LAYERING feature for parent methods
 2f75b46 cls_rbd: implement get_parent, set_parent, remove_parent
 c7f91e6 cls_rbd_client: use snapid_t type
+ab42b8d add usage log tests to radosgw-admin tasks
 145d1c1 rgw: set s->header_ended before flushing formatter
 8a4e2a1 rgw: log user and not bucket owner for service operations
 282e226 rgw: initialize s->enable_usage_log
@@ -26802,11 +30963,16 @@ b101f4c doc: first cut of OpenStack/RBD w/out cephx. Authentication coming soon.
 95ac5ba debian: fix python-ceph depends
 d7fe0e3 debian: update homepage url
 82cb3d6 filestore: fix 'omap' collection skipping
+372fbe0 sync clock at start of every run
 343cc79 run-cli-test: use new pip incantation
 e3b6957 run-cli-test: use new pip incantation
 3d4ba43 cls_rbd: do not pass snapid_t to vargs
+a4589c6 don't dup ceph task for new fsx jobs
+f7ee34b tolerate 250ms clock drift
 5efaa8d msg: fix buffer overflow in ipv6 addr parsing
+9929ceb include suite in archive dir
 d9e902f ceph.newdream.net -> ceph.com wiki url -> docs url
+abd7d18 whitelist 'slow request' in qa runs
 f8a196f cls_rbd: drop useless snapshot metadata helpers
 b08d7ba cls_rbd: use encode macros for on-disk snap metadata
 07f853d PG: best_info must have a last_epoch_started as high as any other info
@@ -26820,6 +30986,8 @@ c88a455 cls_rbd: add locking functions.
 1e899d0 filejournal: make less noise about open failures
 b415fd2 rgw: obj copy respects -metadata-directive
 2dd1798 librbd: only clear the needs_refresh flag on successful updates.
+0c40b24 Run fsx on rbd with thrashing
+50e01c1 Increase number of ops done by fsx against rbd.
 b5de839 osd: optional verify that sparse_read holes are zero-filled
 1156f94 buffer: add list and ptr is_zero() method
 614d5a7 LFNIndex.cc: escape null in append_[un]escaped
@@ -26834,6 +31002,12 @@ b44092f radosgw: stop startup timer on failed start
 92589f8 objclass: allow class methods to retrieve their triggering entity_inst
 7515b05 doc: added qemu-img documentation for rbd.
 5db4509 rgw: limit number of buckets per user
+9aeac5d add radosgw-admin test to regression suite
+3bd387f radosgw-admin: fix for non-numeric bucket ids
+697c3b9 radosgw-admin: test max buckets limit
+474f8da radosgw-admin: remove buckets before user
+83f8f3d radosgw-admin: fix swift subuser/key tests
+57da279 schedule_suite.sh: add flavors, check/fix sha1s, optional templates
 0adb33d doc: Added steps for OpenStack install with DevStack
 446e5d8 doc: fixed bash syntax error.
 97c9f01 qa: disable xfstest 68 for now
@@ -26887,10 +31061,15 @@ baa3aff rgw: access methods for new usage ops
 ea2f955 rgw: new config options
 9a70ec9 rgw: new class methods for handling usage information
 d265bb6 rgw: don't fail initialization if socket path returns ENXIO
+5792f13 workunit: grab 'all' config from the right variable
 9851683 cls_rbd: add get_all_features method
 ee7a027 mon: fix pg state logging
+5012b73 Add test for cls_rbd
 5cd33cd workunits/rbd: add workunit for running cls_rbd tests
 031d42a workunits/rbd: disable remove_with_watcher test
+68f14b4 Test old and new rbd formats
+04ef5dc Update for new workunit task syntax
+8af8d0e workunit: allow setting environment variables
 3d22546 librbd: remove unnecessary notify from add_snap()
 44e5f3d librbd: ignore RBD_MAX_BLOCK_NAME_SIZE when generating object ids
 3d7925b workunits/rbd: allow creating images in different formats
@@ -26920,6 +31099,7 @@ f1d6963 librbd: use cls_client functions for calling class methods
 4eb2138 librbd: update ictx_refresh to work with both formats
 7376a77 librbd: Update ImageCtx for new format
 dcc7c96 cls_rbd: add methods for interacting with the new header format
+8c08482 regression: fix new rados, rbd test yamls
 70686c5 librbd: remove useless ENOMEM checks
 13aa578 DBObjectMap: remove extra semicolon
 8c637f5 vstart: debug osd classes when debugging is on
@@ -26934,6 +31114,7 @@ e0fda59 objclass: pass strings as const references
 fefedc1 cls_rbd: add indent settings header
 92325d0 cls_rbd: remove unused test_exec and snap_revert methods
 b2793c4 mon: require force argument for 'mds newfs ...'
+6df344c run rados, rbd api tests under thrashing
 3eca360 Cleaned some lingering references to "verbose"
 c0a02a4 crushtool: refine tunables warning message
 978d541 re-include assert after boost::pool
@@ -26948,6 +31129,7 @@ c04de2b crush: generate histogram of choose tries
 c4336a3 crushtool: arguments to adjust tunables
 8b79697 crush: make magic numbers tunable
 7332e9c mon: use mode 0600 throughout
+4fa665c --summary: add total counts, also note free machines
 07169d2 doc: Added mount cephfs with fstab.
 7d1b32a osd: include past_intervals in pg query results
 36a3979 OSD: _have_pg should return NULL if pg is not in map
@@ -26956,14 +31138,18 @@ fae1d47 deliberately break encoding macros when wrong assert is present
 a1ae8b6 reinclude assert.h after json_spirit
 e87a66b doc: Incorporated Sam's comments.
 3939839 doc: Typo fix.
+44374bc new variable 'lock' hid lock() function
 22863c3 mon: set policy for client, mds before throttler
 71c2877 DBObjectMap: fix some warnings
+9313cde teuthology-lock: add --summary and --brief options
 0f6d90c make everyone use our assert #include and macro
 a30601a assert: detect when /usr/include/assert.h clobbers us
+9ec2843 pull s3-tests.git using git, not http
 165fbd1 keyserver: also authenticate against mon keyring
 5d520f1 keyring: implement get_caps()
 ca433f2 mon: share mon keyring with KeyServer
 47b202e mon: put cluster log at /var/log/ceph/$cluster.log and/or send to syslog
+7523ff3 ceph: simplify 'cluster' mon log handling
 78b0bea monclient: be paranoid/defensive about send_log vs log_client==NULL
 58b02f9 crushtool: fix cli tests given new less-chatty output, help
 af4d8db crushtool: allow user to select output reporting in blocks
@@ -26973,6 +31159,8 @@ d4c30da mon: include pg acting in health detail
 0167fd0 mon: include all types of stuck pgs in health detail
 37bf2a1 test/cli/ceph-authtool: keyring.bin -> keyring
 5b443eb doc: keyring.bin -> keyring everywhere
+120ce3f Pass up unmodified exceptions from connection.connect()
+fac88a4 More shortnames fixes: allow shortnames in teuthology-updatekeys as well; use list comprehensions instead of map()
 f918049 test/: Made omap_bench compatible with teuthology
 02a9a01 doc: Added the root discussion to deploy with mkcephfs.
 993caf8 doc: Added chmod for keyring, and moved client.admin user higher.
@@ -27005,12 +31193,14 @@ e083e0e DBObjectMap: restructure for unique hobject_t's
 dab238b os/: update CollectionIndex filename encodings
 927458c test/ObjectMap: Copy current DBObjectMap implementation
 d5ab877 src/: Add namespace and pool fields to hobject_t
+23c7293 task/: Added object map benchmarking test
 ec689e3 rgw: replace 'should_log' with 'enable_ops_log'
 8e41ac1 mon: clear osd_stat on osd creation/destruction
 fb6ea82 doc: Added S3 examples to the toctree.
 62fe9f4 doc: adding code samples for S3 API usage (thanks, DH!)
 644a615 Makefile.am: explicitly mention that -Wl,--as-needed is location-sensitive.
 e83a84c doc: Added ${lsb_release -sc} based on Sam's feedback.
+044697d Allow short names to teuthology-lock (e.g. "plana14")
 fc1f9e0 rgw: shutdown init_timer
 5087997 rgw: try to create fcgi socket through open() first
 000f9d3 msg: make clear_pipe work only on a given Pipe, rather than the current one.
@@ -27030,9 +31220,12 @@ f871d83 Makefile: include ceph-mds upstart bits in dist tarball
 7a4e923 test/: Added object map benchmarking tool
 e0f1952 doc: fix autobuild debian source line
 97d4396 mon: throttle client msgr memory
+d3f855e fix up dist var
 7e3d90a Objecter: tone down linger op messages on tick
+af4fe15 Change hardcoded oneiric to precise
 4a7683c test_stress_watch: exercise watch/unwatch/close from second client
 95e0a88 qa: stress_watch.sh workunit runs test_stress_watch
+95ecf40 add rados_stress_watch to regression
 fb7ce59 doc: Added preliminary rbd and ko info.
 10b0db3 osd: Adding const to methods in OSDMap
 8609caf vstart.sh: initialize keyring even if not using cephx
@@ -27095,6 +31288,7 @@ bea1e03 filestore: verify that op_seq is nonzero
 0f38d75 filestore: fix initial btrfs snap creation (on mkfs)
 3ae84ee rgw: replace dump_format() with dump_int()
 9a9418e librados, ObjectCacher: include limits.h for INT_MAX
+ea6c546 Added python-dev to list of required packages.
 8c1c882 rbd.py: Fix seemingly-random segfaults
 2f963fa librados: Remove errant sha1
 6e46de3 rbd.py doc: Fix markup on :class: references
@@ -27184,6 +31378,7 @@ dae2f53 mon: fix leak of MonMap
 7b2614b monmap: filter_initial_members -> set_initial_members
 13c86ca Update leveldb to remove CompactionInputErrorParanoid
 7d5fa4e man: Remove leftover --bin from ceph-authtool(8).
+62f8f00 rbd.xfstests: default to 250mb instead of 100mb
 ad663d5 (tag: v0.47) v0.47
 e2e7f58 keys: new release key
 5d2ec1d builder: make reweight helpers static, void
@@ -27302,6 +31497,7 @@ bb74b8b osdmap: filter out nonexistent osds from map
 7ce157d utime_t: no double ctor
 90fb403 objectcacher: make *_max_dirty_age tunables; pass to ctor
 82a3600 librbd: set cache defaults to 32/24/16 mb
+43ac8e2 rbd_fsx in write-through mode
 d96bf6c test_filestore_workloadgen: name the Mutex variable
 8bacc51 workloadgen: time tracking using ceph's utime_t's instead of timevals.
 772276c workloadgen: forcing the user to specify a data and journal.
@@ -27321,6 +31517,8 @@ aa782b4 Makefile: libos.la -> libos.a
 d96e084 Makefile: libmon.la -> libmon.a
 7dbcc1c libs3: added 'make check' target
 827d222 debian: build-depend on libxml2-dev
+d6b9bd8 schedule_suite: fix 'slow request' whitelist
+3d1fff8 rbd_fsx: resize to byte boundaries (not object multiples)
 3851423 objectcacher: make cache sizes explicit
 b5e9995 objectcacher: delete unused onfinish from flush_set
 6f3221a objectcacher: explicit write-thru mode
@@ -27336,6 +31534,7 @@ a4b42fc keyring: clean up error output
 ae0ca7b keyring: catch key decode errors
 6812309 debian: depend on uuid-runtime
 3509b03 safe_io: int -> ssize_t
+396d1fe ceph.newdream.net -> ceph.com
 203a7d6 objectcacher: wait directly from writex()
 991c93e mon: fix call to get_uuid() on non-existent osd
 150adcc debian: add rules for rest-bench
@@ -27430,6 +31629,7 @@ b0ee6e8 Added introduction to clustered storage and deleted older files that hav
 a1b31dd Initial cut of introduction, getting started, and installing. More to do on installation. RADOS gateway to follow.
 31fb8f9 Put change to rbd manpage in .rst instead, update man/ from output
 d3a2c56 doc: move documentation build instructions to doc/dev section
+715abde ignore syslog cron noise
 ee26c5d Treat rename across pools as an error. Fixes: #2370. Reviewed-by: Samuel Just <samuel.just at dreamhost.com>
 2a93258 doc: add warning about multiple monitors on one machine.
 b41f4d1 rgw: normalize bucket/obj before updating cache
@@ -27450,6 +31650,7 @@ b9a54b4 PG: check_new_interval now handles adding new maps to past intervals
 274cd0c throttle: count get_or_fail success/failure explicitly
 81f51d2 osd: pg creation calc_priors_during() should count primary as up
 75a107c throttle: note current value and max in perfcounters
+c5429bf use fewer nodes for the simple singleton tasks
 1acdc57 .gitignore: ceph-kdump-copy
 18790b1 osd: add is_unmanaged_snaps_mode() to pg_pool_t; use more consistently
 22bd5df pick_address: don't bother checking struct ifaddrs which have a null ifa_addr
@@ -27460,6 +31661,7 @@ ccfb6b3 global_init: don't fail out if there is no default config.
 845718d global: fix incorrect CINIT flag.
 f3771b0 throttle: feed cct, name, and add logging
 7413828 osdmap: do no dereference NULL entity_addr_t pointer in addr accessors
+dcbb8d4 osd_recovery: test no* osdmap flags
 4c597fa OSD: add different config options for map bl caches
 cefaa7d mon: fix nion -> noin typo
 cb7f1c9 (tag: v0.46) v0.46
@@ -27550,6 +31752,8 @@ ba1d3b1 mon: use can_mark_*() helpers
 1966402 TestFileStoreState: distinguish between 'get_coll()' and 'get_coll_at()'
 44dafc8 run_seed_to.sh: Add valgrind support.
 4430c01 TestFileStoreState: free memory on terminus.
+25114bf nuke: refactor to run in parallel and add unlock option
+b32b693 parallel: obey iterator protocol
 4bfcbe6 mon: decode old PGMap Incrementals differently from new ones
 59957da mon: do not mark osds out if NOOUT flag is set
 2673875 mon: do not mark booting osds in if NOIN flag is set
@@ -27584,10 +31788,12 @@ b8f4acf osd: remove preferred from object_locator_t
 0138a76 osd: ignore localized pgs
 94adf5d osd: remove localized pgs from pg_pool_t
 43d1a92 run_seed_to.sh: remove stray arg
+a11b69f nuke: ignore ntpdate errors
 0112e74 run_seed_to.sh: rework the script, make it more flexible and broaden the tests.
 e9ecd1b perfcounters: tolerate multiple loggers with the same name
 97f507f Makefile: disable format-security warning
 c8377e4 filestore: verify that fiemap works
+6cf8767 filestore_idempotent: url has changed
 7471a9b rados: fix error printout for mapext
 07ddff4 librbd: instrument with perfcounters
 fb9fdf4 librbd: fix ictx_check pointer weirdness by using std::string
@@ -27619,6 +31825,7 @@ a68b306 test_filestore_workloadgen: track pg log size in memory
 f00dab6 ceph-authtool: rename cli test dir
 613d817 ceph-conf: rename cli test dir
 ac874a6 OpRequest: mark_event output is now at debug 5
+c9d2185 hammer.sh: -a to archive each run
 f2d4574 config: dump subsys log levels with --show-config
 607a8a3 osd: do not create localized pgs
 8335eb5 rgw: can't remove user that owns data
@@ -27627,8 +31834,11 @@ a394fb2 msgr: do not establish a new Pipe for any server
 42a49cf msgr: Create a proper Connection* lazy_send_message()
 526cd9d msgr: merge submit_message() functions
 56fc92c FileStore: don't read reference to writeq head after pop_write
+ff0fe37 add rbd_fsx_[no]cache jobs to regression suite
+e3af087 rbd_fsx: show progress
 758fa9b rgw: bucket HEAD operation should return 200, not 204
 9cb796a obsync: section 8 -> 1
+6a58314 fix misc checks that wait for N osds to be up
 5c21f7e msgr: remove the Pipe::disposable flag.
 c470e1a msgr: start moving functions around in Pipe
 db6d467 msgr: fix some spacing issues in Pipe
@@ -27651,6 +31861,7 @@ aab516d rgw: allow subuser creation on radosgw-admin user create
 c9adf07 msgr: move SimpleMessenger function declarations into good order
 dbe779a osd: dump old ops singly rather than all at once.
 b0532aa osdmap: restructure flow in OSDMap::decode
+7ae1aef gather logs for cfuse dbench workload, hopefully catch #1737
 bc458bf ceph-object-corpus: prune archive
 ebb19e7 test/encoding/import.sh: fix
 e1622e4 rgw: HEAD bucket also dumps bucket meta info
@@ -27658,6 +31869,7 @@ e1622e4 rgw: HEAD bucket also dumps bucket meta info
 c3bfe45 osdmap: remove bad cct reference
 eea982e osdmap: workaround old bug that zeroed pool_max
 ee22c97 python-ceph: remove rgw bindings
+407b2e0 whitelist xfs_fsr syslog noise
 eb8a8fd pgmap: allow Incrementals to specify [near]full_ratios of 0
 88525ea pgmon: add "set_full_ratio x" and "set_nearfull_ratio x" commands
 882bd5b pgmon: convert PGMonitor::prepare_command to our standard infrastructure.
@@ -27679,6 +31891,7 @@ dea8536 FileJournal: clarify locking in header
 a422c47 msgr: remove out-of-date comments and function
 2eb2aa5 msgr: group functions more appropriately
 0be9718 librbd: 'rbd cache enabled' -> 'rbd cache'
+e875b89 Add task for running fsx on an rbd image.
 6cbda4c qa: comment out xfstest 232
 e0ec286 objecter: recalc pgid in case it wasn't a precalculated pgid
 f22da6c cls_rgw: rgw_dir_suggest_changes move cur_disk_bl inside loop
@@ -27688,12 +31901,16 @@ f22da6c cls_rgw: rgw_dir_suggest_changes move cur_disk_bl inside loop
 6868ec6 rgw: skip empty entries when listing a bucket
 f87a6a0 cls_rgw: rgw_dir_suggest_changes() don't try to decode cur_disk
 73badda cls_rgw: don't zero out error code before reading value
+6bede29 dump_stuck: whitelist 'wrongly marked me down'
+19e673c filestore_idempotent: use new sequence-based tester
 20ab74e test_librbd: fix write sizes
 81c8a7b test_idempotent_sequence: no config file
 a40fe5b run_xfstests.sh: ensure cleanup on errors
 3d187db run_xfstests.sh: pass test result via exit status
 a4dd920 objclass: fix cls_cxx_map_remove_key()
 b352a3e rgw: call dir_suggest_changes with correct bufferlist
+6ba4efc rbd.py: add xfstests functionality
+4498825 add rbd_xfstests to regression suite
 a31392c objectcacher: name them
 28e6181 librados: add IoCtx::get_pool_name() to c++ api
 d80c126 ObjectCacher: use "objectcacher" instead of the CephContext::name for perfcounter init
@@ -27780,6 +31997,7 @@ c6551fb test_rados.py: add basic tests for the Object class
 5ce8d71 FileJournal: allow write_thread and write_finisher to run in parallel
 dc44950 filestore: implement collection_move() as add + remove
 dc10d04 OSD: improve information and format of OSDTracker messages
+55535d0 move tasks:cfuse_workunit_suites_dbench.yaml to stress pending #1737 fix
 3d11455 osdmaptool: fix clitest for lpg_num 0
 8a6c3c1 msgr: dispatcher: Documentation of security functions
 9b2aff2 FileStore: don't return ENOENT from object_map getters
@@ -27818,14 +32036,21 @@ be5b25b filestore: fix collection_move guard
 43de5e4 FileStore: dumping transactions to a file
 cd4a760 osd: fix heartbeat set_port()
 1775301 osd: reenable clone on recovery
+ddb98f7 ceph_manager: don't try to start greenlet twice
 6fbac10 osd: allow users to specify the osd heartbeat server address.
 4f030e1 osd_types: fix off by one error in is_temp
 31f16a4 rgw: list multipart response fix
 89fecda Makefile.am: remove some clutter
+1ac5554 kernel: kludge around mysterious 0-byte .git/HEAD files
 0aea1cb (tag: v0.45) v0.45
+0d5918f kernel: reset to remote firmware branch; don't pull
+9b755fd kernel: change git incantation for firmware pull
+22b1f17 ls: another newline
+7757fbb ls: remove stray newline
 d348e1a configure: --with-system-leveldb
 34cc308 filestore: fix leveldb includes
 0b2e1cd cephfs: fix uninit var warning
+9906d5e Change to local mirror of linux-firmware repo to try to stop failures
 f79b95e Makefile: add missing .h to tarball
 8d5c87a rgw: fix object name with slashes when vhost style bucket used
 853b045 OSD: use per-pg temp collections, bug #2255
@@ -27847,14 +32072,19 @@ cfee033 config: parse fsid uuid in config, not ceph_mon
 930a669 config: add cluster name as metavariable; use for config locations
 bda562f config: implement --show-config and --show-config-value <option>
 f18b219 test_workload_gen: fix logging
-32b5d0f config: remove obsolete bdev_* options
+32b5d0f8 config: remove obsolete bdev_* options
 0e5d087 README: update instructions
+3d7f1db Kernel: Pull linux-firmware from git
 0df6fbd rados: fix rados import
+ba0fb3e cleanup-and-unlock.sh: helper to nuke and then unlock a set of nodes
+3adf2bf schedule_suite.sh: helper to schedule a suite
 0921c06 config: drop loud ERROR prefix
 b9185bb osdmap: allow row, room, datacenter, pool in conf for initial crush map
 4313a2d crush: don't warn on skipped types
 56a6aa7 osdmap: set 'default' pool type correctly
 dd7b84a ceph-fuse: fix log reopen when -f is specified
+1836d46 Added assertion to check that targets > roles
+9529402 nuke: don't run umount when no xargs args
 e859611 osd: define more crush types
 2dbdadb test_rewrite_latency: check return value
 493344f Makefile: add mssing header
@@ -27883,6 +32113,7 @@ e792cd9 filestore: fix ZERO fallback write
 f8a5386 osd: fix error code return from class methods
 15f0a32 monmaptool: make clear you can set the fsid when making a new map.
 208daeb ceph_mon: fix fsid parsing.
+9a69c3f ceph.conf: enable 'osd recover clone overlap'
 aa31035 osd: update_stats() on reads too
 2878865 log: dump_recent in fatal signal handler
 f7f65eb osd: fix typo in debug message
@@ -27909,6 +32140,8 @@ eebc9ec test: test_workload_gen: Add callback for collection destruction.
 409b648 config: drop old debug_* items
 5d981b1 rgw: add unittest just to verify we link
 69b0172 config: fix librados, libcephfs unit tests
+b4aa098 make Thrasher not inherit from Greenlet
+394d8b1 Add test for object source marked down
 1c8ec70 PG,ReplicatedPG: update missing_loc_sources with missing_loc
 05ef3ba ReplicatedPG: fix loop in check_recovery_sources
 c39ed56 test: test_workload_gen: Fixing a memleak.
@@ -27925,6 +32158,7 @@ a3bdf05 test: test_workload_gen: Default arguments, and minor changes.
 d172b40 test: test_workload_gen: Destroy collections.
 8948ad0 test: test_workload_gen: CodeStyle compliance and cleanup.
 3770096 test: test_workload_gen: Mimic an OSD's workload.
+749826c allow use of a separate journal block device
 36c2f27 osdmaptool: fix clitest conf filename
 ffc468f osdmap: less noisy about osd additions during buildmap
 ca1f79b dout: no newlines on dout_emergency
@@ -27965,6 +32199,7 @@ f41887e log: new logging infrastructure
 679cd1f objecter: add in-flight ops throttling
 d6b0cbd config: use our assert
 c3dc6a6 msg: assert pipe->msgr == msgr
+e30b771 rbd: fix typo in default config
 483fcf8 doc: include crush in toctree
 3bd1f18 doc: few notes on manipulating the crush map
 6db7715 doc/dev/peering.rst: fix typo
@@ -27975,7 +32210,14 @@ ea377a0 osd/: Convert OpRequest* to OpRequestRef
 e478a75 vstart: enable omap for xattrs
 3ab2895 don't override CFLAGS
 d3bcac2 Makefile: fix modules that cannot find pk11pub.h when compiling with NSS on RHEL6
+ef17c8c add smoke suite
 b5641ef rgw: don't #include fcgi from rgw_common.h
+397e7f2 add osd_recovery task to test divergent osd logs
+1c1192a backfill: use 'rbd' pool instead of 'data'
+24910c3 add osd-recovery test
+6bf9c95 renamed backfill -> osd_backfill
+ca9a5a4 rename backfill -> osd_backfill
+22e8087 put filestore xattr option in [global]
 2ec8f27 rados_bench: generate_object_name now takes a buffer length
 21a170e doc: dev/peering.rst edits from Greg
 8fa904a doc: update dev/peering document
@@ -27983,12 +32225,15 @@ de86763 msgr: fix tcp.cc linkage
 fd9935b cephtool: don't prefix log items
 2e21adf Objecter: resend linger_ops on any change
 52aff48 ObjectStore: Add collection_move to generate_instances
-ec52eeb FileStore: remove src on EEXIST during collection_move replay
+ec52eeb2 FileStore: remove src on EEXIST during collection_move replay
 23313ee FileStore: whitelist COLLECTION_MOVE on replay
 b47454b ObjectStore: add COLLECTION_MOVE to dump
 1b2a066 ceph-kdump-copy: add tools for saving kdumps
 3caa431 ceph: define and use a shell_scripts Makefile variable
+6f0f250 suite: add missing print statement
+8a9a567 suite: fix print statement when summary doesn't exist
 d0e8f14 doc: update list of debian dists
+91c08f6 Add watch op to rados.py
 7236178 Objecter: resend linger_ops on any change
 3019d46 TestRados: Add watch
 4760536 rgw: keep pool placement info also in cacheable location
@@ -28003,7 +32248,15 @@ e42fbb7 rgw: process default alt args before processing conf file
 e0b8f7a rgw: process default alt args before processing conf file
 51a0733 rgw: increase socket backlog
 5b33198 rgw: fix internal cache api
+815fc3e suite: failed runs might not have durations
 f923b84 OSD: do not hold obc lock in disconnect_session_watches
+a65d413 suite, coverage: use absolute dirs for isdir checks
+bdb72c2 filestore_idempotent: get coverage and coredumps
+6c8db1a suite: more results logging
+7173a8a ceph.conf: no comment
+7de798f ceph.conf: set 'filestore xattr use omap = true'
+7d2e105 fix teuthology-ls isdir check
+94f0ba1 run valgrind with cwd set to /tmp/cephtest/archive/coredump
 fd85130 ReplicatedPG: there should be no object_contexts during on_activate
 77c08f8 osd: fix object_info.size mismatch file due to truncate_seq on new object
 619fe73 .gitignore: xattr_bench
@@ -28013,8 +32266,10 @@ d8bcc1b config: fix recursive locking of md_config_t::lock
 15d85af osd: explicitly create new object,snap contexts on push
 d4addf5 osd: re-use create_object_context() in get_object_context()
 58c5d5a osd: ReplicatedPG::create_object_context()
+01924a2 disable rbd thrash workload, #2174
 96780bd osd: create_snapset_context()
 872bdd0 osd: ensure we don't clobber other *contexts when registering new ones
+07b97fe suite: log results and coverage generation
 a49a197 ReplicatedPG,FileStore: clone should copy xattrs as well
 2a593dd RadosModel: test xattrs with omap
 14506dc FileStore: add support for omap xattrs
@@ -28029,13 +32284,16 @@ d8325e5 DBObjectMap: implement xattr interface
 f2e6b8d ReplicatedPG: populate_object_context during handle_pull_response
 4cfc34f leveldb: .gitignore TAGS
 5db6902 leveldb: un-revert
+8fbd087 results: make sure email is sent before anything else fails
 89ccd95 osd: maybe clear DEGRADED on recovery completion
+b457235 Revert "disable rbd thrash workload, #2174"
 826d30f rgw: remove extra layer of RGWAccess
 80e2a5e msgr: switch all users over to abstract interface
 1e1453c msgr: introduce static Messenger::create() function
 d26feff msgr: promote more methods to abstract Messenger interface
 c2af646 rgw: put_obj() uses bufferlist instead of extra alloc/copy
 2b3bfd0 rgw: remove fs backend
+1bec416 disable rbd thrash workload, #2174
 bec47b5 introduce CEPH_FEATURE_OMAP
 8c96fd2 leveldb: new .gitignore entry
 20d1171 osd: rev cluster internal protocol
@@ -28053,8 +32311,12 @@ e5934f1 qa: kclient/file_layout.sh: ...
 db12627 ObjectCacher: remove unused and crufty atomic sync operations
 5f92f33 librados: move methods that require an IoCtx to IoCtxImpl
 8f27864 librados: split into separate files and remove unnecessary headers
+b90354d thrash: put client on separate machine from osds
+5c9acbd gitbuilder: put flavor last
+1a01cca Pull from new gitbuilder.ceph.com locations.
 98792e9 rgw: add more meaningful tests instances of encoded objects
 dfe50bc build-doc: use alternate virtualenv dir, if specified
+096427d remove dup ceph tasks from new thrash workloads
 6869c57 test_idempotent: fix global_init call
 401a571 qa: kclient/file_layout.sh poking
 7e0e7ce rgw: modify bucket instance for encoding test
@@ -28066,13 +32328,22 @@ e19417e Makefile.am, rgw: remove fcgi dependency where not needed
 31578c1 rgw: tone down some log messages
 452b124 rgw: provide different default values for 'debug rgw'
 2c88f85 config: alternative config options for global_init()
+3833ada Made the example better with multiple roles.
+0a61ffa Added some example yaml files and an example parallel execution task.
 968d29d qa: use recent kernel for kernel_untar_build.sh
 9316439 rgw: switch ops log flag to use ceph config
 c9898f4 filestore: fix op_num offset/labels
 7c75349 config: tmap to omap upgrade, true by default
+2b9e7bc clusters/fixed-3.yaml: 2 -> 6 osds
+51d817f Revert "disable s3tests on valgrind/lockdep until #2103 is fixed"
+af44518 add rbd, kclient workloads to regression thrash collection
 3bba6b7 Makefile: link libfcgi to librgw
 945218c qa/workunits/kclient/file_layout: escape *
+71e6e62 fix typo, ceph-fyuse -> ceph-fuse
 a849787 filejournal: less log noise
+b84897e use dbench workunit, not the autotest one
+008cf7f autotest: pull from github.com/ceph/autotest
+2124129 workunit: include python2.7 path too
 0851b74 filestore: remove unused bool idempotent
 6980ed9 filestore: fix arguments
 9b2a5c5 filestore: sync object_map on _set_replay_guard()
@@ -28101,6 +32372,7 @@ e43546d osd: fix watch_lock vs map_lock ordering
 80d9252 osd: update_heartbeat_peers as needed
 619244d rgw: implement dump() for encoders
 2277fb4 rgw: add stubs for dencoder test
+ddc1ab0 rados.py: include setattr and rmattr
 ac28b60 ceph: document the way files are laid out
 1ed1d46 librados: fix unit test for omap_get_vals_by_key rename
 ce43bd5 osd: format time nicely in ops_in_flight output
@@ -28128,6 +32400,8 @@ b0c9d58 rbd: pass all mon addrs when mapping devices
 9dc7c65 msgr: remove SimpleMessenger::get_ms_addr() in favor of Messenger::get_myaddr
 4d0bcdc objectstore: fix collection_move() encoding
 0a59d08 ReplicatedPG,librados: add filter_prefix to omap_get_vals
+31762c0 lock: Improved logging when there aren't enough nodes available to lock-many.
+05a07dd lock: Added a --locked flag to teuthology-lock.
 6bf7201 rgw: some minor cleanups
 5f8ffde objclass: fix cls_cxx_map_write_header
 799c8ac cls_rgw: fix debug message
@@ -28165,6 +32439,7 @@ d8dcb28 librados: add tmap_put to ObjectWriteOperation
 195301e mds: respawn when blacklisted
 769ef36 journaler: add generic write error handler
 8618640 .gitignore: src/ocf/rbd
+2a18c3e nuke: unmount osd data directories
 e3b4ba9 filestore: create snap_0 on mkfs
 a14d44f filestore: drop useless read_op_seq() arg
 affda7c rbd OCF RA: fix whitespace inconsistency
@@ -28191,6 +32466,7 @@ e36940a rgw: atomic processor writes to shadow object
 702f09e librados: close narrow shutdown race
 743da9b osd: don't trust pusher's data_complete
 e1a9e18 osd: warn if recovery still has missing at end
+1493674 Use non-zero exit status if any tests failed
 c31b869 OCF resource agents: add rbd
 75cbed6 DBObjectMap: remove stray ;
 0272b59 LevelDBStore: #include types.h
@@ -28206,8 +32482,10 @@ ef24477 msgr: Remove the SimpleMessenger start/start_with_nonce distinction.
 ffa5955 msgr: Remove SimpleMessenger::register_entity
 3bd1d2a msgr: add start() and wait() stubs to the Messenger interface
 70360f8 github.com/NewDreamNetwork -> github.com/ceph
+dc1abab github.com/NewDreamNetwork -> github.com/ceph
 cacf0fd filestore: fix rollback safety check
 9fa8781 (tag: v0.43) v0.43
+9f757ca disable s3tests on valgrind/lockdep until #2103 is fixed
 3a83517 RadosModel: separate initialization and construction
 cd31388 librados: only shutdown objecter after it's initialized
 2c275ef Makefile: add headers for distcheck
@@ -28221,6 +32499,8 @@ d9b130f Added LevelDBStore
 58a3b7f Added leveldb submodule
 cddcc2d Makefile: make check-local relative to $(srcdir)
 749281e Makefile: add json_spirit headers to tarball
+a80246c dump_stuck: note required ceph configuration
+b2bbede dump-stuck: set pg stuck threshold to match test
 85d04c6 rgw: don't check for ECANCELED in the _impl() functions
 8634065 rgw: don't retry certain operations if we raced
 b1f2644 msgr: fix race in learned_addr()
@@ -28235,10 +32515,12 @@ e843766 osd: fix typo is recovery_state query dump
 2437ce0 msgr: discard the local_pipe's queue on shutdown.
 7690f0b osd: remove down OSDs from peer_info on reset
 23a0c03 rgw: check for bucket swift permissions only if failed
+85cc96c dump_stuck: verify that 'ceph health' mentions the right number of inactive/unclean/stale pgs
 b9a675a mon: report pgs stuck inactive/unclean/stale in health check
 bc80ba1 rgw: fix swift bucket acl verification
 cc93518 rgw: implement swift public group
 d10e1f4 mon: fix slurp_latest to fill in any missing incrementals
+999e219 peer: ignore +scrubbing portion of pg state
 7b48cca test_osd_types: fix unit test for new pg_t::is_split() prototype
 fd0712d Makefile: drop separate libjson_spirit.la
 edd35c0 osd: drop useless ENOMEM check
@@ -28246,6 +32528,7 @@ a7de459 ceph-osd: clarify error messages
 97926e1 init: Actually do start the daemons when 'service ceph start <type>' is specified
 f317028 doc: beginnings of documentation of stuck pgs and pg states
 1917024 filestore: make less noise on ENOENT
+722af1a no peer as part of lost_unfound
 244b702 pg: use get_cluster_inst instead of get_inst in activate
 b6a0417 osd: pg_t::is_split(): make children out param a pointer, and optional
 85ed06e osd: bypass split code
@@ -28259,12 +32542,32 @@ ee4d990 journaler: log on unexpected objecter error
 41295b5 debian: /var/run/ceph -> /run/ceph
 0d8b575 debian: build-{indep,arch}
 3ad6ccb debian: sdparm|hdparm, new standards version
+9afafdf move peer to separate test for now
+6295578 lost_unfound: do peer after, until wait_for_clean propagates last_epoch_started
+84cd4ed peer: wait for peering to complete, or block
+d944e7e fix lockdep.yaml conf syntax
 266902a rgw: initialize bucket_id in bucket structure
 f8f6e4d rgw: _exit(0) on SIGTERM
+5d5a022 run radosgw through valgrind for s3tests
+c9c1a4a do peer test along with lost_unfound
+b873958 peer: remove unused variable
+62bda12 misc: always return a usable result from get_valgrind_args
+e480181 rgw: simplify valgrind args
+edbb41e add peer task
 732f3ec (tag: v0.42.2) v0.42.2
+fc531a9 rename valgrind -> verify, add in runs under lockdep
+7ac04a4 lost_unfound: list missing/unfound for each pg and verify the unfound counts
+c43e87d ceph_manager: list_pg_missing
 d85ed91 osd: fix array index
 722e9e5 lockdep: don't make noise on startup
 fdaed0a formatter: fix trailing dump_stream()
+c93a08e Whitespace and unnecessary formatting fixes
+3bfb8d6 ceph, ceph-fuse: simplify valgrind argument additions
+9ec0472 refactor all valgrind users to use a get_valgrind_args() helper
+90fdc84 ceph: always create valgrind logs dir
+7af6e46 ceph: always try to process valgrind logs
+e2ea73d rgw: add valgrind support
+7bf64b7 rgw: accept dict
 7ad35ce osd: include timestamps in state json dumps
 e22adac osd: use blocks for readability in list_missing
 6d90a6d osd: dump recovery_state states in json
@@ -28280,6 +32583,7 @@ c9416e6 osd: 'tell osd.N mark_unfound_lost revert' -> 'pg <pgid> mark_unfound_lo
 804f243 do_autogen.sh: -T for --without-tcmalloc
 5efa821 rgw: swift read acls allow bucket listing
 f09fb87 rgw: fix swift acl enforcement
+d40a9b2 lost_unfound: new mark_unfound_lost syntax
 7c7349e ceph: fix help.t
 c3e1291 (tag: v0.42.1) v0.42.1
 0281f1c debian: add ceph-dencoder
@@ -28288,6 +32592,7 @@ f6e42a8 ceph.spec.in: add ceph-dencoder
 cbf79a9 ceph-tool: remove reference to "stop" command
 3bad945 mds: remove unused MDBalancer dump_pop_map() function.
 4dfec57 rgw: enforce swift acls
+81a46c4 dump_stuck: flush stats before waiting for recovery/clean
 065d6dd mds: clean up useless block
 159f2b8 mds: fix Resetter locking
 f5bf9d9 rgw: s3 only shows s3 acls
@@ -28314,25 +32619,36 @@ d1fe2f8 mon: deprecate mon 'stop' command
 7842bb5 mds: Add old_inodes to emetablob
 26b5675 Fix ceph-mds --journal-reset
 761ecc6 Makefile: include encoding check scripts in dist tarball
+52a52cf Add test for 'ceph pg dump_stuck'
 7fab4fa debian: add ceph-dencoder
 cd5a8f7 ceph.spec.in: add ceph-dencoder
 a6c7f99 ceph-dencoder: man page
 8c48a8e rgw: read correct acls for swift metadata update ops
+995dc1f Add a task for testing stuck pg visibility.
+2a1c74c Move duration calculation to an internal task
 e67c0ff osd: make object_info_t::dump  using hobject_t and object_locator_t dumpers
+eb434a5 Add necessary imports for s3 tasks, and keep them alphabetical.
 55a6065 osdmap: dump embedded crush map in Incremental::dump()
 2365c77 rgw: maintain separate policies for object and bucket
 d2335fa crush: write CrushWrapper:dump()
 27c8a3f test/rados-api/misc: fix LibRadosMisc.Operate1PP test
 174f6b8 osd: refuse to return data payload if request wrote anything
 7cafa25 osdmap: dump fullmap from dump()
+11073e5 s3roundtrip, s3readwrite: access key uses url safe chars
 0e4367a rgw: accepted access key chars should be url safe
+6e1b3a5 rgw: access key uses url safe chars
+df5f573 add valgrind collection to regression suite
 17d3870 rgw: don't invalidate cache when adding xattrs
 cedb3d7 ceph: if 'pg <pgid> ..' doesn't parse a pgid, send to mon
 9927671 Makefile: fix misplaced unit tests
 1ff7568 hobject_t: remove unused back_up_to_bounding_key()
+c5688e6 ceph: valgrind trumps coverage when picking a flavor
+5216d3c ceph.conf: no lockdep by default
 4d3de03 osd: sched_scrub() outside of map_lock
 0b7f6e3 global: resurrect lockdep
+5f9445c suite.results: include test duration in output
 4432037 mon: disable pg_num adjustment
+84bd876 cfuse -> ceph-fuse
 7d3ae37 mon: use encode function for new Incremental
 f3a273a osdmap: successfully decode short map
 f3020c4 osdmap: use FEATURE encoder macro
@@ -28340,9 +32656,12 @@ ebd29b6 qa/btrfs/test_rmdir_async_snap
 1e407b4 ceph-dencoder: add OSDMap::Incremental
 a4f2fdb osdmap: add Incremental::dump()
 76cc71b osd: don't count SNAPDIR as a clone during backfill
+71d0d97 cfuse -> ceph-fuse
+7ff9f04 ceph: allow valgrind per-type (not just per-name)
 24b470a crush: fix CrushCompiler warning
 d74e029 test/encoding/readable.sh: sh, not dash
 e33bf5a crushtool: fix clitests
+eb93fa7 lost_unfound: mark osds in when we revive them
 0429aa7 msgr: fix shutdown race again
 b205c64 (tag: v0.42) v0.42
 76e88d1 msgr: fix accept shutdown race fault
@@ -28361,8 +32680,11 @@ ffddb34 osd: dispatch 'pg <pgid> ...' commands to PG::do_command()
 1f5e446 msgr: promote SimpleMessenger::Policy to Messenger::Policy
 1001692 mds: ignore all msgr callbacks on shutdown, not just dispatch
 1f240ca mon: discard messages while shutting down
+45b6189 ceph_manager: ignore stale states when counting
 787dd17 msgr: fix shutdown vs accept race
 c3a509a mds: drop all messages during suicide
+b5668cf thrashing: whitelist 'objects unfound and apparently lost' message
+196d4a1 wait_till_clean -> wait_for_clean and wait_for_recovery
 806285f mon: fix STUCK_STALE check
 c08615e mon: add dump_stuck command
 c0ab63e mon: constify functions needed to use dout from a const function
@@ -28400,6 +32722,7 @@ ebbfdef msgr: mark_all_down on shutdown
 c1b6b21 osd: do not sync_and_flush if blackholed
 e6ffe31 workqueue: make pause/unpause count
 40802ae osd: exit code 0 on SIGINT/SIGTERM
+bc0e406 add regression/multifs collection; run rgw tests under both xfs and btrfs
 2aafdea signals: check write(2) return values
 9cd0900 osd: semi-clean shutdown on signal
 ec06682 mds: remove some cruft
@@ -28414,6 +32737,8 @@ ecd2802 signals: implement safe async signal handler framework
 4425f3b libradospp: add config_t typedef
 06fa268 librados: use rados_config_t typedef instead of CephContext
 e32668f doc: Balance backticks.
+ad9d7fb backfill: wait for clean before writing+blackholing
+50cc60f nuke: nuke testrados too
 2281a00 librados: expose CephContext via C API
 bc4e78d mds: use new tmap_get pbl argument
 dd32285 librados: need prval for tmap_get
@@ -28424,6 +32749,13 @@ a53a017 ReplicatedPG: pull() should return PULL_NONE, not false
 f9b7529 osd_types.h: Add constructors for ObjectRecovery*
 7b1c144 test_filestore_idempotent: fix test to create initial object
 6b30cd3 libcephfs: define CEPH_SETATTR_*
+3fbb571 rename fs files
+10a94d2 regression/thrash on xfs and btrfs both
+04f3e44 btrfs: 1 -> fs: btrfs
+6f3abc6 ceph_manager: mark in a bit more often than out
+af4ce44 ceph: use any fs, not just btrfs, on scratch devices
+975d73a nuke: nuke testrados and rados processes, too
+46b612e misc: make get_scratch_devices look for (almost) any disk that's not mounted
 b54bac3 test/encoding/readable.sh: drop bashisms
 ffa1de3 filejournal: drop unused variable
 ccf8867 filejournal: aio off by default
@@ -28456,14 +32788,18 @@ eba609b filestore: make flush() block forever if blackholed
 7c6dff4 osd: filter trimming|purged snaps out of op SnapContext
 02bda42 mon: add {mon,quorum}_status admin socket commands
 e4258ce mon: move quorum_status into helper
+2adad55 hammer.sh: assume path is set
 60067f8 mon: move mon_status into a helper
 a414fd5 init-ceph, mkcephfs: try 'btrfs device scan' before 'btrfsctl -a'
+4fad131 add snap thrashing covering a small number of objects
+e841f9c move snap thrashing back into regression suite
 a391b0d osd: fix MOSDPGCreate version setting
 e09c90f osd: queue pg removal under pg's epoch
 4834c4c osd: check for valid snapc _before_ doing op work
 a0caa85 osd: some cleanup
 7eff37b mon: validate osmdap input
 7e32a3d rgw: objects can contain '%'
+6028b36 move kclient_workunit_suites_blogbench.yaml to stress suite
 bd1a956 mon: fix MMonElection encoding version
 22eca41 mon: remove the last_consumed setting in Paxos
 6e6c34f objecter: LingerOp is refcounted
@@ -28605,6 +32941,11 @@ efe77a8 check-generated.sh: nicer output
 c4ca114 osd: fix osd_recover_clone_overlap
 cb75491 osd: use obc for size in calc_head_subsets()
 36a4ca4 filestore: remove obsolete fs type check
+0cd16cf ceph: always add logger for daemons
+7af7c66 ceph: rename type parameter to type_
+7146db9 ceph: use the correct comparison operator
+e7672b6 ceph: sync before unmounting btrfs devices
+1364b88 ceph: delay raising exceptions until all daemons are stopped
 824c3af client: add initialized flag to client
 51ccce0 client: let set_filer_flags clear flags, toos
 3365952 librados: discard incoming messages when DISCONNECTED
@@ -28651,6 +32992,9 @@ a5366c8 ceph-dencoder: add all message types
 32010d7 msg: add missing #includes for messages
 1cb39fa msg: dump messages via build option
 597e97a osd: fix assignment in PG::rewind_divergent_log()
+0b68dbc add backfill test
+0236dc0 add backfill task
+e337c47 ceph_manager: add manager.blackhole_kill_osd()
 9d385f5 msgr: Document recv_stamp and add a dispatch_stamp and throttle_wait.
 ba4aad4 qa: test_backfill.sh: take osd.0 down
 5a54483 osd: restart peering if requesting acting osd goes down
@@ -28658,6 +33002,8 @@ ba4aad4 qa: test_backfill.sh: take osd.0 down
 747b3d4 osd: use RecoveryContext transaction, finishers on recovery completion
 f4e44e4 qa: test_backfill.sh: limit pg log length so we trigger backfill
 f1c3538 osd: fix divergent backfill targets
+d7be776 Allow user to disable lock checking.
+09bed16 Allow user to provide flavor to use.
 9520ee7 filestore: implement filestore_blackhole hook
 1fe75ee rgw: should remove bucket dir instead of sending intent
 2b5bbe8 librados: fix a leak
@@ -28682,6 +33028,7 @@ e9e212f qa: test/rados-api/list fix warning
 9454102 admin_socket: fix uninit warning
 483c089 mon: trim old auth states
 9bb3875 filestore: fix rollback when current/ missing entirely
+9da0118 make 6-osd-2-machine simpler... single monitor
 5e16974 osd: reset pgstats timer when we reopen monitor session
 9e78d53 clock: ignore clock_offset if cct is NULL
 5938d17 filejournal: add corruption test to check crc checking code
@@ -28696,6 +33043,8 @@ db3b9ee filejournal: fix header initialization
 8d439e9 filejournal: move zero_buf allocation
 f9620d7 client: do not send release to down mds
 e43db38 signal: use _exit() on SIGTERM
+06c8fdc regression: add admin socket test for objecter requests.
+f84b4aa Add admin socket task.
 0f9c6b4 test: add script for checking admin socket 'objecter_requests' output
 097bc5c objecter: add an admin socket command to get in-flight requests
 39f6c4c admin socket: increase debug level for successful requests
@@ -28717,6 +33066,7 @@ b8e6a6bd assert: include timestamp
 b3c80bc rgw: acls cleanup wip
 91b547b osd: remove the unused require_current_map
 2bc7105 filestore: fix typo
+fe2834f remove snap thrashing from regression suite for time being
 ec7a140 filestore: zero btrfs vol_args prior to ioctl
 dedf575 mon: num_kb -> num_bytes in cluster perfcounters
 625b0b0 osd: remove num_kb from object_stat_sum_t stats
@@ -28733,6 +33083,7 @@ ae36f59 filestore: TEMP_FAILURE_RETRY on ::close(2)
 a43937f filestore: return -errno from lfn_open
 0fd6ca9 filestore: audit + clean up error checks
 2835d40 rgw: rgw_acl_s3.* compiles
+4aa9ca4 CephManager: base timeout on time since last change in active+clean
 9dc7b92 rgw: fix warning
 cfe1d01 ceph: bail out on first failing command
 54a7673 ceph: don't write output on error
@@ -28785,6 +33136,7 @@ fdaf91e osd: implement --dump-journal
 a52762a rgw: read large bucket directory correctly
 802acb1 rgw: refactor acls, separate protocol dependent code
 6c275c8 rgw: fix warning
+29885f3 kernel: ignore connection problems while waiting for reboot
 e016cca Convert mount.ceph to use KEY_SPEC_PROCESS_KEYRING
 8a9252f rgw: adjust high level debug level
 148031b rgw: fix intent log processing
@@ -28795,6 +33147,7 @@ f441adf objecter: add stat ops to op vector!
 1d5c8fd objecter: gift reply data to outbl _after_ demuxing
 905e8d8 osd: make in/outdata split/merge helpers static OSDOp methods
 1a7c8b4 rgw: log_show_next() fix reading of the next buffer
+5bb9a9d Add small cluster thrashing tasks
 06e7562 filestore: overwrite fsid during --mkfs
 4c6c443 rgw: reset timestamp when processing starts
 127bbd1 hadoop: fix unix timestamp calculation in hadoop lib
@@ -28808,18 +33161,37 @@ db65295 rgw: log host_bucket, http status
 0e8b12c rgw: simple request logging
 63b94b6 mds: abort startup if we fail to bind
 4f70acf osd: abort on startup if we fail to bind to a port
+45e4c92 thrashosds: maxdead default to 0
 47db4d0 ceph: fix "run_uml.sh" script
 549b780 TestRados: implement max_seconds, reimplement argument parsing
+bf22a4f task/rados: use new usage for radosmodel tool
 20f3f68 RadosModel: prefix line with m_op
 7b2fd45 mds: fix uninitialized value in MClientLease::h
+b2c07d8 add simple thrash workload to regression suite
+71390f9 thrashosds: fix action selection
+8fc6086 thrashosds: make actions less nonsensical
 b5f8de7 msgr: move operator<< for sockaddr_storage to msg_types.cc
 e93999f qa/workunits/rados/load-gen-mix.sh
 ba83e8c qa: rados load-gen: use rbd pool
+9419f58 ls: include duration, less noise
+c5bbfff hammer.sh: new -nuke syntax
+8fb115f include run duration in summary.yaml
+8e126db mon.0 -> mon.a
+43da161 mds.0 -> mds.a
+7b47e49 ls: fix extraneous newline
 b7a1102 rados: load-gen: wake up on reply
 51e402e rados: fix load-gen 'max-ops'
 7d3b2c4 librados: allow ObjectReadOperation::stat() to get time_t mtime
+b58f956 ceph: ignore all leaks
 706b691 osd: recover_primary_got() -> recover_got()
 a4e2395 osd: clear missing set on replica when restarting backfill
+40fb86f ceph: take single arg or list for valgrind args
+c88ec57 combined mon, osd, mds starter functions
+f8ec23e rbd: default to all:
+72057a9 use local mirrors for (most) github urls
+fbfa94b teuthology-ls: show pid, last line of output for running jobs
+f70b158 show host -> roles mapping on startup
+f795261 lost_unfound: make test work with backfill
 a464294 msgr: don't assert on socket(2) failure
 a6c0610 msgr: uninline operator<< on sockaddr_storage
 6b02f9f osd: rev osd internal cluster protocol
@@ -28830,6 +33202,7 @@ a6c0610 msgr: uninline operator<< on sockaddr_storage
 42a6cef ReplicatedPG: munge truncate_seq 1/truncate_size -1 to seq 0/size 0
 0ded7e4 ReplicatedPG: munge truncate_seq 1/truncate_size -1 to seq 0/size 0
 44cb076 rgw: limit object PUT size
+3bfa41c Use yaml.safe_dump so unicode doesn't mess up the yaml files.
 d575337 objecter: fix up stat, getxattrs handlers
 7eea40e (tag: v0.40) v0.40
 81c0ad8 librados: make new ObjectReadOperations arguments non-optional
@@ -28847,8 +33220,10 @@ a855828 osd: mux/demux OSDOp::outdata in MOSDOpReply
 4f4b79c osd: include return code in OSDOp
 f42c658 osd: fill in empty item in peer_missing for strays
 10b0031 rgw: don't crash when copying a zero sized object
+0da4459 nuke: take config files from -t argument
 845aa53 ReplicatedPG: Do a write even for 0 length operation
 80f57f9 ReplicatedPG: fix stat accounting error in CEPH_OSD_OP_WRITEFULL
+96e89d3 kernel: loop reconnecting in case we race with shutdown
 cfa39bf qa/client/gen-1774.sh
 6cf7753 osd: fix PG::Log::copy_up_to() tail
 805513b osd: reset last_complete on backfill restart
@@ -28863,6 +33238,11 @@ b93bf28 PG: gen_prefix should grab a map reference atomically
 38b9b50 rgw-admin: add pool rm and pools list
 e2c0254 rgw-admin: clean up unused commands
 ac1e105 osd: bound log we send when restarting backfill
+5936923 thrasher: don't mark down osds out; tell monitor same
+3c0346b lost_unfound: typo
+6dae2f8 thrasher: adjust min_dead default
+fb74b90 thrasher: add max_dead
+50463ff verify all osds start before checking health
 79085ad rados.py: avoid getting return value of void function
 85552cf pg: remove unnecessary guard from calc_trim_to()
 b1da511 pg: add a configurable lower bound on log size
@@ -28872,6 +33252,9 @@ b1da511 pg: add a configurable lower bound on log size
 f09b21e resolve_addrs: return ipv4 and ipv6 addrs
 9e9b5c6 ReplicatedPG: fix typo in stats accounting in _rollback_to
 d4815e5 osd: send log with backfill restart
+f4883eb ceph: let the user running ceph-osd remove subvolumes
+2317b9a add rgw readwrite and roundtrip tasks
+d2fadf9 syslog: ignore lockdep non-static key warning
 c7d92d1 osd: fail to peer if interval lacks any !incomplete replicas
 3b81fa5 mon: allow specifying pg_num and pgp_num when creating new pools.
 69aface auth: Fix Doxygen warnings.
@@ -28907,9 +33290,12 @@ c23cc23 osd: add OSDOp::outdata
 d8ebbf4 osd: OSDOp::data -> indata
 b17736a osd: populate_obc_watchers when object pulled to primary
 a59ee8f osd: handle case where no acceptable info exists
+b354ce4 run: put pid in archive dir
+fbf7912 do not put monitors on the same nodes as clients
 92ca3ef perfcounters: fix unittest for new admin_socket interface
 d8e5499 Makefile: disable untitest_interval_tree
 bcf2146 unittest_interval_tree: make it compile
+13445d2 ceph_manager: a booting osd is no longer automatically marked in
 a774d50 osd: clean up src_oid, src_obc map key calculation
 3c60e80 osd: read op should claim_append data instead of claim
 0d175cd rgw: remove object before writing both xattrs and data
@@ -28918,6 +33304,9 @@ a774d50 osd: clean up src_oid, src_obc map key calculation
 26b54ae rgw: rearrange PutObj::execute()
 a0b5539 rgw: different atomic handling for small objects
 199b14d mon: fix uninitialized cluster_logger_registered
+001701a mon_recovery: need n/2 + 1 monitors for quorum
+cfeaef4 move multimon failure thrashing tests into regression
+da92107 ceph: don't skip monitor ports
 bebd393 objecter: ignore replies from old request attempts
 ac177d7 osd: encode retry attempt in MOSDOp[Reply]
 b501efd mon: document quorum_status, mon_status
@@ -28925,6 +33314,7 @@ ca8df7e mon: fix misplaced else
 643b9db ceph: speak new admin socket protocol
 5a5dece admin_socket: fix, extend admin_socket unit tests
 b389685 admin_socket: string commands
+561f06c suite: make email-on-success the default behavior
 14a4943 mon: elector needs to reset leader_acked on every election start
 435c294 mon: instrument elector so you can stop participating in the quorum
 99e5f85 mon: kill client sessions when we're not in quorum
@@ -28965,10 +33355,15 @@ ed9a4a0 osd: return EINVAL on bad PGLS[_FILTER] handle
 2f1720d librados: return int64_t pool ids
 a97aca7 rados.py: use uint64_t for auids
 8e56e99 radosgw-admin: add eol following info
+ec3a3a9 rados: fix example config
+71d5bcb Adjust rados model workloads for new config format
 0e470c5 testrados: replace testreadwrite and testsnaps with testrados
 9112405 RadosModel: check for out of order replies within WriteOps
 f03e770 RadosModel: allow TestOps to pass data to their finish methods
 ec6530d RadosModel: make object write ranges configurable
+cdd5c45 nuke-on-error: only unlock if this run locked the machines
+0176c9a Remove unused mon.0 variables.
+2e9b1c7 rados: use testrados instead of testsnaps and testreadwrite
 a66d90e osd: add a monitor timeout via MPGStatsAck messages
 f4b0cda Fix invalid docdir_SCRIPTS usage with >=automake-1.11.2
 f8929ba osd: trigger RecoveryFinished event on recovery completion
@@ -28978,6 +33373,11 @@ a125246 librados: take lock in rollback
 1c75418 osd: be a bit more verbose during backfill
 0692bed cmp: fix 5-uple operator==
 3dcaf6c osd: do not backfill if any objects are missing on the primary
+932257f rados: remove unused variable
+0af9c0a rados: clean up argument construction
+6df4ce5 rados: fix references to testrados
+cdf142b rados: fix documentation format
+2f71f03 misc: simplify reconnect logic
 949f24d rgw: create default constructors for some structs
 251fc3d osd: handle backfill_target for pick_newest_available
 a352589 osd: return EINVAL if multi op specified with no src object name
@@ -28994,6 +33394,7 @@ a693438 mon: only update full_ratio if we're the leader
 df84594 mon: make full ratio config change callback safe
 585fb5c clitests: update for new error format
 cec2692 clitests: update monmaptool test
+f04e295 teuthology rgw-admin: annotated test cases for inventory this is not a nose suite, so I simply added test case descriptions in csv format, and put a file to extract them at the top of the file. Signed-off-by: Mark Kampe <mark.kampe at dreamhost.com>
 48df71c init script: be LSB compliant for exit code on status
 3b2ca7c keyring: print more useful errors to log/err
 eba235f common: trigger all observers on startup
@@ -29074,6 +33475,7 @@ d959334 mon: fix setting of mon addr when joining a cluster
 e5f4910 man: Update the configuration example for radosgw
 83cf1b6 man: It is capital -C instead of -c when for creating a new keyring
 3e323e6 rgw: fix updating of object metadata
+d0e90d7 syslog checking: forgot a pipe
 08f968f rgw: bucket cannot be recreated if already exists
 f54f4aa obsync: add authurl to CLI
 bfbde5b object.h: initialize max in hobject_t(sobject_t) constructor
@@ -29084,15 +33486,19 @@ bfbde5b object.h: initialize max in hobject_t(sobject_t) constructor
 739fd9f man: clarify mount.ceph auth options
 e5a5ae1 man: update rule definition for ceph-rbdnamer
 4eb8365 authx -> cephx everywhere it's used
+7eec309 rountrip: add task
 b5c3259 ReplicatedPG: fix backfill mismatch error output
 41f64be ReplicatedPG: calc_clone_subsets fix other clone_overlap case
 5b41c47 OSD: use disk_tp.pause() without osd_lock
+97cc6c2 readwrite: fix task with default conf
 ec776f4 ceph.spec: Clean up and fix spec file and build for a couple of distributions
 0e0583f init-ceph/init-radosgw: Don't use unspecified runlevel 4
 7a7aab2 osd: wait for src_oid if it on other side of last_backfill from oid
 ca2e8e5 osd: EINVAL on mismatched locator without waiting for degraded
 0c54704 osd: preserve write order when waiting on src_oids
 da28605 client: fix logger deregistration
+659e66a readwrite: fix conf, task runs
+7d085ad readwrite: add readwrite task
 62c830f ReplicatedPG: add_object_context_to_pg_stat, obc->ssc may be null
 5a40093 obsync: add vvprint back in
 cda5f0d PG: clear waiting_on_backfill during clear_recovery_state
@@ -29180,10 +33586,13 @@ abecbc5 OSDMonitor: remove useless check
 207c40b libceph: add missing #includes
 2f281d1 libceph: catch errors from Client::init()
 c87f31e client: return errors from init
+31b5ccb coverage: use locally stored build instead of downloading from a gitbuilder
 7133a2f filestore: dump transaction to log if we hit an error
 6b42567 objectstore: implement Transaction::dump()
 3d13f00 objectstore: create Transaction::iterator class
+4da96ff rados load-gen workunits
 6ff95e9 qa: rados load-gen workunits
+c9e4504 Ignore lockdep being turned off for now.
 6d5e5bd pybind/rados: add asynchronous write,append,read,write_full operations
 fb8fd18 doc: Clarify documentation of reweight command.
 db30716 doc: Add missing documentation for osd pool get.
@@ -29207,7 +33616,10 @@ cf279a8 workunits: print tests pjd runs
 798ef38 osd: delay pg list on a snapid until missing is empty
 e2a9450 obsync: add swift support to obsync
 d21f4ab msgr: turn up socket debug printouts
+a768ad7 coverage: don't generate html reports for each test
+7b52dd1 syslog: ignore 'task blocked' warnings
 891025e5 udev: drop device number from name
+6b8588b Use btrfs for regression tests
 a5606ca pybind: trivial fix of missing argument
 e4db129 crush: whitespace
 808763e osdmap: initialize cluster_snapshot_epoch
@@ -29217,6 +33629,7 @@ d940d68 client: trim lru after flushing dirty data
 1545d03 client: unmount cleanup
 f3c90f8 client: wait for sync writes even with cache enabled
 adbe363 client: send umount warnings to log, not stderr
+e69057e internal: check syslog for errors
 2d3721c ObjectStore,ReplicatedPG: remove old collection_list_partial
 717621f librados,Objecter,PG: list objects now includes the locator key
 322f93a hobject_t: encode max properly
@@ -29227,6 +33640,7 @@ adbe363 client: send umount warnings to log, not stderr
 cada2f2 object.h: Sort hobject_t by nibble reversed hash
 348321a hobject_t: sort by (max, hash, oid, snap)
 2026450 hobject_t: define max value
+95e6324 workunit: set client id and secretfile env vars
 745be30 gitignore: Ignore src/keyring, as created by vstart.sh
 a1ebd72 ReplicatedPG: don't crash on empty data_subset in sub_op_push
 8afa5a5 workunits: fix secret file and temp file removal for kernel rbd
@@ -29250,6 +33664,9 @@ ddc11a8 test_rados.py: clean up after EEXIST test
 019597e filejournal: make FileJournal::open() arg slightly less weird
 86c34ba vstart.sh: .ceph_keyring -> keyring
 1e3da7e filejournal: remove bogus check in read_entry
+dbd7a3b Rename "testrados" task to not begin with "test".
+e80c32c Rename "testrados" and "testswift" tasks to not begin with "test".
+0dd4d69 Fix unit tests for SSH keep-alive setting.
 dc167ba filejournal: set last_committed_seq based on fs, not journal
 4a0b00a mon: stub perfcounters for monitor, cluster
 8bbe576 osd: safely requeue waiting_for_ondisk waiters on_role_change
@@ -29274,9 +33691,11 @@ d4aef20 hadoop: apache license.
 348c71c mds: fix blocking in standby replay thread
 f6ee369 global: make daemon banner print explicit
 5828009 mds: fix usage text
+50c4b31 Handle interactive-on-error also when error is from contextmanager exit.
 353ee00 mds: adjust flock lock state on export
 2443878 Objecter: loop the right direction when searching for local replicas
 1c696b6 doc: Add peering state diagram
+2918b50 Move kclient multiple_rsync workunit to stress collection.
 30ede64 Makefile: ipaddr.h, pick_address.h
 77a62fd Makefile: add missing uuid.h to tarball
 ebb585d Objecter: fix local reads in recalc_op_target
@@ -29308,7 +33727,9 @@ bed3c47 mon: handle rank change in bootstrap
 8b46409 mon: pick an address when joining and existing cluster
 5ba356b mon: remove unused myaddr
 0c9724d mon: simplify suicide when removed from map
+24ee09b Revert "more logs (yuck) for #1682"
 eb8d91f PG: it's not necessary to call build_inc_scrub_map in build_scrub_map
+c651c88 Properly handle case where first error is inside a context manager __exit__.
 c066e92 mds, osd, synclient: Pick cluster_addr/public_addr based on *_network.
 0477f23 common/pickaddr: Pick cluster_addr/public_addr based on *_network.
 eec61b4 common/ipaddr: Add utility function to parse ip/cidr style networks.
@@ -29322,31 +33743,56 @@ b47347b osd: protect handle_osd_map requeueing with queue lock
 811145f paxosservice: tolerate _active() call when not active
 88963a1 objecter: simplify map request check
 cd2e523 objecter: cancel tick event on shutdown
+ea00114 more logs (yuck) for #1682
 f607028 paxos: fix sharing of learned commits during collect/last
 3b53b72 rgw: support alternative date formatting
+721c0e9 nuke: don't specify full path
+4b53288 ceph_manager: %
+dcab329 fix conf thinko
 9aabd39 paxosservice: consolidate _active and _commit
 10fed79 paxosservice: remove unused committed() callback
 b521710 mon: mdsmon: tick() from on_active() instead of committed()
 becfce3 mon: share random osd map from update_from_paxos, not committed()
 9920a16 config: support --no-<foo> for bool options
+a08e7f1 regression/basic/tasks/kclient_workunit_misc: turn on mds log
 1a468c7 config: whitespace
+13c98df regression/basic/tasks/cfuse_dbench: turn up client debugging
 cc5b5e1 osdmon: set the maps-to-keep floor to be at least epoch 0
 45cf89c Revert "osd: simplify finalizing scrub on replica"
 57ad8b2 FileStore.cc: onreadable callbacks in OpSequencer order is enough
+508f4f8 Save summary after nuking machines.
+91cfdfe Add an example overrides file for running regression tests.
+7c8a7a8 Move multimds tests to a new suite, 'experimental'.
 dedf2c4 osd: error responses should trigger all requested notifications.
 09c20c5 objecter: trigger oncommit acks if the request returns an error code.
 9800fae paxos: do not create_pending if !active
 fa58768 Revert "mon: don't propose new state from update_from_paxos"
 66c628a mon: don't propose new state from update_from_paxos
+94100ad Move collections into separate suites
+42cecb5 suite: put common config before facets
+044a88c suite: schedule a list of collections for running instead of a single suite directory
 6ae0f81 rgw: if swift url is not set up, just use whatever client used
+23aae67 testswift: fix config
+d8fc151 Clean up C++isms.
+c545094 Add a task for easily running chef-solo on all the nodes.
 ef5ca29 fuse: fix readdir return code
 d61ba64 paxos: fix trimming when we skip over incrementals
 367ab14 paxos: store stashed state _and_ incrementals
 6bc9a54 mon: elector: always start election via monitor
+89f8041 ceph_manager: fix logging
+f85f5dd ceph: deep merge overrides, so e.g. log whitelists can be overridden
+a763297 misc: move deep_merge out of the MergeConfig class - it's generic
 685450b common: libraries should not log to stdout/stderr
+c6988a0 Save config after locking nodes, so targets are included.
 f1dd56d objecter: set skipped_map if we skip a map
 5afef02 objecter: add is_locked() asserts
 bf91177 objecter: send slow osd MPing via Connection*
+4e6cd55 filestore_idempotent: remove unused import
+7d51e3d mon_recovery: remove unused code and import
+f4d527e thrashosds: timeout for every clean check, not just the last one
+9d12b72 ceph_manager: add a default timeout of 5 minutes for mon quorum
+cb9ac08 ceph_manager: log mon quorum status so the logs show progress (or lack thereof)
+f3c569e rgw: add swift task
 fa4b0fb osd: add pending_ops assert
 17fa1e0 mon: renamed get_latest* -> get_stashed*
 b9d5fbe mon: fix ver tracking for auth database
@@ -29358,6 +33804,7 @@ b425f6d mon: always load stashed version when version doesn't match
 cd90061 Resolve gcc warnings.
 a5b8c85 osd: remove dead osd_max_opq code
 f418775 workunits: rados python workunit should be executable
+b43981b multimon: need at least 2 osds to go healthy
 102c434 crush: send debug output to dout, not stdout/err
 25eee41 test/run_cmd: use mkstemp instead of mkstemps
 1800986 ceph-authtool: fix clitests
@@ -29399,12 +33846,15 @@ aea7563 mon: create initial states after quorum is formed
 0a926ef mon: include monmap dump in mon_status and quorum_status
 8c3d872 mon: pull initial monmap from monmap/latest OR mkfs/monmap
 0ecae99 mon: take explicit initial monmap -or- generate one via MonClient
+2bad011 filestore-idempotent
+c5f070b filestore_idempotent.py: simple task to test non-idempotent osd ops
 dae6c95 test_filestore_idempotent: detect commit cycles due to non-idempotent ops
 add04d1 filejournal: fix replay of non-idempotent ops
 9f1673c test_filestore_idempotent: transactions are individually idempotent
 8df0cd3 filestore: make trigger_commit() wake up sync; adjust locking
 0981112 filestore: document the btrfs_* fields
 69cd362 filestore: sync after non-idempotent operations
+1c1ebb4 Add rados python tests.
 5407fa7 workunits: add workunit for running rgw and rados python tests
 2fb7029 rgw: remove warning
 71bfe89 test/pybind: add test_rgw
@@ -29416,6 +33866,7 @@ a177a70 rbd.py: fix list when there are no images
 27bb48c mon: overwrite in put_bl
 2f97a22 PG: mark scrubmap entry as not absent when we see an update
 8794112 rgw: implement swift copy, fix copy auth
+77c977c misc: allow >1 monitor per role in get_mon_names()
 704644b PG: gen_prefix: use osdmap_ref rather than osd->osdmap
 7fb182a OSD: sync_and_flush afer mkfs to create first snap
 a3dd5bd PG: update info.history even if lastmap is absent
@@ -29431,6 +33882,7 @@ b41b1fa PG: cache read-only reference to the current osdmap on pg lock
 0dffddf osd/: change type of osd::osdmap to a shared_ptr
 9db994a PG: always add backlog entry
 15da478 rbd: Fix the showmapped cmd usage
+303e863 add hammer.sh
 3354933 hadoop: return all replica hostnames
 e6035a6 hadoop: make listStatus quiet
 d7f911f hadoop: handle new ceph_get_file_stripe_address
@@ -29440,9 +33892,17 @@ c5c5037 client: fix bad perfcounter fset callers
 c51e2f7 osd: fix perfcounter typo
 1ac6b47 os: rename and make use of the split_threshold parameter.
 09455ee perfcounters: fix users of fset on averages
+afa56f1 nuke: increase reboot timeout
+6618a02 mon_recovery: add task to test monitor cluster failure recovery
+60863f7 ceph_manager: manipulate monitors
+6d39cc1 ceph: keep ceph.conf at ctx.ceph.conf
+9acea7a multimon mon_recovery tests on variously sized monitor clusters
 87634ce osd: don't open deleted map from generate_past_intervals
 20cf1e9 automake: enable 'make V=0'
+4b0cf89 Add rbd python binding test.
 1bc1a24 mon: handle active -> electing transition properly
+006a0dd Remove unused imports and variable.
+5d32bca Add nuke-on-error option.
 7609032 rgw: don't return partial content response with bad header
 7a32cc6 rgw: swift bucket report returns both bytes size and actual size
 a04afd0 rgw: abort early on incorrect method
@@ -29470,12 +33930,14 @@ c2fc986 monmap: simplify constructor
 2836104 rgw: fix accept-range for suffix format, other related issues
 2f881e1 Timer.cc: remove global thread variable
 d4ef921 common: return null if mc.init() unsuccessful
+c764b24 Fix leftover orchestra import clause.
 480b826 rbd: add showmapped to clitests and rst man page
 4e518ed rbd: Document the rbd showmapped cmd
 34d8039 rbd.py: fix list when there are no images
 ae41f32 OSD: write_info/log before dropping lock in generate_backlog
 fb70f5c FileJournal: stop using sync_file_range
 585a46c monclient: simplify auth_supported set
+a38c005 test_libcephfs
 1014167 Makefile: use static add for test_libcephfs_readdir.
 5b4e9d3 RadosModel: add DeleteOp to test object deletions
 280a4d1 rgw: fix tmp objects leakage
@@ -29486,9 +33948,13 @@ fc6522a rgw: don't purge pools in any case
 b873347 gitignore: just ignore all test_ files
 d4faf58 qa: workunit to run test_libcephfs_readder
 120c3fb test: write a test to try and check on Client::readdir_r_cb.
+4f3b113 ceph_manager: log ceph -s output so progress is visible in the logs
+0b451f9 Keep each ssh connection alive.
+6e3e0d7 connection: allow the caller to specify whether keep-alive should be used
 58eb8c5 rgw: fix null deref, cleanups
 7726e78 rgw: add support for chunked upload
 0d4987d rgw: fix crash when accessing swift auth without user
+b1a0c1a locker: fix race in locking
 376dad9 hadoop: remove unused fs_default_name
 3191e0d hadoop: FileSystem.rename should not return FileNotFound
 60e1e14 hadoop: ENOTDIR should be negative
@@ -29502,6 +33968,7 @@ dcf2d62 hadoop: remove unused variable
 a79b7e1 hadoop: emulate Ceph file owner as current user
 e9adf73 hadoop: use standard log4j logging facility
 c861ee1 PG: mark scrubmap entry as not absent when we see an update
+a2f406e testrados: set CEPH_CLIENT_ID without a ;
 f497132 debian: empty dependency_libs in *.la files
 0b0f65a add missingok to logrotate
 47b7036 debian: update VCS sources
@@ -29523,6 +33990,7 @@ ef51f0f monclient: fix else formatting
 e15177a monclient: fail fast when our auth protocols aren't supported
 9ea0223 osd: kill unused on_osd_failure() hook
 1d9e806 RadosModel.h: use default conf location
+810cae1 testrados: specify CEPH_CONF directly
 b9a0b2b Revert "PG: call set_last_peering_reset in Started contructor"
 f9b7ecd hadoop: Return NULL when the path does not exist.
 5bd029e osdmap: fix g_ceph_context reference
@@ -29552,6 +34020,8 @@ c57ed06 add images for documentation
 7a02202 rgw: handle swift PUT with incorrect etag
 cae7d5a rgw: handle swift PUT with incorrect etag
 697bba3 rgw: handle swift PUT with incorrect etag
+10c3508 rgw: add user suspend/enable test
+86aa940 rgw: log-to-stderr is now a binary flag
 a817a38 rgw: handle swift PUT with incorrect etag
 d9dfd14 rgw: handle swift PUT with incorrect etag
 87224c0 rgw: handle swift PUT with incorrect etag
@@ -29610,6 +34080,10 @@ afa3479 librbd: show correct size for snapshots
 46bb412 rbd: let all commands use the pool/image at snapshot format
 8c6db18 rbd: specify which commands take --snap in usage
 4b10cad rbd: check command before opening the image
+2be3999 Add btrfs dimension to thrash tasks
+2ad6545 Add testrados based thrashing tasks
+8d0a7c5 testrados: rename testsnaps to testrados and make snap testing optional
+a1249d0 workunit: set PYTHONPATH so we can test python bindings
 88905b3 test/osd: Add TestReadWrite
 5e4e797 mon: allow adjustment of per-pool crash_replay_interval
 f57c33d rgw: fix check_disk_state; add a strip_namespace function.
@@ -29623,6 +34097,8 @@ c15e62a mon: need to print pool id for output to be useful
 6779eb3 osd: make osd replay interval a per-pool property
 7cb4d25 osd: pg_pool_t: introduce flags, crash_replay_interval
 f2816a1 osd: pg_pool_t: normalize encoding
+61cbb32 ceph.conf: python parser doens't like ; comments
+3ed0656 ceph.conf: more frequent osd scrubbing; remove old cruft
 54e2826 scratchtool[pp]: fix rados_conf_set/get test of log_to_stderr
 9323f25 osd: fix PG::Log::copy_after wrt backlogs (again)
 1b846f4 radosgw: drop useless/broken set_val daemonize
@@ -29635,10 +34111,12 @@ e98cbc4 rgw: fix xattrs cache
 cf6a940 osd: eliminate CRASHED state
 d6661f9 ReplicatedPG: Include pg version in MOSDOpReply on error
 f8afd8b rgw: reduce rados bucket stats (and getxattrs)
+b8beff3 ceph_manager: count active+clean+<somjething else> as active+clean
 a1756c5 rgw: object removal should remove object from index anyway
 dd5087f osd: simplify finalizing scrub on replica
 29899de osd: PriorSet: acting/up membership implies still alive
 f94a44e OSDMonitor: reweight towards average utilization
+409c571 coverage: don't remove ceph tarball
 49b6c11 osd: PG::PriorSet: make debug_pg arg const
 fa66e65 osd: PgPriorSet -> PriorSet
 7bc855a osd: PgPriorSet: rename prior_set_affected -> affected_by_map
@@ -29695,10 +34173,15 @@ f89f4d9 osd: PgPriorSet: do not include UP osds in prior.cur
 9dfa110 rgw: fix swift account and containers listing limits
 c5638b7 osd: PgPriorSet: any_survived -> any_is_alive_now
 e6dbd71 doc: Change diagram to have radosgw closer to direct rados access.
+3c90c0d add singleton lost-unfound
+4ec37b2 add lost_unfound task
 edcd4d9 rgw: some more swift fixes
+83cf3fe Expect 'wrongly marked me down' messages during thrashing
+bcded7f ceph: add whitelist for cluster log errors
 0bad37e streamtest: do mkfs
 525a610 streamtest: print to stdout
 9c95604 mkcephfs: copy ceph.conf to /etc/ceph/ceph.conf (when -a)
+fba220e nuke: reset syslog configuration after rebooting
 9baf5ef ceph.spec: don't chkconfig
 21d941e ceph.spec: work around build.opensuse.org
 195a484 ceph.spec: capitalize first letter to make rpmlint happy
@@ -29757,6 +34240,7 @@ c98e1c5 ReplicatedPG: remove unused tmap implementation.
 42c8ae7 test_librbd: expect copy to succeed
 d0d265b librbd: return errors when read_iterate fails during copy
 a50fbe2 PG: merge_old_entry: merged delete might not be in missing
+493596a radosgw-admin: test swift keys creation/removal
 42bbea8 rgw: swift key removal
 05dae94 Revert "config: base default libdir, sysconfdir off autoconf values"
 1216eb2 rgw: some swift api fixes
@@ -29785,6 +34269,8 @@ b6c4615 librbd: slightly cleaner
 6185517 hadoop: get the right class member, and ask for it properly.
 dc40b37 auth: move AuthAuthorizeHandler registry into class
 11a1d60 OSD,ReplicatedPG: expire and cleanup unconnected watchers
+321381d teuthology-worker: remove --keep-locked-on-error
+3d3eb0e Remove --keep-locked-on-error, and behave as if it were specified
 9d846d3 move ceph_mount call
 039035b osd: discard requests that from disconnected clients
 88de6ab ceph.spec.in: handle docdir properly
@@ -29797,6 +34283,7 @@ af6a9f3 crush: try _all_ bucket items when doing exhaustive search
 6e29c28 mon: tolerate 50ms (instead of 10ms) of clock drift
 ff31d3c rgw: fix printf format warning
 8be9450 rgw: make log object naming configurable
+c56ab97 reconnect: ignore SSHExceptions before the timeout expires
 3d2f89f mon: make other send_incremental variant handle map discontinuity
 64935d4 auth: fix authorizer leak fix
 6e04f60 mon: fix osdmap trimming unsigned overflow
@@ -29806,12 +34293,14 @@ ad48ada pg: rename warm_restart and last_warm_restart
 85bbc2f .gitignore: add multi_stress_watch
 6bfae03 ReplicatedPG: assert *_FLUSHING unreachable for AccessMode
 5840ae4 test/multi_stress_watch.cc: add watch notify tester for teuthology
+4722d46 task/watch_notify_stress: watch_notify_stress now thrashes clients
 088d0df qa: rados-api: try harder to make these pool names unique
 646ef6a xlist: more assertive
 d78b8c7 xattr: use sys/types.h instead of linux/types.h
 960deb4 Makefile: include ceph_extattr.h to dist tarball
 07c8860 Revert "osd: simplify the session ref-counting branches"
 fce1761 osd: boot with map of oldest-1
+4e61e48 rgw: keep radosgw in foreground
 d272146 radosgw-admin: UTC for time in 'log show', local time in separate field
 e8dd1f8 utime: add gmtime() output function
 77cfbfb radosgw-admin: for date filtering for 'log list'
@@ -29859,6 +34348,8 @@ c4f4ea9 osd: use numeric values for OSD errnos, as ERESTART and ESHUTDOWN values
 2b9ba0b Use system-independent definitions for Ceph's setxattr flags parameter.  Ceph passes Linux setxattr flags on the wire without converting them, so use the same values for these defitions.
 9fde4d9 First cut of FreeBSD support.  This patch allowes ceph to compile successfully under FreeBSD.
 0cd3e56 Use /bin/sh, which is more portable.
+107db6a Retry listing machines if the lock server goes down.
+39a1e76 rgw: use normal logging mechanism
 64d2b97 osd: remove some dead code
 167c091 rgw: write debug acl output to the debug output (not cout)
 4e9969e rgw: more sane defaults
@@ -29889,7 +34380,16 @@ e661bf8 osd: use pointers for handle_advance_map
 ea087e3 mon: implicitly mark lost_at when a fresh osd rejoins the cluster
 b8aca4e radosgw: run as 'user' field in ceph.conf
 8e37140 radosgw-admin: make 'log list' behave when .log pool dne
+7b7ff6e teuthology-worker: clean up last_in_suite jobs
+3d3ba1e daemon-helper: detect the signal actually sent
 81c5f6a debian: don't recommend gceph
+d305d61 ceph_manager: remove unused raw_pg_status method
+8e03173 ceph_manager: run ceph -s as a normal program
+bad609e teuthology-results: include passed tests in email
+8bcd2a7 teuthology-results: include reasons for failure in email
+030161e teuthology-ls: show reasons for failures with -v
+1cad309 Add failure_reason to summary for the first failure detected.
+817b950 radosbench: get coverage and cores
 9f71bb0 rgw: don't specify create_pool and set_marker in create_bucket.
 4ae8f1c radosgw: make stop succeed when not running
 36f650b rgw: move rgw_bucket_select_host_pool behind RGWAccess as select_bucket_placement
@@ -29897,17 +34397,24 @@ c49c19c radosgw: fix init-radosgw provides
 bbc644e rgw: remove withdraw_pool function.
 1018673 rgw: remove preallocation of pools
 df9c660 rgw: remove preallocating pools maintenance tick
+fe1a271 watch_notify_stress.py: add ceph flags option
+28d6017 ceph.py: add btrfs option
 04f825e rgw: set xmlns on ListBucketResult element
 848a1f9 debian: add non-stripped exception for libcls_rgw rados class
 2a078f8 Makefile: more radosgw_admin -> radosgw-admin
 5d54398 mds: fix possible deadlock in multi-mds setup
+a192ee1 basic: add rbd cli tests
 b968ff3 workunit: clean up temporary files
 12ce321 ReplicatedPG: reset return code after find_object_context
 6103ee9 more radosgw_admin -> radosgw-admin
 c5b0e3e osd: trim ondisk log using Log::Entry::offset
 f85dfa7 osd: combine log_op into append_log
+ae19602 nuke: keep up with renaming cfuse -> ceph-fuse
 8111b61 mds: make jouranl writeable in MDLog::append()
 f4e61db mdcache: tolerate no subtrees in create_subtree_map()
+2b601a3 radosgw-admin: test additional keys, log list/show/rm
+b93a007 tasks/radosgw-admin: test radosgw-admin tool
+afc2dc0 nuke: killall apache2 and radosgw too
 4922757 ceph.spec.in still packages libceph.so in 0.36
 877cacb (tag: v0.36) v0.36
 c00e06f doc: add documentation for librbd python bindings
@@ -29919,6 +34426,7 @@ b1bed35 rgw: don't remove on-disk bucket object in rgw_user::rgw_remove_bucket
 19228d4 rgw: revert part of commit:30b814e2677659ddda4109a1421f33a47d83a05b
 30b814e rgw: don't remove bucket object twice
 b441ecf rgw: fix bucket-id assignment
+9b44469 s3-tests: use radosgw-admin instead of radosgw_admin
 48558bb rgw: separate bucket vs object owner tracking
 cb2d366 radosgw-admin: fix acl vs content-length check
 b269d3c radosgw-admin: rename clitest dir
@@ -29938,9 +34446,11 @@ d38b9a5 ReplicatedPG: dump_watchers in do_osd_op_effects
 4be3309 radosgw-admin: 'log list' and 'log rm'
 120736a radosgw-admin: dump ints, not strings
 a1942e6 radosgw-admin: fix bucket stats
+5242758 ceph_manager: parse osd numbers with dots
 fc8588f rgw: protect the id-based accesses from namespace collisions
 47e5958 rgw: remove get_bucket_info and convert users.
 e701177 radosgw-admin: fix 'bucket stats --uid='
+569e786 Forgot to add ceph task to a couple thrashers.
 8147841 radosgw-admin: include 'suspended' in user info output
 6891761 radosgw-admin: clean up some cout usage
 0870764 radosgw-admin: use formatter for 'bucket list'
@@ -30023,6 +34533,7 @@ d27d9d8 init-ceph: more c* -> ceph-* cleanup
 b582e7c osd: fix warning
 8563b96 debian: include radosgw init script in package
 b19a200 radosgw: init script
+10e90ab Add some thrashing tasks.
 30d81fe filejournal: improve debug error messge
 8aa15e8 osd/mon: make max maps per MOSDMap message tunable
 8711c32 osd: limit size of osdmap messages
@@ -30072,6 +34583,7 @@ dd8d4ec Makefile: drop useless all_sources thing
 5369c77 misc: use strncpy to prevent buffer overflow
 4b8cf14 ceph-elaster: add parentheses around macros
 1707181 test_store: parse args, don't link librados
+a92fef7 rename c* -> ceph-*
 5f95172 rgw: fix clone in case state is NULL
 e241433 rgw: send correct object id to class op
 abdb327 rgw: encode correct timestamp
@@ -30081,6 +34593,8 @@ abdb327 rgw: encode correct timestamp
 6eb8862 crushtool: fix argument parsing
 33151a9 crushtool: crush_finalize on any map modification
 e6eeaac Revert "crush: refuse to add item >= max_devices"
+c7ff5fc queue: results_timeout needs to be converted to a string
+b22fab3 Last run of iozone uses 10240M, so increase image size.
 b873ec8 rgw: two phase commit for index updating
 bfbda96 man: rebuild manpages
 3e9a936 doc: more c* -> ceph-* renames
@@ -30090,6 +34604,7 @@ fba541e libceph -> libcephfs
 97aa1aa mount.ceph: use ":/" instead of just ":" as ip/path delimiter
 e124b15 addr_parsing: null terminate safe_cat result
 17b2451 mount.ceph: fix use-after-free
+ef56a72 task/watch_notify_stress.py: add simple watch_notify stress test
 ac1a40d test/rados-api/test.cc: Add ability to set client id
 4686b56 test/test_stress_watch.cc: added simple watch stress test
 5503d45 PG: set log.backlog to false on corrupt log
@@ -30120,6 +34635,7 @@ d292aee doc: Move ops/grow under ops/manage.
 c448c4c doc: Clean reStructuredText syntax.
 aa666b1 objclass: add stat operation
 2ec5f15 cls_rgw: list marker uses lower_bound
+c3c2626 schedule: put results timeout in the job
 894a8b6 client: tear down dir when setting I_COMPLETE on empty
 b9e32ff rados: accept '-b' as an argument.
 0afda37 (tag: v0.35) v0.35
@@ -30146,12 +34662,16 @@ fdd98ee rgw: fix bucket listing (still using old scheme)
 73bc771 rgw: make cls_rgw compile
 0f7a490 ReplicatedPG: populate obc watchers on sub_op_push
 a711f29 osd: remove throttle_op_queue()
+e4dfe3d lockfile: increase interval to prevent incorrect locking orders
+5ff88d1 lockfile: don't fail cleanup if no lock procs exist
 741990c rgw: class for bucket directory
 46cfda7 osd: preserve ordering when throttling races with missing/degraded requeue
 4fb3d34 osd: set reply version for dup requests
 6cce379 CInode: don't leak old default_layouts if they get overwritten
 0ac8985 CDir: put delete with pop so we don't read free'd memory in output
 8c8e1d8 osd: clear need_up_thru in build_prior as appropriate
+734e63a locking: add another test.
+e498caa locking: fix the horribly-botched maxwait values
 d440a67 flock: clean up waiting records of a lock when adding it succeeds
 39970c0 flock: add a replay parameter to add_lock
 d64237a ceph_common.sh: Do not sudo to root unless needed
@@ -30163,8 +34683,14 @@ ea6986a flock: move operator<<() to flock header, remove extra line
 b6f4de4 msgr: parse ipv6 addresses without []'s
 cb7f553 doc/dev/logs.rst: Add performance counter writeup
 89c06e7 doc: Add section about changing config values
+0d5dbfa workunit: Fetch source from github.
 7d3aa0e osd: use target obj locator for source object if empty
+5583fac s3tests: Clone repository from github.
+4d92c35 coverage: Fetch source from github.
+4a0f8fe ceph.py: remove unused variables mds_daemons and mon_daemons
+a3c886a ceph.py/cephmanager.py: add ctx.daemons for restarting daemons
 0d46f06 conf: allow ; as a list separator
+85cb29d testsnaps: LD_PRELOAD needed for librados
 2981cca Make g_conf constant for all conf variable types
 dd01df7 const-ify integer config values
 a052c2e config: define config vars once in config_opts.h
@@ -30180,10 +34706,12 @@ ffe844a mds/MDS.cc: don't crash on bad injectargs
 b68eaf1 doc: Say "radosgw" not "rgw".
 ecd368c doc: Shrinking of MDSes is not supported yet.
 ee6126b Makefile.am: fix test_librbd
+a2372fc Move orchestra to teuthology.orchestra so there's just one top-level package.
 e86f3bd osd/OSD.cc: don't crash on incorrect injectargs
 6c81960 PG: fix typo in PgPriorSet constructor
 5b57fb6 tools: fix compile
 9a0f55d Get rid of silly unused parameter
+41f5ddf locking test: add a maxwait to each lock attempt
 8e141f4 monclient: reopen session on monmap change
 613e906 monclient: use cur_con throughout
 691794f msgr: send_keepalive to a Connection
@@ -30193,6 +34721,7 @@ f74f603 get_*_compat_set: get ctor param ordering right
 9a1e13c PG: assemble backlog directly rather than queueing on corrupt log
 6228389 qa: add test_librbd workunit
 e84996d Remove global ctors/dtors for CompatSet
+24b4b9c test_librbd
 923c60c librados: add conf_parse_env()
 5cb7b37 librbd: move c++ tests to gtest
 e2ec946 librbd: convert C tests to gtest
@@ -30213,19 +34742,26 @@ c25b955 doc: Remove duplicate paragraph from rbd.8
 b37b61e workunit: use sudo when first reading /sys... too
 efaf91b workunit: and delete root-owned file
 d021b22 workunit: you need sudo to look at /sys/kernel/debug
+cc72fe6 Callers of task s3tests.create_users don't need to provide dummy "fixtures" dict.
 9713666 rgw: rgw_admin, init gen_secret, gen_key
+1970bad thrashosds: fix timeout when no options are specified
 ba9cafd debian: no /var/run!
+8dd52f9 thrashosds: fail if cluster doesn't finally become clean in 5 minutes
 4281f02 PG.h: Initialize invalid_hash in Entry()
 83c6dd0 debian: create var/run/ceph
 a73d53f debian: radosgw: add dirs to package
 be43ff5 ceph: avoid noise when there is no output
 2eb9baa OSD: reset filestore_update_collections to false after do_convertfs
 ff639c1 object.h: initialize hobject_t correctly
+fc1b14d thrasher: get coverage and cores from calling ceph commands
+b72c5a8 thrashosds: wait for every pg to go active and clean before exiting
 34822ad workunits: make file_layout.sh more robust
 c16241f rpm: Include ceph.spec in release tarball.
+08747c5 thrasher: clean up a bit
 0978178 rpm: Create the /etc/ceph directory.
 b618528 rpm: Add newly-added files to %files.
 b4bc9f6 rpm: Build without tcmalloc, by default.
+6eba495 locking: there is no client.2
 3819beb ceph: -o - to stdout, not stderr
 4c0125d mon: Clean up MonSession/Connection leak on MForwards
 2f04acb osd: Clean up PG leak
@@ -30234,6 +34770,11 @@ b4bc9f6 rpm: Build without tcmalloc, by default.
 228bd59 filestore: free fiemap used for testing fs capabilities
 676dc9c WorkQueue: don't deliberately leak memory
 1a44500 ceph: clean up command output
+55d564d Run kclient tests on all clients.
+b1ae07f Run cfuse tests on all clients.
+1314f27 Run rbd tasks on all clients.
+091b0ae autotest: allow tests to be run on all clients
+e45109b rbd: allow specifying all clients
 1986d98 client: some debugging
 fc587d6 client: fix leaked Dir on rename
 7077e67 client: clean up Inode ref counts slightly
@@ -30263,6 +34804,7 @@ c29b221 ceph.spec: spec file should be creating /var/run/ceph
 37c7067 doc: Note that mkcephfs can't do upgrades etc.
 ecefa8e doc: mkcephfs ssh's directly as root@
 71dc75b mkcephfs: Config $user is irrelevant when we want to run as root.
+20e8b64 Increase rbd image size for ffsb
 24939ce ceph tool: convert to new-style arg parsing
 21dbec9 ceph_argparse: remove unused macros, functions
 a00e9db cfuse: use new-style argument parsing stuff
@@ -30281,20 +34823,25 @@ aca43cc TestSignalHandlers: use new argument parsing stuff
 b71f24a rados.cc: remove unused macro
 7f45388 librados-config: remove unused old-style argparse
 290da11 cauthtool: convert to new-style arg parsing
+77f52c9 Add another osd to the roles for two clients.
 bb167e5 gceph tool: convert to new-style arg parsing
 77153d9 rgw: set perm_mask for swift operations
 988ff0f rgw: fix update_container_stats()
 1a52cbd doc: Explicitly say ceph.conf host= must not include domain name.
 47b09e2 mkcephfs: cosd and cmon setup must happen as root.
 28539cc doc: Document mkcephfs-style installation.
+655e4a4 locktest: don't fail cleanup if the dir doesn't exist
 fd7a422 man: mkcephfs: Typo.
 703631e FileStore: close correct fd in version_stamp methods
 3fa2103 FileStore: check write_op_seq return code during sync_entry
 479e9ca ReplicatedPG: use the client_messenger for OSD Op replies
+d4a876f teuthology: do a deep merge of input yaml fragments
 933e794 os/lfnindex: silence compiler warnings
 abdaf98 client: only flush imported cap if it is now auth
 bd4a247 Makefile: flock.h!
+8f2ef43 lock: default to only listing machines you have locked
 2a10fba cosd: Do filestore conversion after common_init_finish
+5c99f9f rgw: run as an external fastcgi server to match dho
 78d13e4 rgw: fix include
 c856a40 LFNIndex: Fix lfn parsing for hobjects with keys
 b1b1808 rgw: rename source file
@@ -30311,34 +34858,50 @@ a04b15c object.h: Update hobject_t to include object locator key
 ba7ab2f rgw: rename openstack to swift
 969c67a rgw: s/OPENSTACK/SWIFT/g
 8b9ca2a rados tool: fix rados df formatting
+c96f134 Fix rbd_workunit_trivial_sync.
 b30e157 rgw: catch buffer::error exceptions thrown at decode
 22cc333 mds: flock: remove auto-insertion of waiting locks.
 327d0dd rgw: user info dumps json/xml
 62f3c46 doc: Deb install should work now.
 39b0354 doc: Write about deb installation.
 6506d43 rgw: log of nonexistent bucket config option
+e66dffc don't eat exceptions for breakfast
 2641eb3 qa: test false
 d6df086 mds: flock: tidy up comments
 1d5cf73 flock: move implementation into a separate c++ file
 e8b12d8 OSD: Fix flipped error codes in do_convertfs
 d0eed62 rgw: poll allocation thread
+6b2c23e remove kernel spec; we just want to leave that in overrides
+9de91f1 add locktest to multiclient collection
+7c4a5ac locktest: make it actually run the executable test
+82bb575 nuke: synchronize clocks after reboot, and optionally synchronize all clocks
 7f640a9 rgw: user removal delets user index last
+2455f7d Remove unnecessary exclude from s3tests.
 2aad7c2 rgw: multipart meta object uses tmap
 e09d4a9 doc: Architecture, placeholder in install, and first appendix.
 0a14c75 doc: Make object store diagram fit in 1024 pix wide browser.
 66ee58f doc: Move internals into a new section, /dev.
 c8c205f objectcacher: write vs truncate races are ok
 b2c762b mds: truncate sanity checks
+c502418 thrashosds: make it work when first mon isn't mon.0
+3ce1cbb thrashosds: no camelcaps, add some whitespace
 4fa62d5 rgw: configurable thread pool size
 bcf9cb7 cfuse: initialize Fh to NULL
 886440d client: clean up _{create,release}_fh
 b71f3bc mon: fix 'osd create ...' error paths
+eedc175 nuke: remove unused import
+4d77828 nuke: localize imports again so they occur after gevent monkey-patching
 a8ab69a librbd: cosmetic changes to progress stuff
+51ac061 nuke: reboot if rbd is mounted
 2255a9a Finishing moving src/doc/object_store.dot to doc/
 2e63eac Add object store architecture overview picture
 7293537 ceph tool: convert to new-style arg parsing
+d340eba schedule: add a way to delete jobs from the queue
 9ff3505 qa: add multiple_rsync.sh workunit
+f9daa70 parallel: don't hang if no tasks were spawned
 82776e1 osd: one more flush before collection_list
+3d69965 workunits: remove unused variable
+8c67d5a Don't specify kernel in any tasks.
 f3325e6 mds flock: make get_overlapping_locks and share_space handle 0 lengths
 f1cae57 osd: flush previous operations to fs before collection list + destroy
 d519789 osd: whitespace
@@ -30346,6 +34909,8 @@ d519789 osd: whitespace
 600bc8f MOSDSubOpReply/MSDSubOp: No need to update encoding version.
 bcef985 FlatIndex: lfn_get needs to set *exist for short filenames
 00e0b77 OSD: Fix encoding versions affected by hobject switch
+697f341 nuke: add option to reboot all nodes
+ec768ba Fix pyflakes warnings.
 9a26100 librbd: improve copy_with_progress
 d08c784 Makefile: os/CollectionIndex.h
 47a8063 assert: work around libatomic_ops vs assert in a less lame way
@@ -30355,10 +34920,15 @@ cf862c6 assert: use our assert
 3714862 global_init: make startup line consistent, less ugly
 961260d Makefile: add os/*Index.h to dist tarball
 4dce9fe doc/architecture: describe lib arch, config arch
+5dd50b3 coverage: remove debugging
+5b42b08 workunit: save coverage and coredumps
+ed2d1ea basic workunits: switch to run on all clients simultaneously instead of client.0
 6180c2c osd: fix osd reply message
+6d91915 workunits: rework a little bit to allow "all" clients in a run
 5b70958 cosd,OSD: Improve filestore upgrade path
 be2187c ObjectStore: Support older format on journal replay
 f9b8537 librados: don't hide error in case of failed pool ctx creation
+ec97dd8 cfuse: support running through valgrind
 7a8ab74 client: plug leak of inode refs on seekdir(0)
 6247df6 client: fix iterator adjustment in readdir assimilation
 ed7ebf3 client: drop mostly-useless relink()
@@ -30405,18 +34975,26 @@ cb89d00 testlibrbdpp: test copy_with_progress
 8293dfa pg: warn about inconsistent object_info/on-disk size
 281dae6 osd_types: add standard header so spacing is correct
 e61fc66 pg: remove useless line
+0c2bee1 valgrind: don't run valgrind_post if there's no valgrind
 b140ff2 client: debug link/unlink and dn_set parent relationship
 853658e heartbeatmap: fix reset_timeout with mixed-used threads
 f8296ef rados tool: df dumps formatted data
 c4219a1 rgw: log show dumps some information about bucket owner
+3a3c859 valgrind: scan logs for bad results
+50a648b valgrind: use xml output for tools that support it
 8cb25ca bandaid for gitbuilder
 698df7e osd: remove dead lookup_lock_pg()
+7be9eaa suite: add option to send an email if the entire suite passed
 9d37c92 osd: set suicide timeouts on some workqueues
 66b6289 mon: health not ok when up < in osds
 502cf0b filestore: add suicide grace to workqueue
 5fde401 heartbeatmap: add suicide grace
 8d64fee testlibrbdpp: test librbd::Image::copy
 68d1ba8 rgw: don't check for time skew when Expires was set
+4f4227a Generate coverage at the end of a suite run, and optionally email failures and ongoing jobs.
+2b66938 queue: delete every job when it finishes, so only running jobs are buried
+b582764 Add teuthology-coverage for analyzing test coverage for a suite run.
+0545e2f Add scripts to analyze coverage for a single teuthology run.
 1bb0e3c client: simplify Client::remove_cap() args
 b9c824c client: clean up Client::remove_session_caps() args
 16a5761 client: don't falsely populate mds_sessions on bad MClientSession msg
@@ -30447,6 +35025,10 @@ fa757b7 Makefile.am: install coverage files for libtool outputs as well
 85496ff mds: fix shadow
 09b08c5 fix utime.h #include recursion
 528703b crush: comment fall-thru
+fb33ef3 thrasher: improve documentation a little
+83e2634 thrasher: add option to mark OSDs down instead of out.
+0f9b74e thrasher: allow a config to set values
+5d5de0e thrasher: remove redundant wait_till_clean()
 75e53e9 mds: remove unused Mutation in do_rmdir_rollback()
 b826749 workunits: make file_layout test setting only one parameter
 4dba8bc cephfs: use strtol instead of atoi; handle 0 properly
@@ -30461,15 +35043,20 @@ e478e92 rgw: fix 500 error on bucket creation
 9538e87 rgw: fix clitest
 5fb216e MonClient: fix possible null dereference
 2488e44 mds/Server.c: fix memory leak in err path
+83a248b add multiclient collection with a locking task
 44c68b1 rgw: don't check for zero content when creating a bucket
 94fefce rgw: rgw_admin bucket stats dumps owner
 6dce3b1 client: drop useless insert_trace timestamp argument
 3ef32bc client: be careful about replacing dentries during readdir assimilation
+1130e5f coverage: create dir conditionally
 0b1ad60 rgw: add radosgw_admin bucket stats
+0840d05 lockfile: add a lockfile task
 2a4cf8f client: factor update_dentry_lease() out of insert_dentry_inode()
 53b3e14 librados: fix memory leak in error path
 94af6a4 test/confutils.cc: fix resource leak
+103a198 more kclient workunit/suites
 f2381f9 client: carry inode reference across calls to _flush
+7612340 add fsx workunit under cfuse, kclient, rbd
 2ba01bb rgw: fix prepare_get_obj when ENOENT
 ebae5a7 rbd: fix shared_ptr usage
 57fd56c do_kernel_showmapped: fix resource leak
@@ -30480,10 +35067,12 @@ ad1b2b0 CephBroker.cc: fix allocation/deallocation mismatch
 6fd4f3a rados_sync: make number of worker threads configurable
 76718b0 rgw: clear warning
 e9b739f journaler: fix waiting_for_zero clearing, again
+c2c5ac0 lock: --list-targets: list locks and dump result in targets: yaml format.
 23d94a4 Revert "pipe.c: #define GNU_SOURCE"
 6c6fa6d client: only link directories into hierarchy once
 eaaefec mon: fix store_state
 ba44cde journaler: fix waiting_for_zero clearing
+42318c5 check ceph cluster log for badness (ERR, WRN, SEC)
 2144c7a add missing include, required for certain environments
 adf1efe rgw: fix multipart upload
 1a7eac4 parallelize rados export and import
@@ -30515,6 +35104,7 @@ db8ad46 mon: 'osd crush add <id> <name> <weight> [<loc1=foo> [<loc2=bar> ...]]'
 deb25e0 mon: make auto-marking of booting osds 'in' optional
 09f4c46 client: kill dentry_of() macro
 7ef44b7 osd_types: fix category encoding in object_info_t
+21d0441 ceph: copy cluster log file to archive/ceph.log
 d5a9301 qa: define _GNU_SOURCE for qa helpers; add btrfs dir makefile
 ba4551d librados: whitespace
 3205441 librbd: reimplement copy using read_iterate
@@ -30539,11 +35129,14 @@ c412da8 osd: remove dead code, whitespace
 3a623bb librados: prefix api test pools
 2fb3af4 librados: statically link gtest into librados unit tests
 6decfa7 Makefile: allow gtest to be linked in statically
+fc4da52 rados_api_tests: run on top of cfuse, not rbd
+e79dda9 workunits: set CEPH_CONF environment
 239f61e .gitignore: scratchtool
 b02a347 Makefile: rule to upload + submit coverity job
 aa3a8dd addr_parsing: fix warning
 7d2f4a5 escape: fix snprintf args
 952009c .gitignore: scratchtool
+aa575c1 rbd: make default image 10G instead of 1G
 febe232 librados: fix shadow
 244b6ee mds: fix shadow
 56b1484 mds: fix shadow
@@ -30578,6 +35171,8 @@ ab40c3c Revert "Remove old rados tests"
 c095f74 rgw: fix bucket removal
 6fca624 osd: call incorrect osdmap addrs errors
 3794bd9 rgw: fail operation when trying to clone to the same shadow
+410d963 add multimds subtree thrashing test
+a42d9a8 add rados_api_tests.yaml
 e5cf2a4 mds: xlocker_caps are supplemental to caps
 a35ac29 mds: only client hold xlock in xlockdone can change lock state
 1dfdc73 rgw: fix prepare_atomic_for_write
@@ -30603,6 +35198,7 @@ f336fef rgw: fix some issue, basic stuff works
 9f5d163 file_layout.sh: add test for setting a new data pool
 665fd82 Remove old rados tests
 eb8925a rgw: fix locator for shadow objects
+cfd5456 suite: support a suite consisting of multiple collections
 1546bea mds: share max_size on any client_range adjustment
 87e20c9 rgw: add missing files
 b8b8f9d add WatchNotifyTimeoutTestPP
@@ -30650,6 +35246,10 @@ afdb1f3 rgw: list objects shows time in GMT, thread safety fix
 a311715 src/perfglue/heap_profiler.cc: fix snprintf
 14de6d2 client: compile with older libfuse2
 cb1f050 osd_types: fix merge
+e20bae2 valgrind: Document!
+4efc95f include log in valgrind log file names
+d5eb2c2 ceph task: split up arguments a little more
+5323e17 valgrind: move valgrind logs to log dir
 340b434 (tag: v0.33) v0.33
 89eeb34 osd: pg_stat_t: throw exception on unknown encoding
 ce00564 qa: test rename into snapped dir
@@ -30672,10 +35272,14 @@ cc25266 test/rados-api/aio.cc: add RoundTripPP
 44d3193 monmaptool: don't inc bool
 915aa41 crushtool: don't inc bool
 8df56b5 mds: break #include recursion
+aa74481 ceph: split up daemon-running arguments and insert valgrind ones
+9ec19f1 ceph: Set up valgrind as a flavor, and create a dir for logging.
+98ac89a ceph task: pass the full config to the daemon startup subs
 3e66b80 rgw: some encoding fixes
 3b9fbcf rgw: handle zero and invalid max_keys
 b10b3a3 rgw: create CommonPrefixes section for each prefix
 19ec8d0 rgw: adjust returned error when got invalid auth request
+747deec Add assert to catch simple typos in roles list.
 80dfc98 obsync: add SRC_SECURE, DST_SECURE
 e2d4462 LibRadosAio: WaitForSafePP
 ffab58f test/rados-api/aio.cc: SimpleWritePP
@@ -30714,21 +35318,34 @@ d745572 cfuse: properly invalidate page cache data via callback
 e643e4e fuse: enable atomic_o_trunc
 49a89c9 fuse: allow big writes
 61a712d Add test/rados-api/misc.cc
+6938946 manypools: remove commented-out code
 d172800 librados: implement/document tmap_{get,put}
+1ea825c teuthology-nuke: split the big main function
 78f0f2d test/rados-api/snapshots: add SelfManagedSnapTest
 0e51231 librados:fix name of rados_ioctx_snap_set_read/wr
 287b0a1 radosgw_admin: fix clitest
+af0d7c5 teuthology-nuke: move it into its own file.
 fb774ca mds: don't wait for lock 'flushing' flag on replicas
 4a960e4 rgw: can specify time for intent log purging
 8caef77 test/rados-api/snapshots.cc: test SnapGetName
 3f9c0d0 test/rados-api/snapshots.cc: test Rollback
 762e3e1 rgw: reduce intent log obj name time resolution
+453a0f9 teuthology-nuke: identify and reboot machines with kernel mounts
+9566008 teuthology-nuke: use a more robust cfuse mount finder
+257d631 teuthology-nuke: split out different pieces into different loops
+b5859f8 Move reconnect function from kernel task to misc.py
 3dbf9d4 test/rados-api/snapshots.cc: test SnapRemove
 75e6731 rgw: add 'temp remove' tool for rgw_admin
 47ab2ba qa/fsstress: be verbose
 c635c46 librados: document tmap_update
 ba3ac0f librados: document rados_pool_list
 e9d9201 rgw: update radosgw man page
+7fd798a Configure grub to default to the right kernel, not the greatest installed one.
+e98b218 restructure in terms of collections
+39e22e4 Handle socket.timeout when waiting for a reconnect.
+742109f Wait up to 300 seconds for a reboot.
+2387471 add workunits on top of rbd
+bf7b1dd ceph: fix max_mds calculation
 32711fc rgw: remove original object in put_obj, instead of truncate
 68bd54d librados: support op flags
 07837c9 osd: don't request heartbeats from down peers
@@ -30742,11 +35359,15 @@ b2a3623 objecter: fix compile error
 b1539a2 objecter: allow setting of op flags for previously added op
 45e7ac7 osd: add CEPH_OSD_OP_FLAG_FAILOK
 5107813 Add test/rados-api/stat.cc: test rados_stat
+a1f3cac kernel: comment reconnect task, clean up reporting
 7dcae35 auth: use set<> instead of map<,bool>
 446f89e librbd: deduplicate sparse read interpretation
 f7ec378 librbd: make read_iterate mirror AioCompletion::complete
 d48bebe librados, rgw: integrate stats categories
+663bbf8 manypools: remove commented-out code
+1ccdcb9 Make rbd task use mnt.N not mnt.client.N as mountpoint.
 a3219c9 test/rados-api/pool.cc:test PoolCreateWithCrushRule
+780ebcd Make sure workunit task does not create mnt.N by itself.
 fd0d26d test/rados-api/pool.cc: test rados_ioctx_get_id
 5a06f0c test/rados-api/pool.cc: add AuidTest1, AuidTest2
 54b9441 test/rados-api/pool.cc: test PoolLookup,PoolDelete
@@ -30774,12 +35395,17 @@ ed19946 test/rados-api/aio.cc: add RoundTripAppend
 87f2960 test/rados-api/io: add roundtrip, overlappingwrite
 e442a2c Makefile.am: build gtest in debug builds
 41f484d objecter: allow requesting specific maps in maybe_request_map
+4e2ec6f Add interactive-on-error, to pause and explore on error.
 5cb2bd2 testlibrbd: remove useless print statements
+eee1d9a allow s3tests.create_users defaults be overridden
 52b90b7 testlibrbd: NUL-terminate orig_data
 60b4588 testlibrbd(pp): accept standard command line arguments
+4241185 Add simple unit test for get_clients.
+b5ba155 Revert "fix get_clients"
 5e9bdf8 librados: add missing method that was lost in merge
 8942d2b rgw: add hour to intent log name for higher resolution
 7cc208b rgw: log shadow objects removal in intent log
+137f36d teuthology: add task manypools
 66050ad Add test-rados directory, I/O test
 98ee76b pgmon: use pool.get_last_change whenever creating new PGs
 5b260cd rgw: return ENOENT after get_obj() in some cases
@@ -30795,6 +35421,11 @@ fca7908 pgmon: call check_osd_map via a new on_active implementation
 45ca940 objecter: halt compound operation only if result < 0
 ad31db9 Remove unused dyn_sprintf
 6ad7f46 config.cc: clean up includes
+01fac3e new gitbuilder ref/branch naming
+3f2ad30 cfuse, kclient: print remote host
+83b6678 fix get_clients
+06e3e69 tasks/kclient: don't clobber remote
+ef2b809 use coverage_dir
 822d28b mds: chain rename subtree projections
 7a3e750 client: whitespace cleanup
 3d25879 client: send renames to the correct node
@@ -30803,11 +35434,17 @@ ad31db9 Remove unused dyn_sprintf
 d99333d injectargs: forbid unsafe string modifications
 c409e70 test/daemon_config: add injectargs test
 2d4f549 Add daemon_config, libceph_config, etc.
+f38c369 kernel: install in parallel
+f66c010 kernel: debug weird socket exceptions
+6df0d71 kernel: reboot immediately after installing
 7be4b6d pgmon: create ALL pgs in new pools with correct creation epoch
 6dcf379 add config unit test
+3e6b17f Down machines shouldn't be considered free.
 a33b837 librados.h: fix out-of-date comment
 fa2a406 debian: don't explicitly list deps; let debian figure it out
 92e976a config: more cleanup
+68e6f2b Make scheduled tasks leave some machines free.
+4e399da Log connections to targets
 66c3d8f libceph.h: fix C compilation
 2c9949e config: add proper locking, fix comments
 f53cc37 rgw: mark ops as atomic
@@ -30848,13 +35485,20 @@ cf88ce5 messages: change = to result = for less confusion!
 ab537f1 Revert "qa: Remove fsx workunit."
 6a920bb os/FileStore: print out BTRFS_IOC_SNAP_CREATE errs
 efc1566 osd: add CATEGORIES incompat feature
+ac0a469 teuthology-worker: log to a file with timestamps
 a71adf1 librados: add create in specific category to c++ api
 fdd9332 osd: allow 'category' to be set on object creation
+5897d7b teuthology-nuke: run in parallel, and print each node being nuked
 ee4460d osd: segregate stats into categories
+30a8dac Set success at the beginning of a run.
 8981a23 rgw_rest: don't leak formatters
+e8676ce teuthology-nuke: reset rsyslog config
+d079d51 teuthology-worker: keep machines locked on error
 cec04ca XmlFormatter: add DTD option
+c7b62fe teuthology-lock: update usage
 ede86e7 Formatter.cc: add get_len method
 24a7b5f RGWFormatter: get rid of one flush variant
+cd04423 teuthology-lock: allow list of locks to be filtered by owner and status
 dbbce93 rgw_formats: introduce dump_unsigned, dump_int,etc
 7b23944 rgw: rename dump_value_str to dump_format
 9dbeeaa rados tool: fix cmdline parsing for --block-size
@@ -30913,6 +35557,7 @@ c775c03 rgw: get current utc epoch differently
 aba88f5 rgw: init correctly req_state->{bucket, object}
 a4e4c08 rgw: fix total time reporting in rgw_admin
 5c194f5 rgw: tweak content-md5 handling
+6ac6f7a teuthology: convert from bzip2 to gzip.
 86c7260 heartbeatmap: fix/clarify the commenting
 925cb46 scatterlock: compress boolean flags into a set of state flags
 acca584 scatterlock: rename scatter_flags -> state_flags
@@ -30936,6 +35581,7 @@ ef509b3 rgw: check content md5 validity when doing auth
 dc4834b scatterlock: convert [un]scatter_wanted to a bitfield
 579f2e9 mds: Handle unscatter_wanted in try_eval(lock, need_issue)
 f5f6b12 mds: Split the CInode::scatter_wanted field in two
+277c4ff set max_mds based on non-standbys
 3d9621f heartbeatmap: fix mode
 8e4a358 heartbeatmap: warn if previous deadline is missed
 a981333 ceph_context: only wake up periodically if heartbeat_interval is set
@@ -30968,7 +35614,13 @@ c4a5380 vstart: use paired MDSes with a specified standby.
 5089cf7 mds: make two passes on scatter_nudge
 d22e48f mds: honor scatter_wanted flag in scatter_eval()
 447d4f6 testrados_delete_pool_while_open: remove from make
+07745f8 no ++ in python
+c220a4f roles/3-simple: include a standby mds
 77dd3b7 remove testrados_delete_pool_while_open
+573c9ff configure mds's with -s suffix as standby
+5015b90 roles: use letters for mon, mds names
+5b09244 tolerate named (not numbered) mons
+7c0f7c2 specify and clean up admin socket
 56f955b mds: fix projected rename adjustment
 c7236d9 mds: clean out rename subtree cruft
 208e8e3 mds: simplify subtree map after adjusting for rename
@@ -31078,7 +35730,18 @@ a25374e mds: witness rmdir when subtrees are on other hosts
 820b3f3 rgw: setup as external server is configurable
 9eed12f rgw: clearer logging context
 41f7acf config.cc: fix comments
+4630a0a lock server: configure for apache with mod_wsgi
+629187f Set content-type with PUT.
 019955a PGMonitor.cc: add force_create_pg command to monitor
+02d0efa schedule: make default owner different from that of a normal run
+99afde1 Update example targets in readme.
+731fe41 Remove print that clutters the worker logs.
+271e066 Connect without using any known_hosts files.
+8d196b0 Make targets a dictionary mapping hosts to ssh host keys.
+e5f33ca Add command to update ssh hostkeys.
+81bebfc lock server: return host pubkeys with locked machine names
+77174bd lock server: allow sshpubkey to be updated
+160174c Update lock db schema.
 43575c7 rgw: fix concurrency issue
 bd89bd9 ProfLogger: add request codes
 77fc561 ProfLogger: use double quotes, add commas
@@ -31121,10 +35784,13 @@ f71ab2a ProfLogger: don't return val from inc/set/etc.
 77bcc06 Remove lingering references to g_conf
 92df6e3 rgw: fix range dump
 aef29ca mds: fix protocol versions
+ac5c1c4 Add an overrides section for the ceph task.
 a7d791d rgw: multithreaded handling
 d4ed22f recalc_op_target must give an error if pool gone
 172dc20 Makefile: remove dup testrados_list_parallel_SOURCES
 02abdca rgw: multiple fixes, prepare main loop for multithreading
+e056686 Unit test that connection.connect actually calls create_key.
+7587f79 Optionally override system host keys.
 4f08881 rgw: fix compilation for certain architectures
 2955783 boto_tool: add --xattr argument for PUT
 b4b3672 boto_tool: add --get-obj-xattr
@@ -31134,6 +35800,7 @@ ac07e76 rgw_common: remove unused variable
 91343a8 Add control character detection
 5d606c2 rgw: fix log
 0593001 rgw: cleanup, remove globals and static cruft
+9721e33 Better interface for running functions in parallel.
 8b1119f bloom filter: update to latest version
 585239a filepath: remove asserts
 56abfd8 honor CINIT_FLAG_NO_DEFAULT_CONFIG_FILE
@@ -31142,6 +35809,7 @@ ba31462 rgw: configurable way to enable/disable cache
 8a8c1e8 ReplicatedPG: project changes to clone_overlap
 a9f815b CodingStyle: whitespace
 bc6eb10 CodingStyle: final decisions?
+0000b63 ceph.conf: remove other random bits
 357db1d ReplicatedPG: sub_op_push fix
 143e2dd Don't build build tests unless requested
 2de0156 rados parallel tests: prepend "test"
@@ -31173,11 +35841,23 @@ b9c0a72 rados_list_parallel: make num_objects selectable
 cccdd43 Add rados_open_pools_parallel test
 aaa1a02 rgw: clean up temp object if failed to PUT
 4089e4d rgw: clean up temp object if failed to PUT
+176b304 fusermount runs on a single mount point.
+b99e33f Download ceph binaries in parallel.
+3e2c6c6 Run workunits on different clients in parallel.
+a282991 Download and run autotests on multiple clients in parallel.
+4b245fc Add a utility for running functions in parallel.
+127ef68 Feed locally-created binary tarball to remotes in parallel.
 5983604 rgw: remove include of curl/types.h
+aee9442 Use a nameless tempfile for local tarball, avoids cleanup.
+f52df63 More careful error checking, avoid need for shell quoting.
 cb86978 test/rgw_escape: add JSON + utf-8 test
 e8b6125 test/rgw_escape.cc: add utf-8 test
+e7618a6 Clean up tarball tmpdir in all cases.
 f29b9bd CodingStyle: Typos, whitespace.
+c701fe5 Use tempfile instead of ad hoc temp dir creation.
 6bca619 rgw: fix escaping clobbering utf8
+69a6b04 Remove TODO note covered by teuthology-nuke.
+3547eba Avoid identifier clash with builtin "dir".
 da1f92c crushtool: Add call to global_init() to avoid dout segfault
 8af8ec8 Revert "mds: handle importer failure on exporter"
 fa48867 obsync: Depend on python-pyxattr instead of python-xattr
@@ -31186,12 +35866,30 @@ bf768cb mds: handle importer failure on exporter
 c9d6907 rados: fix warning
 3698522 client: print ino along with &fh for ll_ file ops
 ad20e71 rados: fix warning
+49e0e3f ceph.conf: clean out random debug level changes
+8f3d56a include sha1 in summary
+479af4b ls: mention directories without summary.yaml
+1edd435 Clean up from pyflakes.
+5fadb1c Whitespace and style cleanup.
+c53615c Remove unused variable.
+e69cf0b Success of test may not have been set yet.
 41451a0 rgw: bucket deletion updates caches
+f92b3a1 add locktest task
+38ad4d1 task ceph: distribute monmap to all nodes, not just mons.
+28f19a4 Add an option to keep machines locked if a test fails.
+c47dc17 lock: specify machines as input yaml targets: clause
+1767b21 print --lock-many result as yaml targets: stanza
+2f35edd clean up locked machine list
 d5c8b96 mime.c: avoid compiler warning
 1fdf721 test/mime: test invalid decodes
 1bfe9b8 mime.h: better comments
 4f1d6ac Add mime encoding
+91c6f35 tell user which machines you locked
+6cf9633 nuke: use default owner
 cc40f11 rgw: fix lru
+a8d4901 make connect work if no roles are specified
+d1eb9ce suite: schedule jobs instead of executing each configuration serially.
+85c24bd Add teuthology-schedule and teuthology-worker.
 059019c rgw: restructure runtime config env
 844186f mds: only issue xlocker_caps if we are also the loner
 48837e3 mds: rely on master to do anchor locks for slave_link_prep
@@ -31216,9 +35914,14 @@ d72bdab mds: take a remote_wrlock on srcdir for cross-mds rename
 025748a mds: implement remote_wrlock
 4d5b053 client: clean up debug output
 c3a4082 mds: add mix->lock(2) state
+4218d70 Add httplib2 to setup.py.
+5981ffb readme
+a260da1 teuthology-suite: pass --lock and --block to teuthology
+fd30ed7 Add --block option to retry until machines are locked.
 d6f35b5 rados: fix usage()
 fbdb0a3 mds: check that dnl exists before dereferencing it.
 a20d110 rgw: cache cleanups
+f14b4e1 Check more invalid argument combinations for teuthology-lock.
 61fdbbf librados: add conf_parse_argv, use in tests
 ffdbcb5 systest: support passing in argc, argv
 54ca67b handle_pool_op_reply: update osdmap even on error
@@ -31234,39 +35937,78 @@ af93d81 mon: fix up pending_inc pool op mess
 5dc09dd mds: set old and new dentry lease bits
 616ff50 mds: conclude ENOENT during travrse if we have a readable null dentry
 a91b1bf Revert "mds: fail out of path_traverse if we have a null dentry."
+e327515 Remove locking from TODO.
+f6efcd3 Update readme for locking.
+a55d2eb Read lock server from ~/teuthology.yaml.
+9158c83 Verify that machines are locked before nuking them.
+9bfca87 Check that all machines are locked, and add an option to lock machines instead of providing targets.
+4d62dd3 Add command line tool for locking machines.
+09bee43 Move username to a utility method.
+dbf5e54 Add simple lock server HTTP interface.
 4761317 rgw: fix copy operation (clean up acls)
+4fcecf0 task ceph: set_max_mds so multiple MDS nodes are used
+850f337 workunits task: clean up properly if there's an error.
 03b6c40 qa: mds rename: account for not being in the ceph root dir
 b5e4a31 move mds rename tests to workunits directory
 e483bb6 qa: mds rename: Rework so it will function in teuthology as a workunit:
+9655042 Skip s3-tests marked fails_on_rgw, they will fail anyway.
+3665f75 The shell exits after the command, hence there is no need for pushd/popd.
 088013b mds: cleanup: use enum for lock states
 8f9eaf0 rgw: when listing objects, set locator key only when needed
+7ea8ecd Don't set unix env vars in fastcgi env.
 0c6d2e6 rgw: async rados delete should be called with the correct object
 1d7fbed rados: rename load-gen options
+7e603ce adjust ceph.conf from yaml input
+f164dd7 nuke: sudo for the final rm -rf
+f80a2f6 Remove quotes from apache conf.
+a3b42e6 Let apache kill rgw.
+ba6545f Set LD_LIBRARY_PATH for rgw.
+d04256a set flavor to 'local' if pushing from local src dir; always record in summary
 1da8f81 honor CINIT_FLAG_NO_DEFAULT_CONFIG_FILE
 5b2de2b mkcephfs: Only create OSD journal dir if we have a journal.
 2aa146a mds: always clear_flushed() after finish_flush()
 fb7696f client: fix num_flushing_caps accounting
+003492a Retry connecting based on more error codes.
 e9e3883 client: don't call flush_snaps when nothing to flush
 933e349 mds: kill stray break
 7e1f09f context: implement complete()
 531f46c logrotate.conf: Mark stat/*.log as "missingok"; it's not always there.
 8459f80 rgw: remove temp objects asynchronously
 b670f31 Move stat/*.log to the end of logrotate.conf
+2b168b0 nuke: do not escape fusermount .../mnt.*
 6feab3c mds: fix file_excl assert
+3dd95f6 .gitignore: ignore emacs backups
+d4fdaba a few basic kclient workunits, reorg
+e1db8e9 new workunits
+ae87abf Add s3tests task.
+03a08eb Add rgw task with hardcoded apache config.
 924a322 obsync: improve formatting a little bit
 da917ad obsync: add man page, documentation line
 f5cca2e buffer: remove do_cow,  clone_in_place
+6e73607 More idiomatic Python.
+ce041a5 summary is used outside the try: except:, move it outside it.
+c1cd141 Only ignore ENOENT errors in teuthology-ls.
+fdbf591 Whitespace cleanup.
+e697e4b pyflakes cleanup.
+2137da2 Add final slash to path to enforce it being a dir.
+53fc692 use local source dir for teuthology run
 a6ffcc8 librados: close very small race condition
 0e6de71 mon: add 'osd create [id]' command
 1af8998 client: clean up cap flush methods
 984e5a0 Makefile: libmds.a, not libmds.la
 038a754 mds: fix off-by-one in cow_inode vs snap flushes
 e6eef5e rgw: sync cache between rgw instances
+effee7f Make kernel a separate entity outside of tasks.
+4b1d536 Don't recreate ctx.cluster when reconnecting after installing kernels.
+6f86de4 Save kernel version in summary instead of a separate file.
+7b1b332 Kernel task does not need to be a context manager.
 1206625 rgw: fix users being created suspended
 ca6d239 Fix handling of CEPH_CONF
 31d4973 rados: fix warning
 6e49415 client: only send one flushsnap once per mds session
 860c665 rados tool: load generator
+e16556e Archive dir removal has to be unconditional.
+cb4ffc3 Use idiomatic python.
 2f5925e Add "How to use C_GatherBuilder" comment
 f69fcc7 C_GatherBuilder: add C_GatherBuilder::activate()
 16b6567 C_Gather: remove unused "any" option
@@ -31290,8 +36032,16 @@ d1c5fb1 messenger: rename is_blank_addr to is_blank_ip.
 0b252e9 client: make tdout() work in header
 aad529e rgw: merge chunks for clone range, truncate to 0 before
 902f699 qa: add clone_range tool
+7d57b35 teuthology-ls: summarize results from an archive-dir
+4376316 workunits: set LD_LIBRARY_PATH
+b95e61a teuthology-nuke
+2125e8d include @hostname in owner
+770edc3 suite: set each run description to combination of facet names
+052f43c pass owner, optional description through to summary.yaml
+bc91e1b workunits: set CEPH_ARGS so 'ceph' etc can find config
 7d75441 qa: file_layout.sh is kclient only
 8bcc639 messenger: add a set_ip function to initialize the IP
+a9a2733 workunit: include ceph bin dir in path
 1247a22 qa: fix file_layout.sh mode
 cde5dc2 client: improve debug output a bit
 92e0fac mds: fix snaprealm split for dir inode
@@ -31299,8 +36049,12 @@ b5510a6 qa: move file_layout into workunits/misc
 c292cce radosgw_admin: fix cli test
 4b16d6f radostool: load gen, wip
 f7fb547 qa: make kernel_untar_build.sh output more helpful
+4815f51 Add rgw testing with s3tests.
+7995653 Add kernel task for controlling kernel version for different roles.
+0904ce5 Move get_clients method into misc to avoid duplication.
 016cf67 testceph: test file opens with extra slashes
 f5cbe50 testceph: expand test cases
+256e6a2 tasks/clock: tolerate ntp daemon
 a851a15 filepath: don't parse multiple slashes as multiple dname bits.
 d1fcffa Resetter: Check return values from Journaler.
 2c70bb8 Journaler: pay attention to return codes from read head.
@@ -31392,10 +36146,24 @@ f1f75df ceph_argparse_witharg: fix dashes in args
 4549501 common/Throttle: Remove unused return type on Throttle::get()
 c23d4c2 libceph: deglobalize, again
 92a3a47 Makefile: add missing common/dout.h header to dist tarball
+e2e96b3 Enable core dumps when running the rbd cli tool.
+9eea6b9 Handle rbd config with no properties.
+122ed28 Wait for rbd devices to be removed before removing the module.
 5da0662 librados: fix end_block calculation for aio_read
 823a05c proflogger: only register one event for all loggers
+efeac9f Add status logging into task rbd.
+fb01654 Wait for block device to show up in task rbd.dev_create.
 629ac0d librados: deglobalize, again
 9a3a685 OSDMap: kill some deadcode, deglobalize
+d9e343e Remove rbd from TODO file.
+c048279 Add combined rbd task that runs all the subparts.
+789de09 Add rbd.mount method.
+deb2fe5 Add rbd.mkfs method.
+2d64461 Add rbd -> /dev mapping method.
+dfc3a85 Add rbd modprobe method.
+bd46d75 Add rbd task with create_image function.
+1a68b6f Remove unused import.
+b0c8b1d Ensure suite components are run in alphabetical order.
 0559f84 librados: deglobalize
 e8b4573 AnchorServer: fix return values in add().
 a8eac4f uclient: change snaprealm refcounting to erase at 0 refs.
@@ -31435,6 +36203,7 @@ e214420 librbd: sparse read cleanup, fixes
 bfe0d55 librbd: sparse read cleanup, fixes
 e4f7d8e librbd: fix and cleanup a bit read_iterate
 52e6e85 rgw: some cleanups
+e1f8125 add a bunch of workunits
 6725e74 qa: simple rbd import/export test
 9761810 Objecter.cc: de-globalize
 df2e3bc initialize g_ceph_context in common_preinit
@@ -31448,6 +36217,7 @@ c17d9c0 mds: avoid copying snap client_caps xlist
 cdb500e mon: do not copy subscription xlist
 abafef3 qa: clean up after snaptest-multiple-capsnaps.sh
 0763221 Dispatcher: prevent copying
+5df7b23 add kclient_ prefix to trivial_sync, dbench
 2e7d06c msgr: only SO_REUSEADDR when specific port is specified
 4c1cb28 mon: fix log state trimming
 888e880 mon: add 'log ....' command
@@ -31462,6 +36232,9 @@ c48540a filestore: fix fiemap
 39e2c6e rbd: fix read_iterate for sparse read
 5ad52af rgw: use adapting window for put obj
 9e9cec6 AuthNone: encode entity name in authorizer
+05e2ba9 Save the flavor of a run in the summary file.
+330ec41 Add teuthology-suite, to run multiple tests in a batch.
+fb1a875 Initial import.
 7f92d37 osd: warn instead of error if cluster addr specified but not public addr
 37a9762 include/atomic.h:fix build for non-atomic-ops case
 8f1beb1 rgw: put data using a window
@@ -31522,6 +36295,8 @@ ee5502d Remove cdout
 128bfcf rgw: put_obj_data doesn't need mtime
 10171ca filestore: fix fiemap
 ca2f4e2 rbd: fix read_iterate for sparse read
+e481db1 Archive syslog messages while the test was in progress.
+bc8cc86 Fix bug that thought all >1 node clusters always had core dumps.
 cc644b8 mds: avoid EMetaBlob::fullbit copying with shared_ptr
 f78de01 encoding: add list<shared_ptr<T>> macros
 96ef8a6 debian: ceph-client-tools depends on librados2, librbd
@@ -31530,8 +36305,10 @@ f78de01 encoding: add list<shared_ptr<T>> macros
 93623fb common: fix descriptions for -i, -n too
 830f48d update clitests with new usage
 1b9575e common: fix generic usage for -D vs -d
+6a32f72 Autotests run no ceph software, so don't try to do code coverage there.
 b9c3672 rgw: suspend/enable buckets through pool async api
 e2150a0 librados: add async pool op operations
+57c542b Archive cores dumped during test, record test as failed if any seen.
 6fb971f qa: organize workunits
 b2c803d secret.c: use safe_read when appropriate
 7b8bf55 mds: fix use-after-free on ESession replay
@@ -31547,17 +36324,50 @@ bc1782a osd: fix find_object_context debug output
 1c5f655 mds: when prodding head inode on snapped behalf, avoid XSYN
 3a59579 mds: fix transitions away from XSYN state
 9974b7e rgw: user suspension
+2738ce0 Better examples for kclient task.
+0b28b96 Remove done and abandoned TODO entries.
+4f46e4b Make cfuse and kclient default to all clients.
+b49c739 Archive autotest result output.
+5b237fc Support running multiple autotest tests.
+cebf88d show role in log prefix when running workunits
+55cfd92 fix typo in workunits example
+7fe89b7 add clock task to sync clocks
+78a3c23 Move non-ceph logic out of the ceph task: base dir, archive transfer.
+301ab56 Move non-ceph logic out of the ceph task: host in use check.
+629ad44 Move /tmp/cephtest/data creation to where it belongs, in ceph.cluster.
+594a3d3 Refactor ceph log handling into a separate subtask.
+9353fcc Move summary inside context.
+445186b Refactor ceph healthy waiting into a separate subtask.
+259eb35 Refactor ceph mds daemon handling into a separate subtask.
 3aa6a4d qa: pjd must run as root
 8b4b838 rgw: get multipart list marker to work
 77d38e4 (tag: v0.29.1) v0.29.1
+2da2d25 Refactor ceph osd daemon handling into a separate subtask.
+3fc00e3 Refactor ceph mon daemon handling into a separate subtask.
 a379c67 rgw: some multipart cleanups, fixes
+574cfe9 Remove the "ceph mds set_max_mds" call, apparently it's not needed.
+eaba08f Add debug logging to contextutil.nested.
+915a366 Refactor ceph cluster setup into a separate subtask.
+14ddb41 Start splitting the ceph task into smaller units.
+9746e77 Allow running of any function in a task.
+1dd25b4 Add task for mounting with the kernel client.
+9d70d04 Add function to output a secret key to a file.
 515f088 librbd: fix block_completion race condition
 e9e3fee rgw: implement list multiparts
+9d60344 Output a summary file when archiving a run.
+98a8071 Remove unused imports and variable.
 2d63264 librbd: add AioCompletion debugging
 4a7a42b uclient: path_walk should notice when it gets absolute paths.
+1a20ba1 Tee teuthology log into the archive directory, if archiving.
 e90e041 vstart: put .journal outside of osd data dir
+2bae22f Create the archive dir earlier, move out of ceph task.
+faa855d Add task for running ceph workunits, QA tests that expect cwd to be ceph mount.
+e00b7fc Clean up after an autotest run.
+cd8f31d Write config.yaml directly to file.
+d157e32 Move imports to the top.
 af9879d qa: Make snaptest-git-ceph.sh clean up after itself.
 1aa2932 rgw: fix abort multipart upload
+c49f947 Archive teuthology config.
 27f79fc auth: remove CephCryptoManager
 ea3db17 librbd: fix AioCompletion race condition
 eced4d6 monmaptool: implement --help
@@ -31571,6 +36381,8 @@ e645152 crbdnamer: +x
 cca3da1 qa: radosgw_admin: fix clitest
 66229c7 qa: direct_io: fix warnings
 7154324 configure: check for boost statechart header
+883991a added thrashosds
+398a333 adding radosbench.py to tasks
 e304fd0 radosgw_admin: log show can dump info in xml, json
 28e1a89 radosgw_admin: fix log show (with new poolid param)
 efe1aa7 radosgw_admin: pool create command
@@ -31608,11 +36420,15 @@ ddf0263 debian: Prevent git ignore from recursing on *.substvars etc.
 0da30c3 debian: Properly package the python bindings.
 edc6659 rgw: list bucket display the correct objects owners
 3e284a7 boto_tool: add put_bucket_acl
+5cbe10c added testsnaps
 7fa8b97 rgw: don't allow handing out object ownership
 b282603 librados: remove useless reference holding
 98a3b54 librados: get reference to the io context for the pending async ops
 52e9e5e heap_profiler: if log_dir is empty, don't try and log to root dir!
 f41773b coverage.sh: use .lcov instead of .info to avoid confusion
+d09ae0b With coverage, use SIGTERM to make the daemons exit(0).
+e52bb0c Put all coverage data in the archive dir.
+41e19f5 Initialize BASE before using it.
 a6afb05 rgw: remove required indexes when modifying user info
 9a705c7 mon: Follow the Message rules when forwarding tells
 842f3ac rgw: remove required indexes when modifying user info
@@ -31628,6 +36444,16 @@ cf129ff Revert "mds: allow mds to 'exit immediately'"
 dd9ea9c Revert "osd: add command to exit cleanly"
 b6e5c08 mds: fix up MDCache::path_is_mine to remove a totally bogus assert.
 3a582f9 mon: add new "tell" infrastructure.
+798f3cc Can now run specific ceph version based on sha1.
+ae77af0 Simplify ceph sha1 saving. No need to round-trip, store N copies.
+2533edd Save ceph sha1 in coverage dir.
+18b1173 Add TODO notes.
+c45488c Show status message before gcov-style shutdown of daemons.
+7c5eed2 Check daemon exit codes, even when using gcov.
+43f37b8 Add coverage support.
+f162f07 Support ceph tarball "flavors", that is, different gitbuilders.
+f3757f7 Let ceph task config specify branch or tag to run.
+4903d06 Archive everything written to the "archive" directory.
 4c79875 rgw: can specify alternative remote addr http header param
 cf5e70d rgw: skeleton for list multipart uploads
 2c4367f CephContext: initialize module_type in ctor
@@ -31660,6 +36486,7 @@ f69bc30 debian: move gceph into a separate package
 802e9e5 rgw: implement namespace, use it in complete multipart
 01df7c6 ReplicatedPG: make_writeable, use correct size for clone_size entry
 1376a5a gtest: add -lpthread to AM_LDFLAGS
+e8abcf0 readme: need libevent-dev
 b2becef man: update cosd man page to include info on flush-journal option.
 d224b3e Makefile.am: Add -lpthread to UNITTEST_LDADD
 5922de2 mds: only target XSYN state if we have a valid target loner
@@ -31674,6 +36501,7 @@ d224b3e Makefile.am: Add -lpthread to UNITTEST_LDADD
 4e58308 mds: rename: remove illicit assert.
 5b43419 mds: try_trim_non_auth_subtree if we rename a dir away from a non-auth subtree
 072e80e mds: use CDIR_AUTH_UNDEF where possible
+e19d639 Gevent 0.14 switches to libev, avoid it for now.
 811dcae mds: remove unlinked metadata from cache on replay
 2330281 rgw: set locator on all required rados calls
 5cf6d0e rgw: put locator key in rgw_obj
@@ -31691,6 +36519,10 @@ a7c083e mds: journal renames on witnesses if we have nested subtrees
 5c870c6 mds: CDir::contains() should use projected hierarchy
 1b0fdca re-fix ignore return value change
 3a2ce92 mds: fix check for no-op rename of two links to the same inode
+fcc73e0 Class loading no longer needs a temp dir.
+047669e Cleanup is done, adjust TODO.
+e6ae6bd Archive logs if given --archive=PATH. Clean up after a test run.
+37cbd7a Support PIPE for stdout and stderr, too.
 f225db7 rgw: remove parts after multi-part upload
 a7012f2 cfuse: really ignore write() return value
 37fd3b5 rgw: calc etag for multipart upload
@@ -31703,6 +36535,7 @@ e4f0541 Makefile.am: clean gcno and gcda files in "make clean"
 3e0352d mds: rename: add missing pop_and_dirty_projected_inode calls
 569baab mds: rename: use temp *in in _rename_apply
 bdfcc0d mds: rename: move unlink out of every possible branch
+ce10d4f Add missing trailing commas.
 12bb308 test/bufferlist: add copy_all test
 eb09764 coverage: add helper script to get coverage for a local test
 6909273 mon: add all_exit and exit commands
@@ -31728,11 +36561,16 @@ e36a4b2 mds: rename: fix handling of target that is remote dentry
 a566229 client: fix mds routing for link requests
 7770e1c mds: rename: only add target to stray if destdn is a primary
 df7f895 mds: fix/clean up xlock import/export
+c8ecd12 Remove mountpoint after shutting down cfuse.
 3c6a7e1 gitbuilder: quiet
+a988083 Add orchestra.cluster, for running commands on multiple hosts.
 d11f471 rgw: silence gitbuilder (hopefully)
+26d720d Add TODO note about results gathering.
 f292fb7 radosgw_admin: link bucket to user
+2b23788 First draft of documentation.
 b5011e2 mds: adjust subtree roots on rename
 10750f8 common: add module_type to CephContext; use to initialize rotating keys
+ad81fcc Use orchestra repo on ceph.newdream.net, now that it exists.
 b28ba77 osd: use osd_op.soid in call
 5cc146e osd: allow src_oids to be snapped
 8e69c39 (tag: v0.29) v0.29
@@ -31753,12 +36591,17 @@ a635a9c rgw: multipart complete upload
 8e55e18 librados: remove useless reference holding
 a082747 osd: make CLONERANGE src oid encoding more sane
 740eea1 Refactor MonClient, KeyRing
+7f39337 Prettify exception handling.
+0860769 Remove dead code.
+b6e2243 Prettify config debug printing.
 f2f2f42 osd: src src_oids oloc check
+57f979f Refactor for modularity.
 ed41f29 remove g_keyring
 90b5354 dout:remove stream from dout_emerg_streams earlier
 98226c2 DoutStreambuf: de-globalize dout lock
 5b7049c DoutStreambuf: de-globalize emergency logging
 6ed9a58 Add simple_spin
+0d975b5 Record Remote in RemoteProcess.remote, for caller convenience.
 70d7709 Revert "cfuse.cc: use safe_write"
 73ea844 librados: get reference to the io context for the pending async ops
 1aee7f9 rgw: use clone_range for multi upload completion
@@ -31769,6 +36612,9 @@ a97451f librados: support clone_range
 d1d3e26 mds: remove now-erroneous comment
 19949f6 mds: Clean up _rename_prepare journaling
 4689073 mds: _rename_prepare should only journal dest if auth for it
+97ff24c Un-hardcode tasks.
+3be4b48 Make autotest show debug messages.
+1634f3e Move autotest running into a task.
 cd5049d uclient: reset flushing_caps on (mds) cap import.
 c28b749 uclient: don't use racy check for uncommitted data.
 2c6b560 uclient: call the right function pointer on truncate
@@ -31784,6 +36630,7 @@ d4edd17 rgw: multipart: use locator on created parts
 3766618 rgw: multipart additions and fixes
 6fd694c Remove unneeded libcrush1 files
 d6bbf3e mds: journal parents of srci when srcdn is remote
+ce5f0e7 Move interactive and cfuse into tasks.
 806646b journaler: also initialize safe_pos
 a13b664 journaler: fix trim crash after standby-replay -> active
 7ca240b mds: cleanup rename_prepare a bit
@@ -31801,9 +36648,11 @@ b152a93 rgw: more cleanup
 0cce0a5 filestore: allow clone_range to different offsets
 502baea filestore: fix fallback/slow do_clone_range
 6ca168e filestore: fix fallback/slow do_clone_range
+95163e9 Fetch ceph binary tarball independently on every node.
 7e2e477 mon: make sure osd paxos is writeable before doing timeouts
 c5470e0 OSD: don't keep old connection over new one in update_heartbeat_peers
 780322d boto_tool: add get_bucket_acl
+de0f0c7 Refactor to use Cluster and Remote, to evaluate the new APIs.
 65dc841 rgw: implement list multipart
 e340bfe dout: use recursive mutex for dout
 44770df lockdep: fix shadowed global, add printout
@@ -31813,11 +36662,16 @@ e340bfe dout: use recursive mutex for dout
 b4bc1c6 rados export: better name mangling rules, fix test
 5dd0e12 rgw: handle multipart completion
 d29b3b7 rgw: parser for multi upload completion
+7a474b1 Use orchestra.remote as a higher-level wrapper, stop worrying about hostnames.
 33c39ab rados_sync: prefix user extended attributes
 0806e65 rgw: some more xml reshuffling
+9970b86 Wrap Remote._runner in staticmethod() or it gets mistaken for a method.
+dc9aaac Add a pretty wrapper on top of Paramiko and run.run.
 f5d6be6 rgw: move generic xml parsing code to some shared location
+5875f79 Remove dead code.
 efee746 objecter, osd: clonerange operation
 07c1989 librados: implement aio_flush
+7d4bb12 Initial import.
 6db2a4e crushtool: error out if uniform weights vary
 35b19a4 osd: fix ScrubFinalizeWQ::_clear condition
 1528d2c debian: depend on libboost-dev >= 1.34
@@ -31880,7 +36734,17 @@ c67dd16 mkcephfs: error out on bad usage
 203a43b rgw_admin: clean warning
 ab278b4 rgw_admin: add key create
 bd0eb9a rgw_admin: subuser and key removal
+0566de4 Let callers specify that some arguments should not be quoted.
+dad0a67 Simple unit tests for shell quoting.
+be28e5b Refactor to extract shell quoting into utility function.
+1a459dd Depend on Paramiko 1.7.7 or newer to be able to read modern OpenSSH keys.
 7330c3c journaler: tolerate ENOENT when prezeroing
+37c94af Pyflakes cleanup.
+5d5b179 Add a utility function run.wait to wait for processes to exit.
+073a4bb Paramiko ChannelFile.close() didn't actually close the remote stdin.
+6dd4774 Log debug info of commands actually executed.
+9c42fe6 Cleanup dead code.
+f10668f Allow easy writing to stdin of remote processes.
 bb13c92 test_common.sh: skip rm before put
 e42736a radostool: rados put should use write_full
 9ff7cc7 Create a libcommon service thread
@@ -31951,6 +36815,7 @@ cef8eb9 debian: no shlibs:Depends for obsync either
 a71981c PG: add_event, add_next_event: ignore prior_version on backlog events
 922f7cc expanding testceph to test open/readdir/telldir
 3471d41 add ceph_readdir() to libceph
+f8f6bed Add run.run option wait, this will make handling stdin easier soon.
 8f7d6c7 librados: add python bindings for getxattrs
 3df86c3 client: hold FILE_BUFFER ref while waiting for dirty throttle
 838067d client: clean up _flush callers
@@ -31959,11 +36824,21 @@ a71981c PG: add_event, add_next_event: ignore prior_version on backlog events
 3f43c78 client: _flush should no-op if nothing to flush
 bc2c31e PG: choose_log_location: prefer OSDs with a backlog
 fe298f6 OSD: send a log in response to a log query when the pg dne
+93c2e17 Return a structured result from run.run, to make capturing stdout/stderr easier.
+9a5c959 Add integration tests for signals and connection loss.
+df84f4e Check for errors on remote commands.
 57f423b librados: add rados_getxattrs API for C bindings
 bcbcf30 ReplicatedPG: wait_for_missing_object in _rollback_to
+87d7192 Add setup.py, install in devel mode into virtualenv.
+492fa48 Don't close file after copying stdout/stderr to it.
 4043059 testrados: retab with C-style tabs
 6a580bf testrados: more getxattr / setxattr tests
 1dd1743 Remove libcrush from packaging
+37df5b1 Refactor remote running to support more use cases.
+5bfcec2 Add debug logging to monkeypatching.
+1ed70d7 Silence paramiko transport logging.
+b397eb5 Silence a Paramiko crypto deprecation.
+85a28a2 Make monkeypatching respect order.
 f16903d client: do not retake lock in sync_write_commit
 ce7f78d ceph.spec.in: fix obsync description
 4d39f1b journaler: ENOENT is okay on trim
@@ -31980,6 +36855,7 @@ dbb2c38 PG: _remove_pg, reset info.last_update and info.log_tail on log zero
 8c6ce34 osd: clean up choose_acting output
 5d161aa PG: make choose_acting a bit smarter
 14a3f26 Move crush into libcommon
+0535e4d Initial import
 2fc13de Move crush into libcommon
 0d79f1d man: update cosd man page
 071881d (tag: v0.28) v0.28
@@ -32550,7 +37426,7 @@ fd0290a rgw: listing non existent bucket returns NoSuchBucket
 4082808 osd: check obj name length to avoid ENAMETOOLONG
 2380e70 librbd: don't write to stdout
 1eae9d6 Add test_mutate
-9c731ed mdsmap: initialize standby_for_rank
+9c731edd mdsmap: initialize standby_for_rank
 5e27a07 mon: simplify mds follow checks
 2b59bc6 mon: simplify mds laggy check
 e9afe17 mon: don't take over for a standby-replay
@@ -38889,7 +43765,7 @@ cd492b7 uclient: fix test condition
 d8987ee Client: put guards around some dentries[foo] accesses without checking for existence.
 09040d9 kclient: typo
 866d979 paxos: allow wait on newer version
-f49579f kclient: set have_version in MOSDGetMap
+f49579f7 kclient: set have_version in MOSDGetMap
 f16a28d No more VERSION_T; just 0.
 52046d7 mon: remove old asserts conflicting with new readable semantics
 77db2e0 messages: Clean up of PaxosServiceMessages, and some fixes for their users.
diff --git a/ceph.spec b/ceph.spec
index 5337023..b30e2b1 100644
--- a/ceph.spec
+++ b/ceph.spec
@@ -56,7 +56,7 @@
 # common
 #################################################################################
 Name:		ceph
-Version:	10.2.5
+Version:	10.2.6
 Release:	0%{?dist}
 Epoch:		1
 Summary:	User space components of the Ceph file system
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/ceph.spec.in b/ceph.spec.in
index 9ddd75f..fd8ab92 100644
--- a/ceph.spec.in
+++ b/ceph.spec.in
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/configure b/configure
index d434fb9..246c29a 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for ceph 10.2.5.
+# Generated by GNU Autoconf 2.69 for ceph 10.2.6.
 #
 # Report bugs to <ceph-devel at vger.kernel.org>.
 #
@@ -590,8 +590,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='ceph'
 PACKAGE_TARNAME='ceph'
-PACKAGE_VERSION='10.2.5'
-PACKAGE_STRING='ceph 10.2.5'
+PACKAGE_VERSION='10.2.6'
+PACKAGE_STRING='ceph 10.2.6'
 PACKAGE_BUGREPORT='ceph-devel at vger.kernel.org'
 PACKAGE_URL=''
 
@@ -1595,7 +1595,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures ceph 10.2.5 to adapt to many kinds of systems.
+\`configure' configures ceph 10.2.6 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1667,7 +1667,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of ceph 10.2.5:";;
+     short | recursive ) echo "Configuration of ceph 10.2.6:";;
    esac
   cat <<\_ACEOF
 
@@ -1856,7 +1856,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-ceph configure 10.2.5
+ceph configure 10.2.6
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2932,7 +2932,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by ceph $as_me 10.2.5, which was
+It was created by ceph $as_me 10.2.6, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -17045,7 +17045,7 @@ fi
 
 # Define the identity of the package.
  PACKAGE='ceph'
- VERSION='10.2.5'
+ VERSION='10.2.6'
 
 
 cat >>confdefs.h <<_ACEOF
@@ -17087,47 +17087,16 @@ AMTAR='$${TAR-tar}'
 
 
 # We'll loop over all known methods to create a tar archive until one works.
-_am_tools='gnutar plaintar pax cpio none'
-
-# The POSIX 1988 'ustar' format is defined with fixed-size fields.
-      # There is notably a 21 bits limit for the UID and the GID.  In fact,
-      # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
-      # and bug#13588).
-      am_max_uid=2097151 # 2^21 - 1
-      am_max_gid=$am_max_uid
-      # The $UID and $GID variables are not portable, so we need to resort
-      # to the POSIX-mandated id(1) utility.  Errors in the 'id' calls
-      # below are definitely unexpected, so allow the users to see them
-      # (that is, avoid stderr redirection).
-      am_uid=`id -u || echo unknown`
-      am_gid=`id -g || echo unknown`
-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether UID '$am_uid' is supported by ustar format" >&5
-$as_echo_n "checking whether UID '$am_uid' is supported by ustar format... " >&6; }
-      if test $am_uid -le $am_max_uid; then
-         { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-      else
-         { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-         _am_tools=none
-      fi
-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GID '$am_gid' is supported by ustar format" >&5
-$as_echo_n "checking whether GID '$am_gid' is supported by ustar format... " >&6; }
-      if test $am_gid -le $am_max_gid; then
-         { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-      else
-        { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-        _am_tools=none
-      fi
+_am_tools='gnutar  pax cpio none'
+
+
 
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a ustar tar archive" >&5
-$as_echo_n "checking how to create a ustar tar archive... " >&6; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a pax tar archive" >&5
+$as_echo_n "checking how to create a pax tar archive... " >&6; }
 
   # Go ahead even if we have the value already cached.  We do so because we
   # need to set the values for the 'am__tar' and 'am__untar' variables.
-  _am_tools=${am_cv_prog_tar_ustar-$_am_tools}
+  _am_tools=${am_cv_prog_tar_pax-$_am_tools}
 
   for _am_tool in $_am_tools; do
     case $_am_tool in
@@ -17139,8 +17108,8 @@ $as_echo_n "checking how to create a ustar tar archive... " >&6; }
    echo "$as_me:$LINENO: \$? = $ac_status" >&5
    (exit $ac_status); } && break
       done
-      am__tar="$_am_tar --format=ustar -chf - "'"$$tardir"'
-      am__tar_="$_am_tar --format=ustar -chf - "'"$tardir"'
+      am__tar="$_am_tar --format=posix -chf - "'"$$tardir"'
+      am__tar_="$_am_tar --format=posix -chf - "'"$tardir"'
       am__untar="$_am_tar -xf -"
       ;;
     plaintar)
@@ -17152,14 +17121,14 @@ $as_echo_n "checking how to create a ustar tar archive... " >&6; }
       am__untar='tar xf -'
       ;;
     pax)
-      am__tar='pax -L -x ustar -w "$$tardir"'
-      am__tar_='pax -L -x ustar -w "$tardir"'
+      am__tar='pax -L -x pax -w "$$tardir"'
+      am__tar_='pax -L -x pax -w "$tardir"'
       am__untar='pax -r'
       ;;
     cpio)
-      am__tar='find "$$tardir" -print | cpio -o -H ustar -L'
-      am__tar_='find "$tardir" -print | cpio -o -H ustar -L'
-      am__untar='cpio -i -H ustar -d'
+      am__tar='find "$$tardir" -print | cpio -o -H pax -L'
+      am__tar_='find "$tardir" -print | cpio -o -H pax -L'
+      am__untar='cpio -i -H pax -d'
       ;;
     none)
       am__tar=false
@@ -17170,7 +17139,7 @@ $as_echo_n "checking how to create a ustar tar archive... " >&6; }
 
     # If the value was cached, stop now.  We just wanted to have am__tar
     # and am__untar set.
-    test -n "${am_cv_prog_tar_ustar}" && break
+    test -n "${am_cv_prog_tar_pax}" && break
 
     # tar/untar a dummy directory, and stop if the command works.
     rm -rf conftest.dir
@@ -17198,14 +17167,14 @@ $as_echo_n "checking how to create a ustar tar archive... " >&6; }
   done
   rm -rf conftest.dir
 
-  if ${am_cv_prog_tar_ustar+:} false; then :
+  if ${am_cv_prog_tar_pax+:} false; then :
   $as_echo_n "(cached) " >&6
 else
-  am_cv_prog_tar_ustar=$_am_tool
+  am_cv_prog_tar_pax=$_am_tool
 fi
 
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_ustar" >&5
-$as_echo "$am_cv_prog_tar_ustar" >&6; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_pax" >&5
+$as_echo "$am_cv_prog_tar_pax" >&6; }
 
 
 
@@ -21415,7 +21384,7 @@ else
 JAVA_TEST=Test.java
 CLASS_TEST=Test.class
 cat << \EOF > $JAVA_TEST
-/* #line 21418 "configure" */
+/* #line 21387 "configure" */
 public class Test {
 }
 EOF
@@ -26737,7 +26706,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by ceph $as_me 10.2.5, which was
+This file was extended by ceph $as_me 10.2.6, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -26803,7 +26772,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-ceph config.status 10.2.5
+ceph config.status 10.2.6
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index 32e273f..51daf91 100644
--- a/configure.ac
+++ b/configure.ac
@@ -8,7 +8,7 @@ AC_PREREQ(2.59)
 # VERSION define is not used by the code.  It gets a version string
 # from 'git describe'; see src/ceph_ver.[ch]
 
-AC_INIT([ceph], [10.2.5], [ceph-devel at vger.kernel.org])
+AC_INIT([ceph], [10.2.6], [ceph-devel at vger.kernel.org])
 
 AX_CXX_COMPILE_STDCXX_11(, mandatory)
 
@@ -51,7 +51,7 @@ AM_PROG_LIBTOOL
 AM_PROG_AS
 
 
-AM_INIT_AUTOMAKE([foreign parallel-tests tar-ustar])
+AM_INIT_AUTOMAKE([foreign parallel-tests tar-pax])
 # enable make V=0 (if automake >1.11)
 m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
 
diff --git a/doc/Makefile b/doc/Makefile
index c13fccf..b7058bf 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -239,10 +239,10 @@ OTOOL64 =
 PACKAGE = ceph
 PACKAGE_BUGREPORT = ceph-devel at vger.kernel.org
 PACKAGE_NAME = ceph
-PACKAGE_STRING = ceph 10.2.5
+PACKAGE_STRING = ceph 10.2.6
 PACKAGE_TARNAME = ceph
 PACKAGE_URL = 
-PACKAGE_VERSION = 10.2.5
+PACKAGE_VERSION = 10.2.6
 PATH_SEPARATOR = :
 PKG_CONFIG = /usr/bin/pkg-config
 PKG_CONFIG_LIBDIR = 
@@ -267,7 +267,7 @@ SET_MAKE =
 SHELL = /bin/bash
 SPHINX_BUILD = sphinx-build
 STRIP = strip
-VERSION = 10.2.5
+VERSION = 10.2.6
 WARN_ERROR_FORMAT_SECURITY = -Werror=format-security
 WARN_IGNORED_QUALIFIERS = -Wignored-qualifiers
 WARN_TYPE_LIMITS = -Wtype-limits
@@ -285,7 +285,7 @@ acx_pthread_config =
 am__include = include
 am__leading_dot = .
 am__quote = 
-am__tar = tar --format=ustar -chf - "$$tardir"
+am__tar = tar --format=posix -chf - "$$tardir"
 am__untar = tar -xf -
 bindir = ${exec_prefix}/bin
 build = x86_64-pc-linux-gnu
diff --git a/doc/dev/quick_guide.rst b/doc/dev/quick_guide.rst
index 6a4fe08..adc7643 100644
--- a/doc/dev/quick_guide.rst
+++ b/doc/dev/quick_guide.rst
@@ -103,9 +103,9 @@ You can now use the swift python client to communicate with the RadosGW.
 
 .. code::
 
-    $ swift -A http://localhost:8000/auth -U tester:testing -K asdf list
-    $ swift -A http://localhost:8000/auth -U tester:testing -K asdf upload mycontainer ceph
-    $ swift -A http://localhost:8000/auth -U tester:testing -K asdf list
+    $ swift -A http://localhost:8000/auth -U test:tester -K testing list
+    $ swift -A http://localhost:8000/auth -U test:tester -K testing upload mycontainer ceph
+    $ swift -A http://localhost:8000/auth -U test:tester -K testing list
 
 
 Run unit tests
diff --git a/doc/man/8/rados.rst b/doc/man/8/rados.rst
index 2243a5e..05fc2be 100644
--- a/doc/man/8/rados.rst
+++ b/doc/man/8/rados.rst
@@ -122,6 +122,8 @@ Pool specific commands
   default, and is used as the underlying object name for "read" and
   "write" ops.
   Note: -b *objsize* option is valid only in *write* mode.
+  Note: *write* and *seq* must be run on the same host; otherwise, the
+  objects created by *write* will have names that cause *seq* to fail.
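As a minimal illustration of the note above, assuming a pre-existing pool named ``mybench`` (the pool name is a placeholder, not part of this change):

    $ rados bench -p mybench 60 write --no-cleanup
    $ rados bench -p mybench 60 seq
    $ rados -p mybench cleanup

The ``--no-cleanup`` flag keeps the objects written by the first run so that the *seq* pass on the same host can read them back; *cleanup* then removes them.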
 
 :command:`cleanup`
 
@@ -132,13 +134,17 @@ Pool specific commands
   List all key/value pairs stored in the object map of object name.
   The values are dumped in hexadecimal.
 
-:command:`getomapval` *name* *key*
+:command:`getomapval` [ --omap-key-file *file* ] *name* *key* [ *out-file* ]
   Dump the hexadecimal value of key in the object map of object name.
+  If the optional *out-file* argument isn't provided, the value will be
+  written to standard output.
 
-:command:`setomapval` *name* *key* *value*
-  Set the value of key in the object map of object name.
+:command:`setomapval` [ --omap-key-file *file* ] *name* *key* [ *value* ]
+  Set the value of key in the object map of object name. If the optional
+  *value* argument isn't provided, the value will be read from standard
+  input.
 
-:command:`rmomapkey` *name* *key*
+:command:`rmomapkey` [ --omap-key-file *file* ] *name* *key*
   Remove key from the object map of object name.
 
 :command:`getomapheader` *name*
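For reference, a minimal round trip through the omap commands described above might look like this (pool, object, and key names are placeholders):

    $ rados -p mypool setomapval myobject mykey myvalue
    $ rados -p mypool getomapval myobject mykey
    $ rados -p mypool listomapvals myobject
    $ rados -p mypool rmomapkey myobject mykey

With the changes above, *getomapval* can also write the value to an optional trailing *out-file* argument, and *setomapval* reads the value from standard input when the *value* argument is omitted.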
diff --git a/doc/rados/configuration/osd-config-ref.rst b/doc/rados/configuration/osd-config-ref.rst
index 68b140f..ff2cf8a 100644
--- a/doc/rados/configuration/osd-config-ref.rst
+++ b/doc/rados/configuration/osd-config-ref.rst
@@ -486,7 +486,7 @@ priority than requests to read or write data.
 
 :Description: The maximum number of backfills allowed to or from a single OSD.
 :Type: 64-bit Unsigned Integer
-:Default: ``10``
+:Default: ``1``
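Operators who depended on the old default of ``10`` may want to raise the value again, either via ``osd max backfills`` in the ``[osd]`` section of ceph.conf or at runtime; a hedged sketch (the value ``4`` is illustrative only):

    $ ceph tell osd.* injectargs '--osd-max-backfills 4'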
 
 
 ``osd backfill scan min`` 
diff --git a/doc/radosgw/config-ref.rst b/doc/radosgw/config-ref.rst
index b8e4b44..abcc268 100644
--- a/doc/radosgw/config-ref.rst
+++ b/doc/radosgw/config-ref.rst
@@ -791,10 +791,17 @@ Swift Settings
 
 ``rgw swift url prefix``
 
-:Description: The URL prefix for the Swift API. 
+:Description: The URL prefix for the Swift StorageURL that goes in front of
+              the "/v1" part. This allows several Gateway instances to run
+              on the same host. For compatibility, setting this configuration
+              variable to an empty string causes the default "/swift" to be
+              used. Use the explicit prefix "/" to start the StorageURL at
+              the root.
+              WARNING: setting this option to "/" will NOT work if the S3 API
+              is enabled. Conversely, disabling S3 makes it impossible to
+              deploy RadosGW in a multi-site configuration!
 :Default: ``swift``
-:Example: http://fqdn.com/swift
-	
+:Example: "/swift-testing"
+
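A minimal sketch of setting the prefix, assuming a gateway instance named ``client.rgw.gateway1`` (the instance name, host, and port are assumptions, not part of this change):

    $ cat >> /etc/ceph/ceph.conf <<'EOF'
    [client.rgw.gateway1]
    rgw swift url prefix = /swift-testing
    EOF

The Swift StorageURL returned at authentication would then look like ``http://gateway1.example.com:7480/swift-testing/v1``, whereas the default (empty) setting yields ``.../swift/v1``.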
 
 ``rgw swift auth url``
 
@@ -929,6 +936,17 @@ Logging Settings
 :Default: ``30``
 
 
+``rgw log http headers``
+
+:Description: Comma-delimited list of HTTP headers to include with ops
+	      log entries.  Header names are case insensitive, and use
+	      the full header name with words separated by underscores.
+
+:Type: String
+:Default: None
+:Example: "http_x_forwarded_for, http_x_special_k"
+
+
 ``rgw intent log object name``
 
 :Description: The logging format for the intent log object name. See manpage 
diff --git a/doc/radosgw/s3/commons.rst b/doc/radosgw/s3/commons.rst
index 14e6fd0..ca848bc 100644
--- a/doc/radosgw/s3/commons.rst
+++ b/doc/radosgw/s3/commons.rst
@@ -18,6 +18,8 @@ The second method identifies the bucket via a virtual bucket host name. For exam
 	GET / HTTP/1.1
 	Host: mybucket.cname.domain.com
 
+To configure virtual hosted buckets, you can either set ``rgw_dns_name = cname.domain.com`` in ceph.conf, or add ``cname.domain.com`` to the list of ``hostnames`` in your zonegroup configuration. See `Ceph Object Gateway - Multisite Configuration`_ for more on zonegroups.
+
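A hedged sketch of the zonegroup route mentioned above, reusing the radosgw-admin commands that appear later in this changeset (the zonegroup name ``default`` and the file name are assumptions):

    $ radosgw-admin zonegroup get --rgw-zonegroup=default > zg.json
    # edit zg.json and add "cname.domain.com" to the "hostnames" list
    $ radosgw-admin zonegroup set --rgw-zonegroup=default < zg.json
    $ radosgw-admin period update --commit

The gateways typically need a restart afterwards to pick up the new hostname list.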
 .. tip:: We prefer the first method, because the second method requires expensive domain certification and DNS wild cards.
 
 Common Request Headers
@@ -105,3 +107,5 @@ Common Response Status
 +---------------+-----------------------------------+
 | ``500``       | InternalError                     |
 +---------------+-----------------------------------+
+
+.. _`Ceph Object Gateway - Multisite Configuration`: ../../multisite
diff --git a/doc/radosgw/upgrade_to_jewel.rst b/doc/radosgw/upgrade_to_jewel.rst
new file mode 100644
index 0000000..dbe1f74
--- /dev/null
+++ b/doc/radosgw/upgrade_to_jewel.rst
@@ -0,0 +1,37 @@
+==================================================================
+RGW upgrading to Jewel versions 10.2.0, 10.2.1, 10.2.2 and 10.2.3
+==================================================================
+
+.. versionadded:: Jewel
+
+Upgrading :term:`Ceph Object Gateway` to the early Jewel versions (10.2.0 up to and including 10.2.3) caused issues. This document describes the recovery procedure that is needed afterwards.
+
+Running mixed versions of :term:`Ceph Object Gateway` is not supported.
+
+Backup of old configuration
+===========================
+
+Run the following commands::
+
+  $ rados mkpool .rgw.root.backup
+  $ rados cppool .rgw.root .rgw.root.backup
+
+Fix configuration after upgrade
+===============================
+Stop all :term:`Ceph Object Gateway` daemons running in the cluster.
+
+Run the following commands::
+
+  $ rados rmpool .rgw.root
+
+  $ radosgw-admin zonegroup get --rgw-zonegroup=default | sed 's/"id":.*/"id": "default",/g' | sed 's/"master_zone.*/"master_zone":"default",/g' > default-zg.json
+
+  $ radosgw-admin zone get --zone-id=default > default-zone.json
+
+  $ radosgw-admin realm create --rgw-realm=myrealm
+
+  $ radosgw-admin zonegroup set --rgw-zonegroup=default --default < default-zg.json
+
+  $ radosgw-admin zone set --rgw-zone=default --default < default-zone.json
+
+  $ radosgw-admin period update --commit
+
+Start all :term:`Ceph Object Gateway` daemons in the cluster.
+
diff --git a/install-deps.sh b/install-deps.sh
index 129178f..94def86 100755
--- a/install-deps.sh
+++ b/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/man/ceph-authtool.8 b/man/ceph-authtool.8
index 8c5f657..8b80743 100644
--- a/man/ceph-authtool.8
+++ b/man/ceph-authtool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-AUTHTOOL" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-AUTHTOOL" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-authtool \- ceph keyring manipulation tool
 .
diff --git a/man/ceph-clsinfo.8 b/man/ceph-clsinfo.8
index 68742f6..e36aa49 100644
--- a/man/ceph-clsinfo.8
+++ b/man/ceph-clsinfo.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CLSINFO" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-CLSINFO" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-clsinfo \- show class object information
 .
diff --git a/man/ceph-conf.8 b/man/ceph-conf.8
index 525ac6f..704046e 100644
--- a/man/ceph-conf.8
+++ b/man/ceph-conf.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CONF" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-CONF" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-conf \- ceph conf file tool
 .
diff --git a/man/ceph-create-keys.8 b/man/ceph-create-keys.8
index f289ddd..23cb2e5 100644
--- a/man/ceph-create-keys.8
+++ b/man/ceph-create-keys.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CREATE-KEYS" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-CREATE-KEYS" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-create-keys \- ceph keyring generate tool
 .
diff --git a/man/ceph-debugpack.8 b/man/ceph-debugpack.8
index 9b835ab..69a571b 100644
--- a/man/ceph-debugpack.8
+++ b/man/ceph-debugpack.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DEBUGPACK" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-DEBUGPACK" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-debugpack \- ceph debug packer utility
 .
diff --git a/man/ceph-dencoder.8 b/man/ceph-dencoder.8
index 310e724..5467179 100644
--- a/man/ceph-dencoder.8
+++ b/man/ceph-dencoder.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DENCODER" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-DENCODER" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-dencoder \- ceph encoder/decoder utility
 .
diff --git a/man/ceph-deploy.8 b/man/ceph-deploy.8
index 75bacf8..f8949bc 100644
--- a/man/ceph-deploy.8
+++ b/man/ceph-deploy.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DEPLOY" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-DEPLOY" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-deploy \- Ceph deployment tool
 .
diff --git a/man/ceph-detect-init.8 b/man/ceph-detect-init.8
index e740ae5..6b2ac3f 100644
--- a/man/ceph-detect-init.8
+++ b/man/ceph-detect-init.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DETECT-INIT" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-DETECT-INIT" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-detect-init \- display the init system Ceph should use
 .
diff --git a/man/ceph-disk.8 b/man/ceph-disk.8
index 835959a..91c9eba 100644
--- a/man/ceph-disk.8
+++ b/man/ceph-disk.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DISK" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-DISK" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-disk \- Ceph disk utility for OSD
 .
diff --git a/man/ceph-fuse.8 b/man/ceph-fuse.8
index 181905b..e4c619f 100644
--- a/man/ceph-fuse.8
+++ b/man/ceph-fuse.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-FUSE" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-FUSE" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-fuse \- FUSE-based client for ceph
 .
diff --git a/man/ceph-mds.8 b/man/ceph-mds.8
index fc2ca71..a93b016 100644
--- a/man/ceph-mds.8
+++ b/man/ceph-mds.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-MDS" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-MDS" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-mds \- ceph metadata server daemon
 .
diff --git a/man/ceph-mon.8 b/man/ceph-mon.8
index 4f0755b..cedd3dc 100644
--- a/man/ceph-mon.8
+++ b/man/ceph-mon.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-MON" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-MON" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-mon \- ceph monitor daemon
 .
diff --git a/man/ceph-osd.8 b/man/ceph-osd.8
index 3e7f87f..cf54a9f 100644
--- a/man/ceph-osd.8
+++ b/man/ceph-osd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-OSD" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-OSD" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-osd \- ceph object storage daemon
 .
diff --git a/man/ceph-post-file.8 b/man/ceph-post-file.8
index 4a8c103..db49269 100644
--- a/man/ceph-post-file.8
+++ b/man/ceph-post-file.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-POST-FILE" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-POST-FILE" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-post-file \- post files for ceph developers
 .
diff --git a/man/ceph-rbdnamer.8 b/man/ceph-rbdnamer.8
index 91e8117..fecf799 100644
--- a/man/ceph-rbdnamer.8
+++ b/man/ceph-rbdnamer.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-RBDNAMER" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-RBDNAMER" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-rbdnamer \- udev helper to name RBD devices
 .
diff --git a/man/ceph-rest-api.8 b/man/ceph-rest-api.8
index b0c642c..c3908aa 100644
--- a/man/ceph-rest-api.8
+++ b/man/ceph-rest-api.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-REST-API" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-REST-API" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-rest-api \- ceph RESTlike administration server
 .
diff --git a/man/ceph-run.8 b/man/ceph-run.8
index f847d7e..af49e62 100644
--- a/man/ceph-run.8
+++ b/man/ceph-run.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-RUN" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-RUN" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-run \- restart daemon on core dump
 .
diff --git a/man/ceph-syn.8 b/man/ceph-syn.8
index 133ba78..8624db1 100644
--- a/man/ceph-syn.8
+++ b/man/ceph-syn.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-SYN" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH-SYN" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph-syn \- ceph synthetic workload generator
 .
diff --git a/man/ceph.8 b/man/ceph.8
index 73a1cfd..dbae3fd 100644
--- a/man/ceph.8
+++ b/man/ceph.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPH" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 ceph \- ceph administration tool
 .
diff --git a/man/cephfs.8 b/man/cephfs.8
index 2285e8a..25135f1 100644
--- a/man/cephfs.8
+++ b/man/cephfs.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPHFS" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CEPHFS" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 cephfs \- ceph file system options utility
 .
diff --git a/man/crushtool.8 b/man/crushtool.8
index 6759926..ac5f3b8 100644
--- a/man/crushtool.8
+++ b/man/crushtool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CRUSHTOOL" "8" "December 09, 2016" "dev" "Ceph"
+.TH "CRUSHTOOL" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 crushtool \- CRUSH map manipulation tool
 .
diff --git a/man/librados-config.8 b/man/librados-config.8
index ee67304..d34e38a 100644
--- a/man/librados-config.8
+++ b/man/librados-config.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "LIBRADOS-CONFIG" "8" "December 09, 2016" "dev" "Ceph"
+.TH "LIBRADOS-CONFIG" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 librados-config \- display information about librados
 .
diff --git a/man/monmaptool.8 b/man/monmaptool.8
index ecde0b0..4de6ee6 100644
--- a/man/monmaptool.8
+++ b/man/monmaptool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MONMAPTOOL" "8" "December 09, 2016" "dev" "Ceph"
+.TH "MONMAPTOOL" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 monmaptool \- ceph monitor cluster map manipulation tool
 .
diff --git a/man/mount.ceph.8 b/man/mount.ceph.8
index 05e91d8..1dc72cd 100644
--- a/man/mount.ceph.8
+++ b/man/mount.ceph.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MOUNT.CEPH" "8" "December 09, 2016" "dev" "Ceph"
+.TH "MOUNT.CEPH" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 mount.ceph \- mount a ceph file system
 .
diff --git a/man/osdmaptool.8 b/man/osdmaptool.8
index 39a203f..bda4d30 100644
--- a/man/osdmaptool.8
+++ b/man/osdmaptool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "OSDMAPTOOL" "8" "December 09, 2016" "dev" "Ceph"
+.TH "OSDMAPTOOL" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 osdmaptool \- ceph osd cluster map manipulation tool
 .
diff --git a/man/rados.8 b/man/rados.8
index aba8f80..4d020c0 100644
--- a/man/rados.8
+++ b/man/rados.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOS" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RADOS" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rados \- rados object storage utility
 .
@@ -146,6 +146,8 @@ is an arbitrary object name. It is "benchmark_last_metadata" by
 default, and is used as the underlying object name for "read" and
 "write" ops.
 Note: \-b \fIobjsize\fP option is valid only in \fIwrite\fP mode.
+Note: \fIwrite\fP and \fIseq\fP must be run on the same host; otherwise, the
+objects created by \fIwrite\fP will have names that cause \fIseq\fP to fail.
 .UNINDENT
 .sp
 \fBcleanup\fP
@@ -158,13 +160,17 @@ List all the keys stored in the object map of object name.
 List all key/value pairs stored in the object map of object name.
 The values are dumped in hexadecimal.
 .TP
-.B \fBgetomapval\fP \fIname\fP \fIkey\fP
+.B \fBgetomapval\fP [ \-\-omap\-key\-file \fIfile\fP ] \fIname\fP \fIkey\fP [ \fIout\-file\fP ]
 Dump the hexadecimal value of key in the object map of object name.
+If the optional \fIout\-file\fP argument isn\(aqt provided, the value will be
+written to standard output.
 .TP
-.B \fBsetomapval\fP \fIname\fP \fIkey\fP \fIvalue\fP
-Set the value of key in the object map of object name.
+.B \fBsetomapval\fP [ \-\-omap\-key\-file \fIfile\fP ] \fIname\fP \fIkey\fP [ \fIvalue\fP ]
+Set the value of key in the object map of object name. If the optional
+\fIvalue\fP argument isn\(aqt provided, the value will be read from standard
+input.
 .TP
-.B \fBrmomapkey\fP \fIname\fP \fIkey\fP
+.B \fBrmomapkey\fP [ \-\-omap\-key\-file \fIfile\fP ] \fIname\fP \fIkey\fP
 Remove key from the object map of object name.
 .TP
 .B \fBgetomapheader\fP \fIname\fP
diff --git a/man/radosgw-admin.8 b/man/radosgw-admin.8
index ed1e0a2..8050828 100644
--- a/man/radosgw-admin.8
+++ b/man/radosgw-admin.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOSGW-ADMIN" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RADOSGW-ADMIN" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 radosgw-admin \- rados REST gateway user administration utility
 .
diff --git a/man/radosgw.8 b/man/radosgw.8
index 69a7204..e053b99 100644
--- a/man/radosgw.8
+++ b/man/radosgw.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOSGW" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RADOSGW" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 radosgw \- rados REST gateway
 .
diff --git a/man/rbd-fuse.8 b/man/rbd-fuse.8
index a96a8de..2898be2 100644
--- a/man/rbd-fuse.8
+++ b/man/rbd-fuse.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-FUSE" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-FUSE" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-fuse \- expose rbd images as files
 .
diff --git a/man/rbd-mirror.8 b/man/rbd-mirror.8
index 80d1d44..221fda4 100644
--- a/man/rbd-mirror.8
+++ b/man/rbd-mirror.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-MIRROR" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-MIRROR" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-mirror \- Ceph daemon for mirroring RBD images
 .
diff --git a/man/rbd-nbd.8 b/man/rbd-nbd.8
index 04e1d53..aca3c86 100644
--- a/man/rbd-nbd.8
+++ b/man/rbd-nbd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-NBD" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-NBD" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-nbd \- map rbd images to nbd device
 .
diff --git a/man/rbd-replay-many.8 b/man/rbd-replay-many.8
index 23e15a2..53822b8 100644
--- a/man/rbd-replay-many.8
+++ b/man/rbd-replay-many.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY-MANY" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY-MANY" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-replay-many \- replay a rados block device (RBD) workload on several clients
 .
diff --git a/man/rbd-replay-prep.8 b/man/rbd-replay-prep.8
index d80da65..aea3b83 100644
--- a/man/rbd-replay-prep.8
+++ b/man/rbd-replay-prep.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY-PREP" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY-PREP" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-replay-prep \- prepare captured rados block device (RBD) workloads for replay
 .
diff --git a/man/rbd-replay.8 b/man/rbd-replay.8
index 4b43301..8c64d40 100644
--- a/man/rbd-replay.8
+++ b/man/rbd-replay.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd-replay \- replay rados block device (RBD) workloads
 .
diff --git a/man/rbd.8 b/man/rbd.8
index 2ac572b..b07bc25 100644
--- a/man/rbd.8
+++ b/man/rbd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBD" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbd \- manage rados block device (RBD) images
 .
diff --git a/man/rbdmap.8 b/man/rbdmap.8
index 346b357..95be216 100644
--- a/man/rbdmap.8
+++ b/man/rbdmap.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBDMAP" "8" "December 09, 2016" "dev" "Ceph"
+.TH "RBDMAP" "8" "March 07, 2017" "dev" "Ceph"
 .SH NAME
 rbdmap \- map RBD devices at boot time
 .
diff --git a/qa/.gitignore b/qa/.gitignore
new file mode 100644
index 0000000..c4a1a68
--- /dev/null
+++ b/qa/.gitignore
@@ -0,0 +1,5 @@
+*~
+.*.sw[nmop]
+*.pyc
+.tox
+__pycache__
diff --git a/qa/README b/qa/README
new file mode 100644
index 0000000..0e32ce9
--- /dev/null
+++ b/qa/README
@@ -0,0 +1,52 @@
+ceph-qa-suite
+-------------
+
+clusters/    - some predefined cluster layouts
+suites/      - set suite
+
+The suites directory has a hierarchical collection of tests.  This can be
+freeform, but generally follows the convention of
+
+  suites/<test suite name>/<test group>/...
+
+A test is described by a yaml fragment.
+
+A test can exist as a single .yaml file in the directory tree.  For example:
+
+ suites/foo/one.yaml
+ suites/foo/two.yaml
+
+is a simple group of two tests.
+
+A directory with a magic '+' file represents a test that combines all
+other items in the directory into a single yaml fragment.  For example:
+
+ suites/foo/bar/+
+ suites/foo/bar/a.yaml
+ suites/foo/bar/b.yaml
+ suites/foo/bar/c.yaml
+
+is a single test consisting of a + b + c.
+
+A directory with a magic '%' file represents a test matrix formed from
+all other items in the directory.  For example,
+
+ suites/baz/%
+ suites/baz/a.yaml
+ suites/baz/b/b1.yaml
+ suites/baz/b/b2.yaml
+ suites/baz/c.yaml
+ suites/baz/d/d1.yaml
+ suites/baz/d/d2.yaml
+
+is a 4-dimensional test matrix.  Two dimensions (a, c) are trivial (1
+item), so this is really 2x2 = 4 tests, which are
+
+  a + b1 + c + d1
+  a + b1 + c + d2
+  a + b2 + c + d1
+  a + b2 + c + d2
+
+Symlinks are okay.
+
+The teuthology code can be found in https://github.com/ceph/teuthology.git
diff --git a/qa/archs/aarch64.yaml b/qa/archs/aarch64.yaml
new file mode 100644
index 0000000..6399b99
--- /dev/null
+++ b/qa/archs/aarch64.yaml
@@ -0,0 +1 @@
+arch: aarch64
diff --git a/qa/archs/armv7.yaml b/qa/archs/armv7.yaml
new file mode 100644
index 0000000..c261ebd
--- /dev/null
+++ b/qa/archs/armv7.yaml
@@ -0,0 +1 @@
+arch: armv7l
diff --git a/qa/archs/i686.yaml b/qa/archs/i686.yaml
new file mode 100644
index 0000000..a920e5a
--- /dev/null
+++ b/qa/archs/i686.yaml
@@ -0,0 +1 @@
+arch: i686
diff --git a/qa/archs/x86_64.yaml b/qa/archs/x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/qa/archs/x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml b/qa/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
new file mode 100644
index 0000000..859a37f
--- /dev/null
+++ b/qa/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      dmcrypt: yes
diff --git a/qa/ceph-deploy-overrides/disable_diff_journal_disk.yaml b/qa/ceph-deploy-overrides/disable_diff_journal_disk.yaml
new file mode 100644
index 0000000..5c998c5
--- /dev/null
+++ b/qa/ceph-deploy-overrides/disable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      separate_journal_disk:
diff --git a/qa/ceph-deploy-overrides/enable_diff_journal_disk.yaml b/qa/ceph-deploy-overrides/enable_diff_journal_disk.yaml
new file mode 100644
index 0000000..ea3f634
--- /dev/null
+++ b/qa/ceph-deploy-overrides/enable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      separate_journal_disk: yes
diff --git a/qa/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml b/qa/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
new file mode 100644
index 0000000..59cb799
--- /dev/null
+++ b/qa/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
@@ -0,0 +1,4 @@
+overrides:
+   ceph-deploy:
+      dmcrypt: yes
+      separate_journal_disk: yes
diff --git a/qa/clusters/extra-client.yaml b/qa/clusters/extra-client.yaml
new file mode 100644
index 0000000..349439c
--- /dev/null
+++ b/qa/clusters/extra-client.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/clusters/fixed-1.yaml b/qa/clusters/fixed-1.yaml
new file mode 100644
index 0000000..3768607
--- /dev/null
+++ b/qa/clusters/fixed-1.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
diff --git a/qa/clusters/fixed-2-ucephfs.yaml b/qa/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/clusters/fixed-2.yaml b/qa/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/clusters/fixed-3-cephfs.yaml b/qa/clusters/fixed-3-cephfs.yaml
new file mode 100644
index 0000000..499c84c
--- /dev/null
+++ b/qa/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/clusters/fixed-3.yaml b/qa/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/clusters/fixed-4.yaml b/qa/clusters/fixed-4.yaml
new file mode 100644
index 0000000..7f051dc
--- /dev/null
+++ b/qa/clusters/fixed-4.yaml
@@ -0,0 +1,5 @@
+roles: 
+- [mon.a, osd.0, osd.4, osd.8, osd.12] 
+- [mon.b, osd.1, osd.5, osd.9, osd.13] 
+- [mon.c, osd.2, osd.6, osd.10, osd.14] 
+- [osd.3, osd.7, osd.11, osd.15, client.0] 
diff --git a/qa/config/rados.yaml b/qa/config/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/config/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/config_options/cephdeploy_conf.yaml b/qa/config_options/cephdeploy_conf.yaml
new file mode 100644
index 0000000..7f9f0b7
--- /dev/null
+++ b/qa/config_options/cephdeploy_conf.yaml
@@ -0,0 +1,6 @@
+overrides:
+   ceph-deploy:
+      conf:
+         global:
+             mon pg warn min per osd: 2
+             osd pool default size: 2
diff --git a/qa/debug/buildpackages.yaml b/qa/debug/buildpackages.yaml
new file mode 100644
index 0000000..527ed66
--- /dev/null
+++ b/qa/debug/buildpackages.yaml
@@ -0,0 +1,6 @@
+tasks:
+    - buildpackages:
+        machine:
+          disk: 40 # GB
+          ram: 15000 # MB
+          cpus: 16
diff --git a/qa/debug/mds_client.yaml b/qa/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/debug/openstack-15G.yaml b/qa/debug/openstack-15G.yaml
new file mode 100644
index 0000000..857ad22
--- /dev/null
+++ b/qa/debug/openstack-15G.yaml
@@ -0,0 +1,3 @@
+openstack:
+  - machine:
+      ram: 15000 # MB
diff --git a/qa/debug/openstack-30G.yaml b/qa/debug/openstack-30G.yaml
new file mode 100644
index 0000000..da7ed80
--- /dev/null
+++ b/qa/debug/openstack-30G.yaml
@@ -0,0 +1,3 @@
+openstack:
+  - machine:
+      ram: 30000 # MB
diff --git a/qa/distros/a-supported-distro.yaml b/qa/distros/a-supported-distro.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/distros/a-supported-distro.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/distros/all/centos.yaml b/qa/distros/all/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/distros/all/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/distros/all/centos_6.3.yaml b/qa/distros/all/centos_6.3.yaml
new file mode 100644
index 0000000..32187d6
--- /dev/null
+++ b/qa/distros/all/centos_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.3"
diff --git a/qa/distros/all/centos_6.4.yaml b/qa/distros/all/centos_6.4.yaml
new file mode 100644
index 0000000..02383cd
--- /dev/null
+++ b/qa/distros/all/centos_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.4"
diff --git a/qa/distros/all/centos_6.5.yaml b/qa/distros/all/centos_6.5.yaml
new file mode 100644
index 0000000..77c9e41
--- /dev/null
+++ b/qa/distros/all/centos_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.5"
diff --git a/qa/distros/all/centos_7.0.yaml b/qa/distros/all/centos_7.0.yaml
new file mode 100644
index 0000000..bccb286
--- /dev/null
+++ b/qa/distros/all/centos_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.0"
diff --git a/qa/distros/all/centos_7.1.yaml b/qa/distros/all/centos_7.1.yaml
new file mode 100644
index 0000000..74c68f9
--- /dev/null
+++ b/qa/distros/all/centos_7.1.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.1"
diff --git a/qa/distros/all/centos_7.2.yaml b/qa/distros/all/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/distros/all/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/distros/all/centos_7.3.yaml b/qa/distros/all/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/distros/all/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/distros/all/debian_6.0.yaml b/qa/distros/all/debian_6.0.yaml
new file mode 100644
index 0000000..6820fa3
--- /dev/null
+++ b/qa/distros/all/debian_6.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "6.0"
diff --git a/qa/distros/all/debian_7.0.yaml b/qa/distros/all/debian_7.0.yaml
new file mode 100644
index 0000000..8100dc4
--- /dev/null
+++ b/qa/distros/all/debian_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "7.0"
diff --git a/qa/distros/all/debian_8.0.yaml b/qa/distros/all/debian_8.0.yaml
new file mode 100644
index 0000000..300a443
--- /dev/null
+++ b/qa/distros/all/debian_8.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "8.0"
diff --git a/qa/distros/all/fedora_17.yaml b/qa/distros/all/fedora_17.yaml
new file mode 100644
index 0000000..801053a
--- /dev/null
+++ b/qa/distros/all/fedora_17.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "17"
diff --git a/qa/distros/all/fedora_18.yaml b/qa/distros/all/fedora_18.yaml
new file mode 100644
index 0000000..07872aa
--- /dev/null
+++ b/qa/distros/all/fedora_18.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "18"
diff --git a/qa/distros/all/fedora_19.yaml b/qa/distros/all/fedora_19.yaml
new file mode 100644
index 0000000..5bac8ac
--- /dev/null
+++ b/qa/distros/all/fedora_19.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "19"
diff --git a/qa/distros/all/opensuse_12.2.yaml b/qa/distros/all/opensuse_12.2.yaml
new file mode 100644
index 0000000..ee9f877
--- /dev/null
+++ b/qa/distros/all/opensuse_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "12.2"
diff --git a/qa/distros/all/opensuse_13.2.yaml b/qa/distros/all/opensuse_13.2.yaml
new file mode 100644
index 0000000..7551e81
--- /dev/null
+++ b/qa/distros/all/opensuse_13.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "13.2"
diff --git a/qa/distros/all/opensuse_42.1.yaml b/qa/distros/all/opensuse_42.1.yaml
new file mode 100644
index 0000000..48c789d
--- /dev/null
+++ b/qa/distros/all/opensuse_42.1.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.1"
diff --git a/qa/distros/all/opensuse_42.2.yaml b/qa/distros/all/opensuse_42.2.yaml
new file mode 100644
index 0000000..10e8702
--- /dev/null
+++ b/qa/distros/all/opensuse_42.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.2"
diff --git a/qa/distros/all/rhel_6.3.yaml b/qa/distros/all/rhel_6.3.yaml
new file mode 100644
index 0000000..6a8edcd
--- /dev/null
+++ b/qa/distros/all/rhel_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.3"
diff --git a/qa/distros/all/rhel_6.4.yaml b/qa/distros/all/rhel_6.4.yaml
new file mode 100644
index 0000000..5225495
--- /dev/null
+++ b/qa/distros/all/rhel_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.4"
diff --git a/qa/distros/all/rhel_6.5.yaml b/qa/distros/all/rhel_6.5.yaml
new file mode 100644
index 0000000..7db54be
--- /dev/null
+++ b/qa/distros/all/rhel_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.5"
diff --git a/qa/distros/all/rhel_7.0.yaml b/qa/distros/all/rhel_7.0.yaml
new file mode 100644
index 0000000..c87c0bc
--- /dev/null
+++ b/qa/distros/all/rhel_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "7.0"
diff --git a/qa/distros/all/sle_12.2.yaml b/qa/distros/all/sle_12.2.yaml
new file mode 100644
index 0000000..2a4a28c
--- /dev/null
+++ b/qa/distros/all/sle_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: sle
+os_version: "12.2"
diff --git a/qa/distros/all/ubuntu_12.04.yaml b/qa/distros/all/ubuntu_12.04.yaml
new file mode 100644
index 0000000..dbc3a8d
--- /dev/null
+++ b/qa/distros/all/ubuntu_12.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.04"
diff --git a/qa/distros/all/ubuntu_12.10.yaml b/qa/distros/all/ubuntu_12.10.yaml
new file mode 100644
index 0000000..ab65567
--- /dev/null
+++ b/qa/distros/all/ubuntu_12.10.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.10"
diff --git a/qa/distros/all/ubuntu_14.04.yaml b/qa/distros/all/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/distros/all/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/distros/all/ubuntu_14.04_aarch64.yaml b/qa/distros/all/ubuntu_14.04_aarch64.yaml
new file mode 100644
index 0000000..9dfbcb5
--- /dev/null
+++ b/qa/distros/all/ubuntu_14.04_aarch64.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: aarch64
diff --git a/qa/distros/all/ubuntu_14.04_i686.yaml b/qa/distros/all/ubuntu_14.04_i686.yaml
new file mode 100644
index 0000000..4a0652e
--- /dev/null
+++ b/qa/distros/all/ubuntu_14.04_i686.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: i686
diff --git a/qa/distros/all/ubuntu_16.04.yaml b/qa/distros/all/ubuntu_16.04.yaml
new file mode 100644
index 0000000..a459fdd
--- /dev/null
+++ b/qa/distros/all/ubuntu_16.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "16.04"
diff --git a/qa/distros/supported/centos_7.3.yaml b/qa/distros/supported/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/distros/supported/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/distros/supported/ubuntu_14.04.yaml b/qa/distros/supported/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/distros/supported/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/erasure-code/ec-feature-plugins-v2.yaml b/qa/erasure-code/ec-feature-plugins-v2.yaml
new file mode 100644
index 0000000..102a452
--- /dev/null
+++ b/qa/erasure-code/ec-feature-plugins-v2.yaml
@@ -0,0 +1,97 @@
+#
+# Test the expected behavior of the
+#
+#    CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 
+#
+# feature.
+#
+roles:
+- - mon.a
+  - mon.b
+  - osd.0
+  - osd.1
+- - osd.2
+  - mon.c
+tasks:
+#
+# Install firefly
+#
+- install:
+    branch: firefly
+- ceph:
+    fs: xfs
+#
+# We don't need mon.c for now: it will be used later to make sure an old
+# mon cannot join the quorum once the feature has been activated
+#
+- ceph.stop:
+    daemons: [mon.c]
+- exec:
+    mon.a: 
+      - |-
+        ceph osd erasure-code-profile set WRONG plugin=WRONG
+        ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
+#
+# Partial upgrade, osd.2 is not upgraded
+#
+- install.upgrade:
+    osd.0: 
+#
+# a is the leader
+#
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - |-
+        ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: the monitor cluster"
+- ceph.restart:
+    daemons: [mon.b, osd.1, osd.0]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+#
+# The lrc plugin cannot be used because osd.2 is not upgraded yet
+# and would crash.
+#
+- exec:
+    mon.a: 
+      - |-
+        ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: osd.2"
+#
+# Taking osd.2 out, the rest of the cluster is upgraded
+#
+- ceph.stop:
+    daemons: [osd.2]
+- sleep:
+    duration: 60
+#
+# Creating an erasure code profile using the lrc plugin now works
+#
+- exec:
+    mon.a: 
+      - "ceph osd erasure-code-profile set profile-lrc plugin=lrc"
+#
+# osd.2 won't be able to join because it does not support the feature
+#
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+- sleep:
+    duration: 60
+- exec:
+    osd.2: 
+      - |-
+        grep "protocol feature.*missing 100000000000" /var/log/ceph/ceph-osd.2.log
+#
+# mon.c won't be able to join because it does not support the feature
+#
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+- sleep:
+    duration: 60
+- exec:
+    mon.c: 
+      - |-
+        grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
diff --git a/qa/erasure-code/ec-feature-plugins-v3.yaml b/qa/erasure-code/ec-feature-plugins-v3.yaml
new file mode 100644
index 0000000..66a5726
--- /dev/null
+++ b/qa/erasure-code/ec-feature-plugins-v3.yaml
@@ -0,0 +1,97 @@
+#
+# Test the expected behavior of the
+#
+#    CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3
+#
+# feature.
+#
+roles:
+- - mon.a
+  - mon.b
+  - osd.0
+  - osd.1
+- - osd.2
+  - mon.c
+tasks:
+#
+# Install hammer
+#
+- install:
+    branch: hammer
+- ceph:
+    fs: xfs
+#
+# We don't need mon.c for now: it will be used later to make sure an old
+# mon cannot join the quorum once the feature has been activated
+#
+- ceph.stop:
+    daemons: [mon.c]
+- exec:
+    mon.a: 
+      - |-
+        ceph osd erasure-code-profile set WRONG plugin=WRONG
+        ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
+#
+# Partial upgrade, osd.2 is not upgraded
+#
+- install.upgrade:
+    osd.0: 
+#
+# a is the leader
+#
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - |-
+        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster"
+- ceph.restart:
+    daemons: [mon.b, osd.1, osd.0]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+#
+# The shec plugin cannot be used because osd.2 is not upgraded yet
+# and would crash.
+#
+- exec:
+    mon.a: 
+      - |-
+        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2"
+#
+# Taking osd.2 out, the rest of the cluster is upgraded
+#
+- ceph.stop:
+    daemons: [osd.2]
+- sleep:
+    duration: 60
+#
+# Creating an erasure code profile using the shec plugin now works
+#
+- exec:
+    mon.a: 
+      - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec"
+#
+# osd.2 won't be able to join because it does not support the feature
+#
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+- sleep:
+    duration: 60
+- exec:
+    osd.2: 
+      - |-
+        grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log
+#
+# mon.c won't be able to join because it does not support the feature
+#
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+- sleep:
+    duration: 60
+- exec:
+    mon.c: 
+      - |-
+        grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
diff --git a/qa/erasure-code/ec-rados-default.yaml b/qa/erasure-code/ec-rados-default.yaml
new file mode 100644
index 0000000..cc62371
--- /dev/null
+++ b/qa/erasure-code/ec-rados-default.yaml
@@ -0,0 +1,19 @@
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/erasure-code/ec-rados-parallel.yaml b/qa/erasure-code/ec-rados-parallel.yaml
new file mode 100644
index 0000000..0f01d84
--- /dev/null
+++ b/qa/erasure-code/ec-rados-parallel.yaml
@@ -0,0 +1,20 @@
+workload:
+  parallel:
+    - rados:
+       clients: [client.0]
+       ops: 4000
+       objects: 50
+       ec_pool: true
+       write_append_excl: false
+       op_weights:
+         read: 100
+         write: 0
+         append: 100
+         delete: 50
+         snap_create: 50
+         snap_remove: 50
+         rollback: 50
+         copy_from: 50
+         setattr: 25
+         rmattr: 25
+    - print: "**** done rados ec parallel"
diff --git a/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 100644
index 0000000..8d7c497
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: isaprofile
+      plugin: isa
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 100644
index 0000000..4fa8d9f
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure21profile
+      plugin: jerasure
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..3c31a8b
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096 It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 100644
index 0000000..3463a01
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: lrcprofile
+      plugin: lrc
+      k: 4
+      m: 2
+      l: 3
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 100644
index 0000000..696baed
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: shecprofile
+      plugin: shec
+      k: 4
+      m: 3
+      c: 2
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/erasure-code/ec-rados-sequential.yaml b/qa/erasure-code/ec-rados-sequential.yaml
new file mode 100644
index 0000000..90536ee
--- /dev/null
+++ b/qa/erasure-code/ec-rados-sequential.yaml
@@ -0,0 +1,20 @@
+workload:
+  sequential:
+    - rados:
+       clients: [client.0]
+       ops: 4000
+       objects: 50
+       ec_pool: true
+       write_append_excl: false
+       op_weights:
+         read: 100
+         write: 0
+         append: 100
+         delete: 50
+         snap_create: 50
+         snap_remove: 50
+         rollback: 50
+         copy_from: 50
+         setattr: 25
+         rmattr: 25
+    - print: "**** done rados ec sequential"
diff --git a/qa/fs/btrfs.yaml b/qa/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/fs/ext4.yaml b/qa/fs/ext4.yaml
new file mode 100644
index 0000000..1cdf316
--- /dev/null
+++ b/qa/fs/ext4.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: ext4
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
diff --git a/qa/fs/xfs.yaml b/qa/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/machine_types/schedule_rados.sh b/qa/machine_types/schedule_rados.sh
new file mode 100755
index 0000000..3aef851
--- /dev/null
+++ b/qa/machine_types/schedule_rados.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# $1 - part
+# $2 - branch name
+# $3 - machine name
+# $4 - email address
+# $5 - filter out (this arg is to be at the end of the command line for now)
+
+## example #1 
+## (date +%U) week number
+## % 2 - mod 2 (e.g. 0,1,0,1 ...)
+## * 7 -  multiplied by 7 (e.g. 0,7,0,7...)
+## $1 day of the week (0-6)
+## /14 for 2 weeks
+
+## example #2 
+## (date +%U) week number
+## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...)
+## * 7 -  multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...)
+## $1 day of the week (0-6)
+## /28 for 4 weeks
+
+teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 -e $4 $5
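(As an illustrative sketch of the --subset arithmetic described in the comments above -- the week number 37 and part 3 used here are hypothetical values, not anything this script schedules:

    # (37 % 4) * 7 + 3 = 1 * 7 + 3 = 10, so this run covers slice 10 of 28
    echo "((37 % 4) * 7) + 3" | bc    # prints 10
    # i.e. teuthology-suite would be invoked with --subset 10/28
)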
diff --git a/qa/machine_types/schedule_rados_ovh.sh b/qa/machine_types/schedule_rados_ovh.sh
new file mode 100755
index 0000000..cc9e178
--- /dev/null
+++ b/qa/machine_types/schedule_rados_ovh.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# $1 - part
+# $2 - branch name
+# $3 - machine name
+# $4 - email address
+# $5 - filter out (this arg is to be at the end of the command line for now)
+
+## example #1 
+## (date +%U) week number
+## % 2 - mod 2 (e.g. 0,1,0,1 ...)
+## * 7 -  multiplied by 7 (e.g. 0,7,0,7...)
+## $1 day of the week (0-6)
+## /14 for 2 weeks
+
+## example #2 
+## (date +%U) week number
+## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...)
+## * 7 -  multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...)
+## $1 day of the week (0-6)
+## /28 for 4 weeks
+
+teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 -e $4 ~/vps.yaml $5
diff --git a/qa/machine_types/vps.yaml b/qa/machine_types/vps.yaml
new file mode 100644
index 0000000..bffa098
--- /dev/null
+++ b/qa/machine_types/vps.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 100
+        # the mon lease settings below address issue #1017
+        mon lease: 15
+        mon lease ack timeout: 25
+  rgw:
+    default_idle_timeout: 1200
+  s3tests:
+    idle_timeout: 1200
+  ceph-fuse:
+    client.0:
+       mount_wait: 60
+       mount_timeout: 120
diff --git a/qa/overrides/2-size-1-min-size.yaml b/qa/overrides/2-size-1-min-size.yaml
new file mode 100644
index 0000000..d710aee
--- /dev/null
+++ b/qa/overrides/2-size-1-min-size.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 2
+        osd_pool_default_min_size: 1
diff --git a/qa/overrides/2-size-2-min-size.yaml b/qa/overrides/2-size-2-min-size.yaml
new file mode 100644
index 0000000..42b854e
--- /dev/null
+++ b/qa/overrides/2-size-2-min-size.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 2
+        osd_pool_default_min_size: 2
diff --git a/qa/overrides/3-size-2-min-size.yaml b/qa/overrides/3-size-2-min-size.yaml
new file mode 100644
index 0000000..0257906
--- /dev/null
+++ b/qa/overrides/3-size-2-min-size.yaml
@@ -0,0 +1,8 @@
+overrides:
+  thrashosds:
+    min_in: 4
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 3
+        osd_pool_default_min_size: 2
diff --git a/qa/overrides/short_pg_log.yaml b/qa/overrides/short_pg_log.yaml
new file mode 100644
index 0000000..d50d965
--- /dev/null
+++ b/qa/overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_min_pg_log_entries: 150
+        osd_max_pg_log_entries: 300
diff --git a/qa/overrides/whitelist_wrongly_marked_down.yaml b/qa/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/packages/packages.yaml b/qa/packages/packages.yaml
new file mode 100644
index 0000000..140c42a
--- /dev/null
+++ b/qa/packages/packages.yaml
@@ -0,0 +1,45 @@
+---
+ceph:
+  deb:
+  - ceph
+  - ceph-mds
+  - ceph-common
+  - ceph-fuse
+  - ceph-test
+  - radosgw
+  - python-ceph
+  - libcephfs1
+  - libcephfs-java
+  - libcephfs-jni
+  - librados2
+  - librbd1
+  - rbd-fuse
+  - ceph-common-dbg
+  - ceph-fs-common-dbg
+  - ceph-fuse-dbg
+  - ceph-mds-dbg
+  - ceph-mon-dbg
+  - ceph-osd-dbg
+  - ceph-test-dbg
+  - libcephfs1-dbg
+  - librados2-dbg
+  - libradosstriper1-dbg
+  - librbd1-dbg
+  - librgw2-dbg
+  - radosgw-dbg
+  - rbd-fuse-dbg
+  - rbd-mirror-dbg
+  - rbd-nbd-dbg
+  rpm:
+  - ceph-radosgw
+  - ceph-test
+  - ceph
+  - ceph-fuse
+  - cephfs-java
+  - libcephfs_jni1
+  - libcephfs1
+  - librados2
+  - librbd1
+  - python-ceph
+  - rbd-fuse
+  - ceph-debuginfo
diff --git a/qa/releases/infernalis.yaml b/qa/releases/infernalis.yaml
new file mode 100644
index 0000000..f21e7fe
--- /dev/null
+++ b/qa/releases/infernalis.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    osd.0:
+      - ceph osd set sortbitwise
+      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
diff --git a/qa/releases/jewel.yaml b/qa/releases/jewel.yaml
new file mode 100644
index 0000000..f21e7fe
--- /dev/null
+++ b/qa/releases/jewel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    osd.0:
+      - ceph osd set sortbitwise
+      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
diff --git a/qa/releases/kraken.yaml b/qa/releases/kraken.yaml
new file mode 100644
index 0000000..4ffb722
--- /dev/null
+++ b/qa/releases/kraken.yaml
@@ -0,0 +1 @@
+# empty placeholder for now
diff --git a/qa/rgw_pool_type/ec-cache.yaml b/qa/rgw_pool_type/ec-cache.yaml
new file mode 100644
index 0000000..6462fbe
--- /dev/null
+++ b/qa/rgw_pool_type/ec-cache.yaml
@@ -0,0 +1,6 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/rgw_pool_type/ec-profile.yaml b/qa/rgw_pool_type/ec-profile.yaml
new file mode 100644
index 0000000..52798f8
--- /dev/null
+++ b/qa/rgw_pool_type/ec-profile.yaml
@@ -0,0 +1,10 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    erasure_code_profile:
+      name: testprofile
+      k: 3
+      m: 1
+      ruleset-failure-domain: osd
+  s3tests:
+    slow_backend: true
diff --git a/qa/rgw_pool_type/ec.yaml b/qa/rgw_pool_type/ec.yaml
new file mode 100644
index 0000000..7c99b7f
--- /dev/null
+++ b/qa/rgw_pool_type/ec.yaml
@@ -0,0 +1,5 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/rgw_pool_type/replicated.yaml b/qa/rgw_pool_type/replicated.yaml
new file mode 100644
index 0000000..c91709e
--- /dev/null
+++ b/qa/rgw_pool_type/replicated.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: false
diff --git a/qa/suites/big/rados-thrash/% b/qa/suites/big/rados-thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/big/rados-thrash/ceph/ceph.yaml b/qa/suites/big/rados-thrash/ceph/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/big/rados-thrash/ceph/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/big/rados-thrash/clusters/big.yaml b/qa/suites/big/rados-thrash/clusters/big.yaml
new file mode 100644
index 0000000..18197ad
--- /dev/null
+++ b/qa/suites/big/rados-thrash/clusters/big.yaml
@@ -0,0 +1,68 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
+- [osd.63, osd.64, osd.65, client.21]
+- [osd.66, osd.67, osd.68, client.22]
+- [osd.69, osd.70, osd.71, client.23]
+- [osd.72, osd.73, osd.74, client.24]
+- [osd.75, osd.76, osd.77, client.25]
+- [osd.78, osd.79, osd.80, client.26]
+- [osd.81, osd.82, osd.83, client.27]
+- [osd.84, osd.85, osd.86, client.28]
+- [osd.87, osd.88, osd.89, client.29]
+- [osd.90, osd.91, osd.92, client.30]
+- [osd.93, osd.94, osd.95, client.31]
+- [osd.96, osd.97, osd.98, client.32]
+- [osd.99, osd.100, osd.101, client.33]
+- [osd.102, osd.103, osd.104, client.34]
+- [osd.105, osd.106, osd.107, client.35]
+- [osd.108, osd.109, osd.110, client.36]
+- [osd.111, osd.112, osd.113, client.37]
+- [osd.114, osd.115, osd.116, client.38]
+- [osd.117, osd.118, osd.119, client.39]
+- [osd.120, osd.121, osd.122, client.40]
+- [osd.123, osd.124, osd.125, client.41]
+- [osd.126, osd.127, osd.128, client.42]
+- [osd.129, osd.130, osd.131, client.43]
+- [osd.132, osd.133, osd.134, client.44]
+- [osd.135, osd.136, osd.137, client.45]
+- [osd.138, osd.139, osd.140, client.46]
+- [osd.141, osd.142, osd.143, client.47]
+- [osd.144, osd.145, osd.146, client.48]
+- [osd.147, osd.148, osd.149, client.49]
+- [osd.150, osd.151, osd.152, client.50]
+#- [osd.153, osd.154, osd.155, client.51]
+#- [osd.156, osd.157, osd.158, client.52]
+#- [osd.159, osd.160, osd.161, client.53]
+#- [osd.162, osd.163, osd.164, client.54]
+#- [osd.165, osd.166, osd.167, client.55]
+#- [osd.168, osd.169, osd.170, client.56]
+#- [osd.171, osd.172, osd.173, client.57]
+#- [osd.174, osd.175, osd.176, client.58]
+#- [osd.177, osd.178, osd.179, client.59]
+#- [osd.180, osd.181, osd.182, client.60]
+#- [osd.183, osd.184, osd.185, client.61]
+#- [osd.186, osd.187, osd.188, client.62]
+#- [osd.189, osd.190, osd.191, client.63]
+#- [osd.192, osd.193, osd.194, client.64]
+#- [osd.195, osd.196, osd.197, client.65]
+#- [osd.198, osd.199, osd.200, client.66]
diff --git a/qa/suites/big/rados-thrash/clusters/medium.yaml b/qa/suites/big/rados-thrash/clusters/medium.yaml
new file mode 100644
index 0000000..48b66dd
--- /dev/null
+++ b/qa/suites/big/rados-thrash/clusters/medium.yaml
@@ -0,0 +1,22 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
diff --git a/qa/suites/big/rados-thrash/clusters/small.yaml b/qa/suites/big/rados-thrash/clusters/small.yaml
new file mode 100644
index 0000000..b5a7990
--- /dev/null
+++ b/qa/suites/big/rados-thrash/clusters/small.yaml
@@ -0,0 +1,6 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
diff --git a/qa/suites/big/rados-thrash/fs/btrfs.yaml b/qa/suites/big/rados-thrash/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/big/rados-thrash/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/big/rados-thrash/fs/xfs.yaml b/qa/suites/big/rados-thrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/big/rados-thrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/big/rados-thrash/thrashers/default.yaml b/qa/suites/big/rados-thrash/thrashers/default.yaml
new file mode 100644
index 0000000..d67ff20
--- /dev/null
+++ b/qa/suites/big/rados-thrash/thrashers/default.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..b73bb67
--- /dev/null
+++ b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    ops: 4000
+    max_seconds: 3600
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/buildpackages/any/% b/qa/suites/buildpackages/any/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/buildpackages/any/distros/centos.yaml b/qa/suites/buildpackages/any/distros/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/buildpackages/any/distros/centos_6.3.yaml b/qa/suites/buildpackages/any/distros/centos_6.3.yaml
new file mode 100644
index 0000000..32187d6
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.3"
diff --git a/qa/suites/buildpackages/any/distros/centos_6.4.yaml b/qa/suites/buildpackages/any/distros/centos_6.4.yaml
new file mode 100644
index 0000000..02383cd
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.4"
diff --git a/qa/suites/buildpackages/any/distros/centos_6.5.yaml b/qa/suites/buildpackages/any/distros/centos_6.5.yaml
new file mode 100644
index 0000000..77c9e41
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.5"
diff --git a/qa/suites/buildpackages/any/distros/centos_7.0.yaml b/qa/suites/buildpackages/any/distros/centos_7.0.yaml
new file mode 100644
index 0000000..bccb286
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/any/distros/centos_7.1.yaml b/qa/suites/buildpackages/any/distros/centos_7.1.yaml
new file mode 100644
index 0000000..74c68f9
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_7.1.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.1"
diff --git a/qa/suites/buildpackages/any/distros/centos_7.2.yaml b/qa/suites/buildpackages/any/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/buildpackages/any/distros/centos_7.3.yaml b/qa/suites/buildpackages/any/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/buildpackages/any/distros/debian_6.0.yaml b/qa/suites/buildpackages/any/distros/debian_6.0.yaml
new file mode 100644
index 0000000..6820fa3
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/debian_6.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "6.0"
diff --git a/qa/suites/buildpackages/any/distros/debian_7.0.yaml b/qa/suites/buildpackages/any/distros/debian_7.0.yaml
new file mode 100644
index 0000000..8100dc4
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/debian_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/any/distros/debian_8.0.yaml b/qa/suites/buildpackages/any/distros/debian_8.0.yaml
new file mode 100644
index 0000000..300a443
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/debian_8.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "8.0"
diff --git a/qa/suites/buildpackages/any/distros/fedora_17.yaml b/qa/suites/buildpackages/any/distros/fedora_17.yaml
new file mode 100644
index 0000000..801053a
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/fedora_17.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "17"
diff --git a/qa/suites/buildpackages/any/distros/fedora_18.yaml b/qa/suites/buildpackages/any/distros/fedora_18.yaml
new file mode 100644
index 0000000..07872aa
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/fedora_18.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "18"
diff --git a/qa/suites/buildpackages/any/distros/fedora_19.yaml b/qa/suites/buildpackages/any/distros/fedora_19.yaml
new file mode 100644
index 0000000..5bac8ac
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/fedora_19.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "19"
diff --git a/qa/suites/buildpackages/any/distros/opensuse_12.2.yaml b/qa/suites/buildpackages/any/distros/opensuse_12.2.yaml
new file mode 100644
index 0000000..ee9f877
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/opensuse_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "12.2"
diff --git a/qa/suites/buildpackages/any/distros/opensuse_13.2.yaml b/qa/suites/buildpackages/any/distros/opensuse_13.2.yaml
new file mode 100644
index 0000000..7551e81
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/opensuse_13.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "13.2"
diff --git a/qa/suites/buildpackages/any/distros/opensuse_42.1.yaml b/qa/suites/buildpackages/any/distros/opensuse_42.1.yaml
new file mode 100644
index 0000000..48c789d
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/opensuse_42.1.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.1"
diff --git a/qa/suites/buildpackages/any/distros/opensuse_42.2.yaml b/qa/suites/buildpackages/any/distros/opensuse_42.2.yaml
new file mode 100644
index 0000000..10e8702
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/opensuse_42.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.2"
diff --git a/qa/suites/buildpackages/any/distros/rhel_6.3.yaml b/qa/suites/buildpackages/any/distros/rhel_6.3.yaml
new file mode 100644
index 0000000..6a8edcd
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/rhel_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.3"
diff --git a/qa/suites/buildpackages/any/distros/rhel_6.4.yaml b/qa/suites/buildpackages/any/distros/rhel_6.4.yaml
new file mode 100644
index 0000000..5225495
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/rhel_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.4"
diff --git a/qa/suites/buildpackages/any/distros/rhel_6.5.yaml b/qa/suites/buildpackages/any/distros/rhel_6.5.yaml
new file mode 100644
index 0000000..7db54be
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/rhel_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.5"
diff --git a/qa/suites/buildpackages/any/distros/rhel_7.0.yaml b/qa/suites/buildpackages/any/distros/rhel_7.0.yaml
new file mode 100644
index 0000000..c87c0bc
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/rhel_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/any/distros/sle_12.2.yaml b/qa/suites/buildpackages/any/distros/sle_12.2.yaml
new file mode 100644
index 0000000..2a4a28c
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/sle_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: sle
+os_version: "12.2"
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_12.04.yaml b/qa/suites/buildpackages/any/distros/ubuntu_12.04.yaml
new file mode 100644
index 0000000..dbc3a8d
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_12.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.04"
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_12.10.yaml b/qa/suites/buildpackages/any/distros/ubuntu_12.10.yaml
new file mode 100644
index 0000000..ab65567
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_12.10.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.10"
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_14.04.yaml b/qa/suites/buildpackages/any/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_14.04_aarch64.yaml b/qa/suites/buildpackages/any/distros/ubuntu_14.04_aarch64.yaml
new file mode 100644
index 0000000..9dfbcb5
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_14.04_aarch64.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: aarch64
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_14.04_i686.yaml b/qa/suites/buildpackages/any/distros/ubuntu_14.04_i686.yaml
new file mode 100644
index 0000000..4a0652e
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_14.04_i686.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: i686
diff --git a/qa/suites/buildpackages/any/distros/ubuntu_16.04.yaml b/qa/suites/buildpackages/any/distros/ubuntu_16.04.yaml
new file mode 100644
index 0000000..a459fdd
--- /dev/null
+++ b/qa/suites/buildpackages/any/distros/ubuntu_16.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "16.04"
diff --git a/qa/suites/buildpackages/any/tasks/release.yaml b/qa/suites/buildpackages/any/tasks/release.yaml
new file mode 100644
index 0000000..d7a3b62
--- /dev/null
+++ b/qa/suites/buildpackages/any/tasks/release.yaml
@@ -0,0 +1,8 @@
+# --suite buildpackages/any --ceph v10.0.1 --filter centos_7,ubuntu_14.04
+roles:
+    - [client.0]
+tasks:
+    - install:
+    - exec:
+        client.0:
+          - ceph --version | grep 'version '
diff --git a/qa/suites/buildpackages/tests/% b/qa/suites/buildpackages/tests/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/buildpackages/tests/distros/centos.yaml b/qa/suites/buildpackages/tests/distros/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/buildpackages/tests/distros/centos_6.3.yaml b/qa/suites/buildpackages/tests/distros/centos_6.3.yaml
new file mode 100644
index 0000000..32187d6
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.3"
diff --git a/qa/suites/buildpackages/tests/distros/centos_6.4.yaml b/qa/suites/buildpackages/tests/distros/centos_6.4.yaml
new file mode 100644
index 0000000..02383cd
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.4"
diff --git a/qa/suites/buildpackages/tests/distros/centos_6.5.yaml b/qa/suites/buildpackages/tests/distros/centos_6.5.yaml
new file mode 100644
index 0000000..77c9e41
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "6.5"
diff --git a/qa/suites/buildpackages/tests/distros/centos_7.0.yaml b/qa/suites/buildpackages/tests/distros/centos_7.0.yaml
new file mode 100644
index 0000000..bccb286
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/tests/distros/centos_7.1.yaml b/qa/suites/buildpackages/tests/distros/centos_7.1.yaml
new file mode 100644
index 0000000..74c68f9
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_7.1.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.1"
diff --git a/qa/suites/buildpackages/tests/distros/centos_7.2.yaml b/qa/suites/buildpackages/tests/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/buildpackages/tests/distros/centos_7.3.yaml b/qa/suites/buildpackages/tests/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/buildpackages/tests/distros/debian_6.0.yaml b/qa/suites/buildpackages/tests/distros/debian_6.0.yaml
new file mode 100644
index 0000000..6820fa3
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/debian_6.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "6.0"
diff --git a/qa/suites/buildpackages/tests/distros/debian_7.0.yaml b/qa/suites/buildpackages/tests/distros/debian_7.0.yaml
new file mode 100644
index 0000000..8100dc4
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/debian_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/tests/distros/debian_8.0.yaml b/qa/suites/buildpackages/tests/distros/debian_8.0.yaml
new file mode 100644
index 0000000..300a443
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/debian_8.0.yaml
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: "8.0"
diff --git a/qa/suites/buildpackages/tests/distros/fedora_17.yaml b/qa/suites/buildpackages/tests/distros/fedora_17.yaml
new file mode 100644
index 0000000..801053a
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/fedora_17.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "17"
diff --git a/qa/suites/buildpackages/tests/distros/fedora_18.yaml b/qa/suites/buildpackages/tests/distros/fedora_18.yaml
new file mode 100644
index 0000000..07872aa
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/fedora_18.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "18"
diff --git a/qa/suites/buildpackages/tests/distros/fedora_19.yaml b/qa/suites/buildpackages/tests/distros/fedora_19.yaml
new file mode 100644
index 0000000..5bac8ac
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/fedora_19.yaml
@@ -0,0 +1,2 @@
+os_type: fedora
+os_version: "19"
diff --git a/qa/suites/buildpackages/tests/distros/opensuse_12.2.yaml b/qa/suites/buildpackages/tests/distros/opensuse_12.2.yaml
new file mode 100644
index 0000000..ee9f877
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/opensuse_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "12.2"
diff --git a/qa/suites/buildpackages/tests/distros/opensuse_13.2.yaml b/qa/suites/buildpackages/tests/distros/opensuse_13.2.yaml
new file mode 100644
index 0000000..7551e81
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/opensuse_13.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "13.2"
diff --git a/qa/suites/buildpackages/tests/distros/opensuse_42.1.yaml b/qa/suites/buildpackages/tests/distros/opensuse_42.1.yaml
new file mode 100644
index 0000000..48c789d
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/opensuse_42.1.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.1"
diff --git a/qa/suites/buildpackages/tests/distros/opensuse_42.2.yaml b/qa/suites/buildpackages/tests/distros/opensuse_42.2.yaml
new file mode 100644
index 0000000..10e8702
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/opensuse_42.2.yaml
@@ -0,0 +1,2 @@
+os_type: opensuse
+os_version: "42.2"
diff --git a/qa/suites/buildpackages/tests/distros/rhel_6.3.yaml b/qa/suites/buildpackages/tests/distros/rhel_6.3.yaml
new file mode 100644
index 0000000..6a8edcd
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/rhel_6.3.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.3"
diff --git a/qa/suites/buildpackages/tests/distros/rhel_6.4.yaml b/qa/suites/buildpackages/tests/distros/rhel_6.4.yaml
new file mode 100644
index 0000000..5225495
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/rhel_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.4"
diff --git a/qa/suites/buildpackages/tests/distros/rhel_6.5.yaml b/qa/suites/buildpackages/tests/distros/rhel_6.5.yaml
new file mode 100644
index 0000000..7db54be
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/rhel_6.5.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.5"
diff --git a/qa/suites/buildpackages/tests/distros/rhel_7.0.yaml b/qa/suites/buildpackages/tests/distros/rhel_7.0.yaml
new file mode 100644
index 0000000..c87c0bc
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/rhel_7.0.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "7.0"
diff --git a/qa/suites/buildpackages/tests/distros/sle_12.2.yaml b/qa/suites/buildpackages/tests/distros/sle_12.2.yaml
new file mode 100644
index 0000000..2a4a28c
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/sle_12.2.yaml
@@ -0,0 +1,2 @@
+os_type: sle
+os_version: "12.2"
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_12.04.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_12.04.yaml
new file mode 100644
index 0000000..dbc3a8d
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_12.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.04"
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_12.10.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_12.10.yaml
new file mode 100644
index 0000000..ab65567
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_12.10.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "12.10"
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_14.04.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_14.04_aarch64.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_14.04_aarch64.yaml
new file mode 100644
index 0000000..9dfbcb5
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_14.04_aarch64.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: aarch64
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_14.04_i686.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_14.04_i686.yaml
new file mode 100644
index 0000000..4a0652e
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_14.04_i686.yaml
@@ -0,0 +1,3 @@
+os_type: ubuntu
+os_version: "14.04"
+arch: i686
diff --git a/qa/suites/buildpackages/tests/distros/ubuntu_16.04.yaml b/qa/suites/buildpackages/tests/distros/ubuntu_16.04.yaml
new file mode 100644
index 0000000..a459fdd
--- /dev/null
+++ b/qa/suites/buildpackages/tests/distros/ubuntu_16.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "16.04"
diff --git a/qa/suites/buildpackages/tests/tasks/release.yaml b/qa/suites/buildpackages/tests/tasks/release.yaml
new file mode 100644
index 0000000..05e8778
--- /dev/null
+++ b/qa/suites/buildpackages/tests/tasks/release.yaml
@@ -0,0 +1,20 @@
+# --suite buildpackages/tests --ceph v10.0.1 --filter centos_7.2,ubuntu_14.04
+overrides:
+   ansible.cephlab:
+     playbook: users.yml
+   buildpackages:
+     good_machine:
+       disk: 20 # GB
+       ram: 2000 # MB
+       cpus: 2
+     min_machine:
+       disk: 10 # GB
+       ram: 1000 # MB
+       cpus: 1
+roles:
+    - [client.0]
+tasks:
+    - install:
+    - exec:
+        client.0:
+          - ceph --version | grep 'version '
diff --git a/qa/suites/calamari/% b/qa/suites/calamari/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/calamari/clusters/osd-3.yaml b/qa/suites/calamari/clusters/osd-3.yaml
new file mode 100644
index 0000000..66f4fe5
--- /dev/null
+++ b/qa/suites/calamari/clusters/osd-3.yaml
@@ -0,0 +1,5 @@
+roles:
+- [client.0]
+- [mon.0, osd.0]
+- [osd.1]
+- [osd.2]
diff --git a/qa/suites/calamari/distros/centos6.4.yaml b/qa/suites/calamari/distros/centos6.4.yaml
new file mode 100644
index 0000000..2240054
--- /dev/null
+++ b/qa/suites/calamari/distros/centos6.4.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: '6.4'
diff --git a/qa/suites/calamari/distros/centos6.5.yaml b/qa/suites/calamari/distros/centos6.5.yaml
new file mode 100644
index 0000000..e2ee6b3
--- /dev/null
+++ b/qa/suites/calamari/distros/centos6.5.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: '6.5'
diff --git a/qa/suites/calamari/distros/precise.yaml b/qa/suites/calamari/distros/precise.yaml
new file mode 100644
index 0000000..7aaa31b
--- /dev/null
+++ b/qa/suites/calamari/distros/precise.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: precise
diff --git a/qa/suites/calamari/distros/rhel6.4.yaml b/qa/suites/calamari/distros/rhel6.4.yaml
new file mode 100644
index 0000000..72dd4d1
--- /dev/null
+++ b/qa/suites/calamari/distros/rhel6.4.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: '6.4'
diff --git a/qa/suites/calamari/distros/rhel6.5.yaml b/qa/suites/calamari/distros/rhel6.5.yaml
new file mode 100644
index 0000000..4294d98
--- /dev/null
+++ b/qa/suites/calamari/distros/rhel6.5.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: '6.5'
diff --git a/qa/suites/calamari/distros/rhel7.0.yaml b/qa/suites/calamari/distros/rhel7.0.yaml
new file mode 100644
index 0000000..1571f94
--- /dev/null
+++ b/qa/suites/calamari/distros/rhel7.0.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: '7.0'
diff --git a/qa/suites/calamari/distros/trusty.yaml b/qa/suites/calamari/distros/trusty.yaml
new file mode 100644
index 0000000..cef9fd0
--- /dev/null
+++ b/qa/suites/calamari/distros/trusty.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: trusty
diff --git a/qa/suites/calamari/distros/wheezy.yaml.disabled b/qa/suites/calamari/distros/wheezy.yaml.disabled
new file mode 100644
index 0000000..47c54de
--- /dev/null
+++ b/qa/suites/calamari/distros/wheezy.yaml.disabled
@@ -0,0 +1,2 @@
+os_type: debian
+os_version: '7.0'
diff --git a/qa/suites/calamari/tasks/calamari.yaml b/qa/suites/calamari/tasks/calamari.yaml
new file mode 100644
index 0000000..70e1129
--- /dev/null
+++ b/qa/suites/calamari/tasks/calamari.yaml
@@ -0,0 +1,10 @@
+machine_type: vps
+
+tasks:
+- ssh_keys:
+- calamari_setup:
+    iceball_location: http://download.inktank.com/enterprise-testing
+    ice_version: 1.2.2
+    email: calamari at inktank.com
+- calamari_nosetests:
+    calamari_branch: wip-testing-1.2.2
diff --git a/qa/suites/ceph-ansible/smoke/basic/% b/qa/suites/ceph-ansible/smoke/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml b/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml
new file mode 100644
index 0000000..c67a5cb
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: "3-node cluster"
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [mon.c, osd.6, osd.7, osd.8, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_7.3.yaml b/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_16.04.yaml b/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_16.04.yaml
new file mode 100644
index 0000000..6619fe4
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_16.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu 
+os_version: "16.04"
diff --git a/qa/suites/ceph-ansible/smoke/basic/2-config/ceph_ansible.yaml b/qa/suites/ceph-ansible/smoke/basic/2-config/ceph_ansible.yaml
new file mode 100644
index 0000000..4c16948
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/2-config/ceph_ansible.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: "Build the ceph cluster using ceph-ansible"
+
+overrides:
+   ceph_ansible:
+     vars:
+        ceph_conf_overrides:
+          global:
+            osd default pool size: 2
+            mon pg warn min per osd: 2
+        ceph_dev: true
+        ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
+        ceph_origin: upstream
+        ceph_test: true
+        journal_collocation: true
+        journal_size: 1024
+        osd_auto_discovery: false
+
+tasks:
+- ssh-keys:
+- ceph_ansible:
+- install.ship_utilities:
diff --git a/qa/suites/ceph-ansible/smoke/basic/3-tasks/ceph-admin-commands.yaml b/qa/suites/ceph-ansible/smoke/basic/3-tasks/ceph-admin-commands.yaml
new file mode 100644
index 0000000..33642d5
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/3-tasks/ceph-admin-commands.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "Run ceph-admin-commands.sh"
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - ceph-tests/ceph-admin-commands.sh
diff --git a/qa/suites/ceph-ansible/smoke/basic/3-tasks/cls.yaml b/qa/suites/ceph-ansible/smoke/basic/3-tasks/cls.yaml
new file mode 100644
index 0000000..781a4d4
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/3-tasks/cls.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "Run the rados cls tests"
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls
diff --git a/qa/suites/ceph-ansible/smoke/basic/3-tasks/rbd_import_export.yaml b/qa/suites/ceph-ansible/smoke/basic/3-tasks/rbd_import_export.yaml
new file mode 100644
index 0000000..9495934
--- /dev/null
+++ b/qa/suites/ceph-ansible/smoke/basic/3-tasks/rbd_import_export.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "Run the rbd import/export tests"
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/import_export.sh
diff --git a/qa/suites/ceph-deploy-release/% b/qa/suites/ceph-deploy-release/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/ceph-deploy-release/distros/centos_7.3.yaml b/qa/suites/ceph-deploy-release/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/ceph-deploy-release/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/ceph-deploy-release/distros/ubuntu_14.04.yaml b/qa/suites/ceph-deploy-release/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/ceph-deploy-release/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/ceph-deploy-release/overrides/ceph_deploy_dmcrypt.yaml b/qa/suites/ceph-deploy-release/overrides/ceph_deploy_dmcrypt.yaml
new file mode 100644
index 0000000..859a37f
--- /dev/null
+++ b/qa/suites/ceph-deploy-release/overrides/ceph_deploy_dmcrypt.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      dmcrypt: yes
diff --git a/qa/suites/ceph-deploy-release/overrides/disable_diff_journal_disk.yaml b/qa/suites/ceph-deploy-release/overrides/disable_diff_journal_disk.yaml
new file mode 100644
index 0000000..5c998c5
--- /dev/null
+++ b/qa/suites/ceph-deploy-release/overrides/disable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      separate_journal_disk:
diff --git a/qa/suites/ceph-deploy-release/tasks/release-install-test.yaml b/qa/suites/ceph-deploy-release/tasks/release-install-test.yaml
new file mode 100644
index 0000000..0df7742
--- /dev/null
+++ b/qa/suites/ceph-deploy-release/tasks/release-install-test.yaml
@@ -0,0 +1,40 @@
+overrides:
+  ceph-deploy:
+    branch:
+      stable: jewel
+    conf:
+      global:
+        mon pg warn min per osd: 2
+        osd pool default size: 2
+roles:
+- - mon.a
+  - mds.0
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - mon.b
+- - client.0
+openstack:
+  - machine:
+      disk: 10 # GB
+      ram: 2000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+    clients:
+      client.0:
+      - rados/test.sh
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/ceph-deploy/basic/% b/qa/suites/ceph-deploy/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
new file mode 100644
index 0000000..859a37f
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      dmcrypt: yes
diff --git a/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml
new file mode 100644
index 0000000..5c998c5
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      separate_journal_disk:
diff --git a/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml
new file mode 100644
index 0000000..ea3f634
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+   ceph-deploy:
+      separate_journal_disk: yes
diff --git a/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
new file mode 100644
index 0000000..59cb799
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
@@ -0,0 +1,4 @@
+overrides:
+   ceph-deploy:
+      dmcrypt: yes
+      separate_journal_disk: yes
diff --git a/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml b/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml
new file mode 100644
index 0000000..7f9f0b7
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml
@@ -0,0 +1,6 @@
+overrides:
+   ceph-deploy:
+      conf:
+         global:
+             mon pg warn min per osd: 2
+             osd pool default size: 2
diff --git a/qa/suites/ceph-deploy/basic/distros/centos_7.3.yaml b/qa/suites/ceph-deploy/basic/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/ceph-deploy/basic/distros/ubuntu_14.04.yaml b/qa/suites/ceph-deploy/basic/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml b/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml
new file mode 100644
index 0000000..0d651ca
--- /dev/null
+++ b/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mds.0
+  - osd.0
+- - osd.1
+  - mon.b
+  - client.0
+openstack:
+  - machine:
+      disk: 10 # GB
+      ram: 2000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- ssh_keys:
+- print: "**** done ssh_keys"
+- ceph-deploy:
+- print: "**** done ceph-deploy"
+- workunit:
+     clients:
+        client.0:
+           - ceph-tests/ceph-admin-commands.sh
+- print: "**** done ceph-tests/ceph-admin-commands.sh"
diff --git a/qa/suites/ceph-disk/basic/% b/qa/suites/ceph-disk/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/ceph-disk/basic/distros/centos_7.3.yaml b/qa/suites/ceph-disk/basic/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/ceph-disk/basic/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/ceph-disk/basic/distros/ubuntu_14.04.yaml b/qa/suites/ceph-disk/basic/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/ceph-disk/basic/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml b/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml
new file mode 100644
index 0000000..b7655a2
--- /dev/null
+++ b/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml
@@ -0,0 +1,37 @@
+roles:
+- - mon.a
+  - client.0
+- - osd.0
+  - osd.1
+openstack:
+  - machine:
+      disk: 20 # GB
+      ram: 2000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs # this implicitly means /dev/vd? are used instead of directories
+    wait-for-scrub: false
+    conf:
+       global:
+           mon pg warn min per osd: 2
+           osd pool default size: 2
+           osd crush chooseleaf type: 0 # failure domain == osd
+           osd pg bits: 2
+           osd pgp bits: 2
+#
+# Keep this around for debugging purposes. If uncommented the target
+# will pause and the workunit can be run and debug manually.
+#
+# - exec:
+#     client.0:
+#       - sleep 1000000000 # forever
+#
+- workunit:
+    clients:
+      all:
+        - ceph-disk/ceph-disk.sh
diff --git a/qa/suites/dummy/% b/qa/suites/dummy/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/dummy/all/nop.yaml b/qa/suites/dummy/all/nop.yaml
new file mode 100644
index 0000000..cef190d
--- /dev/null
+++ b/qa/suites/dummy/all/nop.yaml
@@ -0,0 +1,9 @@
+overrides:
+   ansible.cephlab:
+     playbook: users.yml
+roles:
+    - [mon.a, mds.a, osd.0, osd.1, client.0]
+
+tasks:
+    - nop:
+
diff --git a/qa/suites/experimental/multimds/% b/qa/suites/experimental/multimds/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/experimental/multimds/clusters/7-multimds.yaml b/qa/suites/experimental/multimds/clusters/7-multimds.yaml
new file mode 100644
index 0000000..17cfd7b
--- /dev/null
+++ b/qa/suites/experimental/multimds/clusters/7-multimds.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mds.a, mds.a-s]
+- [mon.b, mds.b, mds.b-s]
+- [mon.c, mds.c, mds.c-s]
+- [osd.0]
+- [osd.1]
+- [osd.2]
+- [client.0]
diff --git a/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml
new file mode 100644
index 0000000..bee01a8
--- /dev/null
+++ b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      mds:
+        mds thrash exports: 1
+        mds debug subtrees: 1
+        mds debug scatterstat: 1
+        mds verify scatter: 1
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - suites/fsstress.sh
+
diff --git a/qa/suites/fs/32bits/% b/qa/suites/fs/32bits/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/32bits/debug/mds_client.yaml b/qa/suites/fs/32bits/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/32bits/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/32bits/dirfrag/frag_enable.yaml b/qa/suites/fs/32bits/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/32bits/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/32bits/fs/btrfs.yaml b/qa/suites/fs/32bits/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/32bits/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/32bits/mount/ceph-fuse.yaml b/qa/suites/fs/32bits/mount/ceph-fuse.yaml
new file mode 100644
index 0000000..3b3f362
--- /dev/null
+++ b/qa/suites/fs/32bits/mount/ceph-fuse.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        client use faked inos: true
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
diff --git a/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..ddb18fb
--- /dev/null
+++ b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..e888213
--- /dev/null
+++ b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/fs/basic/% b/qa/suites/fs/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/basic/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/basic/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/basic/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/basic/debug/mds_client.yaml b/qa/suites/fs/basic/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/basic/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/basic/dirfrag/frag_enable.yaml b/qa/suites/fs/basic/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/basic/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/basic/fs/btrfs.yaml b/qa/suites/fs/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/basic/inline/no.yaml b/qa/suites/fs/basic/inline/no.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/fs/basic/inline/no.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/fs/basic/inline/yes.yaml b/qa/suites/fs/basic/inline/yes.yaml
new file mode 100644
index 0000000..fce64c6
--- /dev/null
+++ b/qa/suites/fs/basic/inline/yes.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- exec:
+    client.0:
+      - sudo ceph mds set inline_data true --yes-i-really-mean-it
diff --git a/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/basic/tasks/cephfs_scrub_tests.yaml b/qa/suites/fs/basic/tasks/cephfs_scrub_tests.yaml
new file mode 100644
index 0000000..ae6e727
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cephfs_scrub_tests.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds log max segments: 1
+        mds cache max size: 1000
+tasks:
+- ceph-fuse:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_scrub_checks
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..3e99204
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - kernel_untar_build.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..6dfec97
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - fs/misc
+
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml
new file mode 100644
index 0000000..c9720a2
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - fs/test_o_trunc.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_norstats.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_norstats.yaml
new file mode 100644
index 0000000..6174fad
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_norstats.yaml
@@ -0,0 +1,13 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - fs/norstats
+
+overrides:
+  ceph:
+    conf:
+      client:
+        client dirsize rbytes: false
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_quota.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_quota.yaml
new file mode 100644
index 0000000..a6d35ab
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_quota.yaml
@@ -0,0 +1,13 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - fs/quota
+
+overrides:
+  ceph:
+    conf:
+      client:
+        client quota: true
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..09898e1
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..ad96b4c
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..8600816
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..5908d95
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..3c11ed7
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..c6043e2
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsync-tester.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml
new file mode 100644
index 0000000..6989990
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/iogen.sh
+
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..1e23f67
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse: [client.0]
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..65bcd0d
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 1
+        debug client: 20
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..911026e
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+tasks:
+- ceph-fuse:
+- exec:
+    client.0:
+      - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100
+      - sleep 2
+      - cd $TESTDIR/mnt.* && truncate --size 0 ./foo
diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..9509650
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml b/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..0b1d41f
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs/test.sh
diff --git a/qa/suites/fs/basic/tasks/libcephfs_java.yaml b/qa/suites/fs/basic/tasks/libcephfs_java.yaml
new file mode 100644
index 0000000..4330d50
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/libcephfs_java.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs-java/test.sh
diff --git a/qa/suites/fs/basic/tasks/libcephfs_python.yaml b/qa/suites/fs/basic/tasks/libcephfs_python.yaml
new file mode 100644
index 0000000..6f77329
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/libcephfs_python.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - fs/test_python.sh
diff --git a/qa/suites/fs/basic/tasks/mds_creation_retry.yaml b/qa/suites/fs/basic/tasks/mds_creation_retry.yaml
new file mode 100644
index 0000000..76ceeaf
--- /dev/null
+++ b/qa/suites/fs/basic/tasks/mds_creation_retry.yaml
@@ -0,0 +1,7 @@
+tasks:
+- mds_creation_failure:
+- ceph-fuse:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
+
diff --git a/qa/suites/fs/multiclient/% b/qa/suites/fs/multiclient/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/multiclient/clusters/three_clients.yaml b/qa/suites/fs/multiclient/clusters/three_clients.yaml
new file mode 100644
index 0000000..1fc1b44
--- /dev/null
+++ b/qa/suites/fs/multiclient/clusters/three_clients.yaml
@@ -0,0 +1,15 @@
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.2]
+- [client.1]
+- [client.0]
+
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
+
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
+
diff --git a/qa/suites/fs/multiclient/clusters/two_clients.yaml b/qa/suites/fs/multiclient/clusters/two_clients.yaml
new file mode 100644
index 0000000..e6d8260
--- /dev/null
+++ b/qa/suites/fs/multiclient/clusters/two_clients.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.1]
+- [client.0]
+
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
+
diff --git a/qa/suites/fs/multiclient/debug/mds_client.yaml b/qa/suites/fs/multiclient/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/multiclient/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/multiclient/dirfrag/frag_enable.yaml b/qa/suites/fs/multiclient/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/multiclient/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/multiclient/fs/btrfs.yaml b/qa/suites/fs/multiclient/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/multiclient/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/multiclient/mount/ceph-fuse.yaml b/qa/suites/fs/multiclient/mount/ceph-fuse.yaml
new file mode 100644
index 0000000..37ac5b6
--- /dev/null
+++ b/qa/suites/fs/multiclient/mount/ceph-fuse.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
diff --git a/qa/suites/fs/multiclient/mount/kclient.yaml.disabled b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled
new file mode 100644
index 0000000..04adb48
--- /dev/null
+++ b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
diff --git a/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml b/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
new file mode 100644
index 0000000..4192fca
--- /dev/null
+++ b/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
@@ -0,0 +1,4 @@
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_misc
diff --git a/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled
new file mode 100644
index 0000000..cd43309
--- /dev/null
+++ b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled
@@ -0,0 +1,20 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+    clients:
+      - cd $TESTDIR
+      - wget http://download.ceph.com/qa/fsx-mpi.c
+      - mpicc fsx-mpi.c -o fsx-mpi
+      - rm fsx-mpi.c
+      - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+    exec: $TESTDIR/fsx-mpi 1MB -N 50000 -p 10000 -l 1048576
+    workdir: $TESTDIR/gmnt
+- pexec:
+    all:
+      - rm $TESTDIR/gmnt
+      - rm $TESTDIR/fsx-mpi
diff --git a/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml
new file mode 100644
index 0000000..94501b2
--- /dev/null
+++ b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml
@@ -0,0 +1,26 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+    clients:
+      - cd $TESTDIR
+      - wget http://download.ceph.com/qa/ior.tbz2
+      - tar xvfj ior.tbz2
+      - cd ior
+      - ./configure
+      - make
+      - make install DESTDIR=$TESTDIR/binary/
+      - cd $TESTDIR/
+      - rm ior.tbz2
+      - rm -r ior
+      - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+    exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile
+- pexec:
+    all:
+      - rm -f $TESTDIR/gmnt/ior.testfile
+      - rm -f $TESTDIR/gmnt
+      - rm -rf $TESTDIR/binary
diff --git a/qa/suites/fs/multiclient/tasks/mdtest.yaml b/qa/suites/fs/multiclient/tasks/mdtest.yaml
new file mode 100644
index 0000000..fd337bd
--- /dev/null
+++ b/qa/suites/fs/multiclient/tasks/mdtest.yaml
@@ -0,0 +1,23 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+    clients:
+      - cd $TESTDIR
+      - wget http://download.ceph.com/qa/mdtest-1.9.3.tgz
+      - mkdir mdtest-1.9.3
+      - cd mdtest-1.9.3
+      - tar xvfz $TESTDIR/mdtest-1.9.3.tgz
+      - rm $TESTDIR/mdtest-1.9.3.tgz
+      - MPI_CC=mpicc make
+      - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+    exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R
+- pexec:
+    all:
+      - rm -f $TESTDIR/gmnt
+      - rm -rf $TESTDIR/mdtest-1.9.3
+      - rm -rf $TESTDIR/._mdtest-1.9.3
diff --git a/qa/suites/fs/multifs/% b/qa/suites/fs/multifs/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/multifs/clusters/2-remote-clients.yaml b/qa/suites/fs/multifs/clusters/2-remote-clients.yaml
new file mode 100644
index 0000000..b5ff712
--- /dev/null
+++ b/qa/suites/fs/multifs/clusters/2-remote-clients.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, osd.0, mon.b, mds.a, mds.b, client.1]
+- [mds.c, mds.d, mon.c, client.0, osd.1, osd.2]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/multifs/debug/mds_client.yaml b/qa/suites/fs/multifs/debug/mds_client.yaml
new file mode 100644
index 0000000..da56489
--- /dev/null
+++ b/qa/suites/fs/multifs/debug/mds_client.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client.0:
+        debug ms: 1
+        debug client: 20
+      client.1:
+        debug ms: 1
+        debug client: 20
+      mon:
+        debug mon: 20
+
diff --git a/qa/suites/fs/multifs/dirfrag/frag_enable.yaml b/qa/suites/fs/multifs/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/multifs/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/multifs/mounts/ceph-fuse.yaml b/qa/suites/fs/multifs/mounts/ceph-fuse.yaml
new file mode 100644
index 0000000..8092598
--- /dev/null
+++ b/qa/suites/fs/multifs/mounts/ceph-fuse.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+    client.0:
+        mounted: false
+    client.1:
+        mounted: false
diff --git a/qa/suites/fs/multifs/tasks/failover.yaml b/qa/suites/fs/multifs/tasks/failover.yaml
new file mode 100644
index 0000000..8f3d5e1
--- /dev/null
+++ b/qa/suites/fs/multifs/tasks/failover.yaml
@@ -0,0 +1,6 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_failover
+
diff --git a/qa/suites/fs/multifs/xfs.yaml b/qa/suites/fs/multifs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/fs/multifs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/fs/permission/% b/qa/suites/fs/permission/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/permission/debug/mds_client.yaml b/qa/suites/fs/permission/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/permission/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/permission/dirfrag/frag_enable.yaml b/qa/suites/fs/permission/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..56d724b
--- /dev/null
+++ b/qa/suites/fs/permission/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+    ceph:
+        conf:
+            mds:
+                mds bal frag: true
+                mds bal split size: 100
+                mds bal merge size: 5
+                mds bal split bits: 3
+
diff --git a/qa/suites/fs/permission/fs/btrfs.yaml b/qa/suites/fs/permission/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/permission/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/permission/mount/ceph-fuse.yaml b/qa/suites/fs/permission/mount/ceph-fuse.yaml
new file mode 100644
index 0000000..48e1f1b
--- /dev/null
+++ b/qa/suites/fs/permission/mount/ceph-fuse.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        fuse default permissions: false
+        client posix acl: true
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
diff --git a/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..e888213
--- /dev/null
+++ b/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/fs/recovery/% b/qa/suites/fs/recovery/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/recovery/clusters/4-remote-clients.yaml b/qa/suites/fs/recovery/clusters/4-remote-clients.yaml
new file mode 100644
index 0000000..6b5e23b
--- /dev/null
+++ b/qa/suites/fs/recovery/clusters/4-remote-clients.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, osd.0, mds.a, mds.b, client.1, client.2, client.3]
+- [client.0, osd.1, osd.2]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/recovery/debug/mds_client.yaml b/qa/suites/fs/recovery/debug/mds_client.yaml
new file mode 100644
index 0000000..76cc4d8
--- /dev/null
+++ b/qa/suites/fs/recovery/debug/mds_client.yaml
@@ -0,0 +1,12 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client.0:
+        debug ms: 1
+        debug client: 20
+      client.1:
+        debug ms: 1
+        debug client: 20
diff --git a/qa/suites/fs/recovery/dirfrag/frag_enable.yaml b/qa/suites/fs/recovery/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/recovery/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/recovery/mounts/ceph-fuse.yaml b/qa/suites/fs/recovery/mounts/ceph-fuse.yaml
new file mode 100644
index 0000000..687baf8
--- /dev/null
+++ b/qa/suites/fs/recovery/mounts/ceph-fuse.yaml
@@ -0,0 +1,12 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+    client.0:
+        mounted: false
+    client.1:
+        mounted: false
+    client.2:
+        mounted: false
+    client.3:
+        mounted: false
diff --git a/qa/suites/fs/recovery/tasks/auto-repair.yaml b/qa/suites/fs/recovery/tasks/auto-repair.yaml
new file mode 100644
index 0000000..e331cdd
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/auto-repair.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - force file system read-only
+      - bad backtrace
+
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_auto_repair
diff --git a/qa/suites/fs/recovery/tasks/backtrace.yaml b/qa/suites/fs/recovery/tasks/backtrace.yaml
new file mode 100644
index 0000000..d740a5f
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/backtrace.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_backtrace
diff --git a/qa/suites/fs/recovery/tasks/cap-flush.yam b/qa/suites/fs/recovery/tasks/cap-flush.yam
new file mode 100644
index 0000000..0d26dc9
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/cap-flush.yam
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_cap_flush
diff --git a/qa/suites/fs/recovery/tasks/client-limits.yaml b/qa/suites/fs/recovery/tasks/client-limits.yaml
new file mode 100644
index 0000000..288866c
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/client-limits.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - responding to mclientcaps\(revoke\)
+      - not advance its oldest_client_tid
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_client_limits
diff --git a/qa/suites/fs/recovery/tasks/client-recovery.yaml b/qa/suites/fs/recovery/tasks/client-recovery.yaml
new file mode 100644
index 0000000..1433ee1
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/client-recovery.yaml
@@ -0,0 +1,13 @@
+
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+  ceph:
+    log-whitelist:
+      - wrongly marked me down
+      - slow request
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_client_recovery
diff --git a/qa/suites/fs/recovery/tasks/config-commands.yaml b/qa/suites/fs/recovery/tasks/config-commands.yaml
new file mode 100644
index 0000000..2f51801
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/config-commands.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+  ceph:
+    conf:
+      global:
+        lockdep: true
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_config_commands
diff --git a/qa/suites/fs/recovery/tasks/damage.yaml b/qa/suites/fs/recovery/tasks/damage.yaml
new file mode 100644
index 0000000..e8ddd3d
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/damage.yaml
@@ -0,0 +1,23 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - Error loading MDS rank
+      - missing journal object
+      - Error recovering journal
+      - error decoding table object
+      - failed to read JournalPointer
+      - Corrupt directory entry
+      - Corrupt fnode header
+      - corrupt sessionmap header
+      - Corrupt dentry
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_damage
+
diff --git a/qa/suites/fs/recovery/tasks/data-scan.yaml b/qa/suites/fs/recovery/tasks/data-scan.yaml
new file mode 100644
index 0000000..dd0a85f
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/data-scan.yaml
@@ -0,0 +1,15 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - unmatched fragstat
+      - was unreadable, recreating it now
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_data_scan
diff --git a/qa/suites/fs/recovery/tasks/forward-scrub.yaml b/qa/suites/fs/recovery/tasks/forward-scrub.yaml
new file mode 100644
index 0000000..9c73821
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/forward-scrub.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_forward_scrub
diff --git a/qa/suites/fs/recovery/tasks/journal-repair.yaml b/qa/suites/fs/recovery/tasks/journal-repair.yaml
new file mode 100644
index 0000000..c85f46c
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/journal-repair.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace on dir ino
+      - error reading table object
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_journal_repair
diff --git a/qa/suites/fs/recovery/tasks/mds-flush.yaml b/qa/suites/fs/recovery/tasks/mds-flush.yaml
new file mode 100644
index 0000000..d59a8ad
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/mds-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_flush
diff --git a/qa/suites/fs/recovery/tasks/mds-full.yaml b/qa/suites/fs/recovery/tasks/mds-full.yaml
new file mode 100644
index 0000000..64ece03
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/mds-full.yaml
@@ -0,0 +1,25 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - is full \(reached quota
+    conf:
+      osd:
+        osd mon report interval max: 5
+        osd objectstore: memstore
+        memstore device bytes: 100000000
+      client.0:
+        debug client: 20
+        debug objecter: 20
+        debug objectcacher: 20
+      client.1:
+        debug client: 20
+        debug objecter: 20
+        debug objectcacher: 20
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_full
diff --git a/qa/suites/fs/recovery/tasks/pool-perm.yaml b/qa/suites/fs/recovery/tasks/pool-perm.yaml
new file mode 100644
index 0000000..f220626
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/pool-perm.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_pool_perm
diff --git a/qa/suites/fs/recovery/tasks/sessionmap.yaml b/qa/suites/fs/recovery/tasks/sessionmap.yaml
new file mode 100644
index 0000000..22c1fe5
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/sessionmap.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - client session with invalid root
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_sessionmap
diff --git a/qa/suites/fs/recovery/tasks/strays.yaml b/qa/suites/fs/recovery/tasks/strays.yaml
new file mode 100644
index 0000000..2809fc1
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/strays.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_strays
diff --git a/qa/suites/fs/recovery/tasks/volume-client.yaml b/qa/suites/fs/recovery/tasks/volume-client.yaml
new file mode 100644
index 0000000..154d148
--- /dev/null
+++ b/qa/suites/fs/recovery/tasks/volume-client.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_volume_client
diff --git a/qa/suites/fs/recovery/xfs.yaml b/qa/suites/fs/recovery/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/fs/recovery/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/fs/snaps/% b/qa/suites/fs/snaps/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/snaps/dirfrag/frag_enable.yaml b/qa/suites/fs/snaps/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/snaps/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/snaps/fs/btrfs.yaml b/qa/suites/fs/snaps/fs/btrfs.yaml
new file mode 100644
index 0000000..4c7af31
--- /dev/null
+++ b/qa/suites/fs/snaps/fs/btrfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/snaps/mount/ceph-fuse.yaml b/qa/suites/fs/snaps/mount/ceph-fuse.yaml
new file mode 100644
index 0000000..37ac5b6
--- /dev/null
+++ b/qa/suites/fs/snaps/mount/ceph-fuse.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
diff --git a/qa/suites/fs/snaps/tasks/snaptests.yaml b/qa/suites/fs/snaps/tasks/snaptests.yaml
new file mode 100644
index 0000000..790c93c
--- /dev/null
+++ b/qa/suites/fs/snaps/tasks/snaptests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - fs/snaps
diff --git a/qa/suites/fs/standbyreplay/% b/qa/suites/fs/standbyreplay/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/standbyreplay/clusters/standby-replay.yaml b/qa/suites/fs/standbyreplay/clusters/standby-replay.yaml
new file mode 100644
index 0000000..ba2442b
--- /dev/null
+++ b/qa/suites/fs/standbyreplay/clusters/standby-replay.yaml
@@ -0,0 +1,17 @@
+
+overrides:
+    ceph:
+        conf:
+            mds:
+                mds standby replay: true
+
+roles:
+- [mon.a, mds.a, mds.b-s-0, osd.0, osd.1, client.0]
+- [mon.b, mds.c-s-0, mds.d-s-0, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/standbyreplay/dirfrag/frag_enable.yaml b/qa/suites/fs/standbyreplay/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/standbyreplay/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/standbyreplay/mount/fuse.yaml b/qa/suites/fs/standbyreplay/mount/fuse.yaml
new file mode 100644
index 0000000..5769caa
--- /dev/null
+++ b/qa/suites/fs/standbyreplay/mount/fuse.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+    - install:
+    - ceph:
+    - ceph_fuse:
diff --git a/qa/suites/fs/standbyreplay/tasks/migration.yaml b/qa/suites/fs/standbyreplay/tasks/migration.yaml
new file mode 100644
index 0000000..183ef38
--- /dev/null
+++ b/qa/suites/fs/standbyreplay/tasks/migration.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_journal_migration
diff --git a/qa/suites/fs/standbyreplay/xfs.yaml b/qa/suites/fs/standbyreplay/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/fs/standbyreplay/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/fs/thrash/% b/qa/suites/fs/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/thrash/ceph-thrash/default.yaml b/qa/suites/fs/thrash/ceph-thrash/default.yaml
new file mode 100644
index 0000000..aefdf82
--- /dev/null
+++ b/qa/suites/fs/thrash/ceph-thrash/default.yaml
@@ -0,0 +1,2 @@
+tasks:
+- mds_thrash:
diff --git a/qa/suites/fs/thrash/ceph/base.yaml b/qa/suites/fs/thrash/ceph/base.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/fs/thrash/ceph/base.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml b/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml
new file mode 100644
index 0000000..cceea69
--- /dev/null
+++ b/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, mds.b-s-a]
+- [mon.b, mds.a, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/thrash/debug/mds_client.yaml b/qa/suites/fs/thrash/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/thrash/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/thrash/dirfrag/frag_enable.yaml b/qa/suites/fs/thrash/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/thrash/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/thrash/fs/xfs.yaml b/qa/suites/fs/thrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/fs/thrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/fs/thrash/msgr-failures/none.yaml b/qa/suites/fs/thrash/msgr-failures/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml
new file mode 100644
index 0000000..adcebc0
--- /dev/null
+++ b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml
@@ -0,0 +1,8 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd mds
+        ms inject delay probability: .005
+        ms inject delay max: 1
diff --git a/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml
new file mode 100644
index 0000000..7efe47d
--- /dev/null
+++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - fs/snaps
diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..5908d95
--- /dev/null
+++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..930bf4a
--- /dev/null
+++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..9509650
--- /dev/null
+++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/fs/traceless/% b/qa/suites/fs/traceless/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/traceless/debug/mds_client.yaml b/qa/suites/fs/traceless/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/traceless/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/traceless/dirfrag/frag_enable.yaml b/qa/suites/fs/traceless/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/traceless/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/traceless/fs/btrfs.yaml b/qa/suites/fs/traceless/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/traceless/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..ed9d92d
--- /dev/null
+++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..e678ed4
--- /dev/null
+++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..652a3a6
--- /dev/null
+++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..b58487c
--- /dev/null
+++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/fs/traceless/traceless/50pc.yaml b/qa/suites/fs/traceless/traceless/50pc.yaml
new file mode 100644
index 0000000..e0418bc
--- /dev/null
+++ b/qa/suites/fs/traceless/traceless/50pc.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds inject traceless reply probability: .5
diff --git a/qa/suites/fs/verify/% b/qa/suites/fs/verify/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml
new file mode 100644
index 0000000..bc120ad
--- /dev/null
+++ b/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/fs/verify/debug/+ b/qa/suites/fs/verify/debug/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/fs/verify/debug/mds_client.yaml b/qa/suites/fs/verify/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/fs/verify/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/fs/verify/debug/mon.yaml b/qa/suites/fs/verify/debug/mon.yaml
new file mode 100644
index 0000000..6ed3e6d
--- /dev/null
+++ b/qa/suites/fs/verify/debug/mon.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug ms: 1
+        debug mon: 20
diff --git a/qa/suites/fs/verify/dirfrag/frag_enable.yaml b/qa/suites/fs/verify/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..3d63bb2
--- /dev/null
+++ b/qa/suites/fs/verify/dirfrag/frag_enable.yaml
@@ -0,0 +1,10 @@
+
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds bal frag: true
+        mds bal split size: 100
+        mds bal merge size: 5
+        mds bal split bits: 3
+
diff --git a/qa/suites/fs/verify/fs/btrfs.yaml b/qa/suites/fs/verify/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/fs/verify/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..7331977
--- /dev/null
+++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1,12 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        debug client: 1/20
+        debug ms: 0/10
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..b58487c
--- /dev/null
+++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml b/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..22d1f14
--- /dev/null
+++ b/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs/test.sh
diff --git a/qa/suites/fs/verify/validater/lockdep.yaml b/qa/suites/fs/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/qa/suites/fs/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        lockdep: true
diff --git a/qa/suites/fs/verify/validater/valgrind.yaml b/qa/suites/fs/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..ec9adbb
--- /dev/null
+++ b/qa/suites/fs/verify/validater/valgrind.yaml
@@ -0,0 +1,16 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+      mds: [--tool=memcheck]
+  ceph-fuse:
+    client.0:
+      valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
diff --git a/qa/suites/hadoop/basic/% b/qa/suites/hadoop/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/hadoop/basic/clusters/fixed-3.yaml b/qa/suites/hadoop/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..44a5535
--- /dev/null
+++ b/qa/suites/hadoop/basic/clusters/fixed-3.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        client permissions: false
+roles:
+- [mon.0, mds.0, osd.0, hadoop.master.0]
+- [mon.1, osd.1, hadoop.slave.0]
+- [mon.2, hadoop.slave.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/qa/suites/hadoop/basic/tasks/repl.yaml b/qa/suites/hadoop/basic/tasks/repl.yaml
new file mode 100644
index 0000000..60cdcca
--- /dev/null
+++ b/qa/suites/hadoop/basic/tasks/repl.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+    clients:
+      client.0: [hadoop/repl.sh]
diff --git a/qa/suites/hadoop/basic/tasks/terasort.yaml b/qa/suites/hadoop/basic/tasks/terasort.yaml
new file mode 100644
index 0000000..4377894
--- /dev/null
+++ b/qa/suites/hadoop/basic/tasks/terasort.yaml
@@ -0,0 +1,10 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit: 
+    clients:
+      client.0: [hadoop/terasort.sh]
+    env:
+      NUM_RECORDS: "10000000"
diff --git a/qa/suites/hadoop/basic/tasks/wordcount.yaml b/qa/suites/hadoop/basic/tasks/wordcount.yaml
new file mode 100644
index 0000000..b84941b
--- /dev/null
+++ b/qa/suites/hadoop/basic/tasks/wordcount.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit: 
+    clients:
+      client.0: [hadoop/wordcount.sh]
diff --git a/qa/suites/hadoop/basic/xfs.yaml b/qa/suites/hadoop/basic/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/hadoop/basic/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/kcephfs/cephfs/% b/qa/suites/kcephfs/cephfs/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml b/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml
new file mode 100644
index 0000000..499c84c
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/kcephfs/cephfs/conf.yaml b/qa/suites/kcephfs/cephfs/conf.yaml
new file mode 100644
index 0000000..b3ef404
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      mds:
+        debug mds: 20
diff --git a/qa/suites/kcephfs/cephfs/fs/btrfs.yaml b/qa/suites/kcephfs/cephfs/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/kcephfs/cephfs/inline/no.yaml b/qa/suites/kcephfs/cephfs/inline/no.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/inline/no.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/kcephfs/cephfs/inline/yes.yaml b/qa/suites/kcephfs/cephfs/inline/yes.yaml
new file mode 100644
index 0000000..fce64c6
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/inline/yes.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- exec:
+    client.0:
+      - sudo ceph mds set inline_data true --yes-i-really-mean-it
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml
new file mode 100644
index 0000000..cc4b32a
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml
@@ -0,0 +1,7 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - direct_io
+
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..84d15f6
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - kernel_untar_build.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml
new file mode 100644
index 0000000..e3f4fb1
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - fs/misc
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml
new file mode 100644
index 0000000..5219fc9
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml
@@ -0,0 +1,7 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - fs/test_o_trunc.sh
+
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml
new file mode 100644
index 0000000..e815800
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - fs/snaps
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..8dd810a
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..059ffe1
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..bc49fc9
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..38d9604
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..452641c
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsync-tester.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..832e024
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..09abaeb
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..d317a39
--- /dev/null
+++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/kcephfs/mixed-clients/% b/qa/suites/kcephfs/mixed-clients/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml b/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml
new file mode 100644
index 0000000..26e029d
--- /dev/null
+++ b/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
diff --git a/qa/suites/kcephfs/mixed-clients/conf.yaml b/qa/suites/kcephfs/mixed-clients/conf.yaml
new file mode 100644
index 0000000..75b8558
--- /dev/null
+++ b/qa/suites/kcephfs/mixed-clients/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      mds:
+        debug mds: 20
\ No newline at end of file
diff --git a/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml b/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
new file mode 100644
index 0000000..0121a01
--- /dev/null
+++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+- ceph:
+- parallel:
+   - user-workload
+   - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+         client.0:
+           - suites/iozone.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+         client.1:
+           - suites/dbench.sh
diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
new file mode 100644
index 0000000..7b0ce5b
--- /dev/null
+++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+- ceph:
+- parallel:
+   - user-workload
+   - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+         client.0:
+           - suites/blogbench.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+         client.1:
+           - kernel_untar_build.sh
diff --git a/qa/suites/kcephfs/thrash/% b/qa/suites/kcephfs/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml b/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml
new file mode 100644
index 0000000..499c84c
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/kcephfs/thrash/conf.yaml b/qa/suites/kcephfs/thrash/conf.yaml
new file mode 100644
index 0000000..75b8558
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      mds:
+        debug mds: 20
\ No newline at end of file
diff --git a/qa/suites/kcephfs/thrash/fs/btrfs.yaml b/qa/suites/kcephfs/thrash/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/kcephfs/thrash/thrashers/default.yaml b/qa/suites/kcephfs/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..14d7725
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/thrashers/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
diff --git a/qa/suites/kcephfs/thrash/thrashers/mds.yaml b/qa/suites/kcephfs/thrash/thrashers/mds.yaml
new file mode 100644
index 0000000..cab4a01
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/thrashers/mds.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- mds_thrash:
diff --git a/qa/suites/kcephfs/thrash/thrashers/mon.yaml b/qa/suites/kcephfs/thrash/thrashers/mon.yaml
new file mode 100644
index 0000000..90612f2
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/thrashers/mon.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..0c4a152
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..832e024
--- /dev/null
+++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/knfs/basic/% b/qa/suites/knfs/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/knfs/basic/ceph/base.yaml b/qa/suites/knfs/basic/ceph/base.yaml
new file mode 100644
index 0000000..7e80c46
--- /dev/null
+++ b/qa/suites/knfs/basic/ceph/base.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+
+tasks:
+- install:
+- ceph:
+- kclient: [client.0]
+- knfsd:
+    client.0:
+      options: [rw,no_root_squash,async]
diff --git a/qa/suites/knfs/basic/clusters/extra-client.yaml b/qa/suites/knfs/basic/clusters/extra-client.yaml
new file mode 100644
index 0000000..349439c
--- /dev/null
+++ b/qa/suites/knfs/basic/clusters/extra-client.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/knfs/basic/fs/btrfs.yaml b/qa/suites/knfs/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/knfs/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/knfs/basic/mount/v3.yaml b/qa/suites/knfs/basic/mount/v3.yaml
new file mode 100644
index 0000000..1b61119
--- /dev/null
+++ b/qa/suites/knfs/basic/mount/v3.yaml
@@ -0,0 +1,5 @@
+tasks:
+- nfs:
+    client.1:
+        server: client.0
+        options: [rw,hard,intr,nfsvers=3]
diff --git a/qa/suites/knfs/basic/mount/v4.yaml b/qa/suites/knfs/basic/mount/v4.yaml
new file mode 100644
index 0000000..8840566
--- /dev/null
+++ b/qa/suites/knfs/basic/mount/v4.yaml
@@ -0,0 +1,5 @@
+tasks:
+- nfs:
+    client.1:
+        server: client.0
+        options: [rw,hard,intr,nfsvers=4]
diff --git a/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml b/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
new file mode 100644
index 0000000..b9c0a5e
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    timeout: 6h
+    clients:
+        client.1:
+            - kernel_untar_build.sh
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
new file mode 100644
index 0000000..135c4a7
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
@@ -0,0 +1,11 @@
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - fs/misc/chmod.sh
+            - fs/misc/i_complete_vs_rename.sh
+            - fs/misc/trivial_sync.sh
+            #- fs/misc/multiple_rsync.sh
+            #- fs/misc/xattrs.sh
+# Once we can run multiple_rsync.sh and xattrs.sh we can change to this
+#    - misc
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..e554a3d
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - suites/blogbench.sh
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..1da1b76
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - suites/dbench-short.sh
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..3090f91
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - suites/ffsb.sh
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..bbe7b7a
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - suites/fsstress.sh
diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..7c3eec2
--- /dev/null
+++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+        client.1:
+            - suites/iozone.sh
diff --git a/qa/suites/krbd/rbd-nomount/% b/qa/suites/krbd/rbd-nomount/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/krbd/rbd-nomount/conf.yaml b/qa/suites/krbd/rbd-nomount/conf.yaml
new file mode 100644
index 0000000..3ee426b
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      client:
+        rbd default features: 1
diff --git a/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml b/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/krbd/rbd-nomount/install/ceph.yaml b/qa/suites/krbd/rbd-nomount/install/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/install/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml
new file mode 100644
index 0000000..675b98e
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml
@@ -0,0 +1,10 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/concurrent.sh
+# Options for rbd/concurrent.sh (default values shown)
+#    env:
+#        RBD_CONCURRENT_ITER: 100
+#        RBD_CONCURRENT_COUNT: 5
+#        RBD_CONCURRENT_DELAY: 5
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml
new file mode 100644
index 0000000..ea421ee
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/huge-tickets.sh
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml
new file mode 100644
index 0000000..e5017e1
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml
@@ -0,0 +1,15 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/image_read.sh
+# Options for rbd/image_read.sh (default values shown)
+#    env:
+#        IMAGE_READ_LOCAL_FILES: 'false'
+#        IMAGE_READ_FORMAT: '2'
+#        IMAGE_READ_VERBOSE: 'true'
+#        IMAGE_READ_PAGE_SIZE: '4096'
+#        IMAGE_READ_OBJECT_ORDER: '22'
+#        IMAGE_READ_TEST_CLONES: 'true'
+#        IMAGE_READ_DOUBLE_ORDER: 'true'
+#        IMAGE_READ_HALF_ORDER: 'false'
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml
new file mode 100644
index 0000000..aa15582
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/kernel.sh
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml
new file mode 100644
index 0000000..0f4b24a
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 10000
+    krbd: true
+    readbdy: 512
+    writebdy: 512
+    truncbdy: 512
+    holebdy: 512
+    punch_holes: true
+    randomized_striping: false
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml
new file mode 100644
index 0000000..c152939
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/map-snapshot-io.sh
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml
new file mode 100644
index 0000000..c216099
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/map-unmap.sh
diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml
new file mode 100644
index 0000000..c493cfa
--- /dev/null
+++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/simple_big.sh
+
diff --git a/qa/suites/krbd/rbd/% b/qa/suites/krbd/rbd/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/krbd/rbd/clusters/fixed-3.yaml b/qa/suites/krbd/rbd/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/krbd/rbd/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/krbd/rbd/conf.yaml b/qa/suites/krbd/rbd/conf.yaml
new file mode 100644
index 0000000..3ee426b
--- /dev/null
+++ b/qa/suites/krbd/rbd/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      client:
+        rbd default features: 1
diff --git a/qa/suites/krbd/rbd/fs/btrfs.yaml b/qa/suites/krbd/rbd/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/krbd/rbd/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/krbd/rbd/msgr-failures/few.yaml b/qa/suites/krbd/rbd/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/krbd/rbd/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/krbd/rbd/msgr-failures/many.yaml b/qa/suites/krbd/rbd/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/krbd/rbd/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/krbd/rbd/tasks/rbd_fio.yaml b/qa/suites/krbd/rbd/tasks/rbd_fio.yaml
new file mode 100644
index 0000000..548403c
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_fio.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph: null
+- rbd_fio:
+    client.0:
+       fio-io-size: 90%
+       formats: [2]
+       features: [[layering]]
+       io-engine: sync
+       rw: randrw
+       runtime: 900
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..ef2a35d
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+- workunit:
+    clients:
+      all:
+        - kernel_untar_build.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..d779eea
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..5204bb8
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+      image_size: 20480
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..f9d62fe
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml
new file mode 100644
index 0000000..f3930a8
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+      fs_type: btrfs
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml
new file mode 100644
index 0000000..f765b74
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+      fs_type: ext4
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..98c0849
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..eb8f18d
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+      image_size: 20480
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..7c2796b
--- /dev/null
+++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+    all:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/krbd/singleton/% b/qa/suites/krbd/singleton/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/krbd/singleton/conf.yaml b/qa/suites/krbd/singleton/conf.yaml
new file mode 100644
index 0000000..3ee426b
--- /dev/null
+++ b/qa/suites/krbd/singleton/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      client:
+        rbd default features: 1
diff --git a/qa/suites/krbd/singleton/fs/btrfs.yaml b/qa/suites/krbd/singleton/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/krbd/singleton/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/krbd/singleton/msgr-failures/few.yaml b/qa/suites/krbd/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/krbd/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/krbd/singleton/msgr-failures/many.yaml b/qa/suites/krbd/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/krbd/singleton/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml
new file mode 100644
index 0000000..237a494
--- /dev/null
+++ b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+- rbd.xfstests:
+    client.0:
+        test_image: 'test_image-0'
+        scratch_image: 'scratch_image-0'
+        tests: '-g auto'
+        randomize: true
+    client.1:
+        test_image: 'test_image-1'
+        scratch_image: 'scratch_image-1'
+        tests: '-g auto'
+        randomize: true
diff --git a/qa/suites/krbd/thrash/% b/qa/suites/krbd/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/krbd/thrash/clusters/fixed-3.yaml b/qa/suites/krbd/thrash/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/krbd/thrash/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/krbd/thrash/conf.yaml b/qa/suites/krbd/thrash/conf.yaml
new file mode 100644
index 0000000..3ee426b
--- /dev/null
+++ b/qa/suites/krbd/thrash/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      client:
+        rbd default features: 1
diff --git a/qa/suites/krbd/thrash/fs/btrfs.yaml b/qa/suites/krbd/thrash/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/krbd/thrash/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/krbd/thrash/thrashers/default.yaml b/qa/suites/krbd/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..14d7725
--- /dev/null
+++ b/qa/suites/krbd/thrash/thrashers/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
diff --git a/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml
new file mode 100644
index 0000000..90612f2
--- /dev/null
+++ b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
diff --git a/qa/suites/krbd/thrash/workloads/rbd_fio.yaml b/qa/suites/krbd/thrash/workloads/rbd_fio.yaml
new file mode 100644
index 0000000..f219ae1
--- /dev/null
+++ b/qa/suites/krbd/thrash/workloads/rbd_fio.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rbd_fio:
+    client.0:
+       fio-io-size: 90%
+       formats: [2]
+       features: [[layering]]
+       io-engine: sync
+       rw: randrw
+       runtime: 1200
diff --git a/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..4ae7d69
--- /dev/null
+++ b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rbd:
+    all:
+      image_size: 20480
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled
new file mode 100644
index 0000000..d61ede1
--- /dev/null
+++ b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled
@@ -0,0 +1,8 @@
+tasks:
+- rbd:
+    all:
+      image_size: 20480
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/krbd/unmap/% b/qa/suites/krbd/unmap/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/krbd/unmap/ceph/ceph.yaml b/qa/suites/krbd/unmap/ceph/ceph.yaml
new file mode 100644
index 0000000..a9713b3
--- /dev/null
+++ b/qa/suites/krbd/unmap/ceph/ceph.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    crush_tunables: bobtail
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/krbd/unmap/clusters/separate-client.yaml b/qa/suites/krbd/unmap/clusters/separate-client.yaml
new file mode 100644
index 0000000..a2184ee
--- /dev/null
+++ b/qa/suites/krbd/unmap/clusters/separate-client.yaml
@@ -0,0 +1,16 @@
+# fixed-1.yaml, but with client.0 on a separate target
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/krbd/unmap/conf.yaml b/qa/suites/krbd/unmap/conf.yaml
new file mode 100644
index 0000000..429b8e1
--- /dev/null
+++ b/qa/suites/krbd/unmap/conf.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default features: 1
diff --git a/qa/suites/krbd/unmap/kernels/pre-single-major.yaml b/qa/suites/krbd/unmap/kernels/pre-single-major.yaml
new file mode 100644
index 0000000..eb5e7a8
--- /dev/null
+++ b/qa/suites/krbd/unmap/kernels/pre-single-major.yaml
@@ -0,0 +1,10 @@
+overrides:
+  kernel:
+    client.0:
+      branch: nightly_pre-single-major # nightly/pre-single-major, v3.13
+tasks:
+- exec:
+    client.0:
+    - "modprobe -r rbd"
+    - "modprobe --first-time rbd"
+    - "test ! -f /sys/module/rbd/parameters/single_major"
diff --git a/qa/suites/krbd/unmap/kernels/single-major-off.yaml b/qa/suites/krbd/unmap/kernels/single-major-off.yaml
new file mode 100644
index 0000000..9dc2488
--- /dev/null
+++ b/qa/suites/krbd/unmap/kernels/single-major-off.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+    client.0:
+    - "modprobe -r rbd"
+    - "modprobe --first-time rbd single_major=0"
+    - "grep -q N /sys/module/rbd/parameters/single_major"
diff --git a/qa/suites/krbd/unmap/kernels/single-major-on.yaml b/qa/suites/krbd/unmap/kernels/single-major-on.yaml
new file mode 100644
index 0000000..c3889f3
--- /dev/null
+++ b/qa/suites/krbd/unmap/kernels/single-major-on.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+    client.0:
+    - "modprobe -r rbd"
+    - "modprobe --first-time rbd single_major=1"
+    - "grep -q Y /sys/module/rbd/parameters/single_major"
diff --git a/qa/suites/krbd/unmap/tasks/unmap.yaml b/qa/suites/krbd/unmap/tasks/unmap.yaml
new file mode 100644
index 0000000..05cc5f3
--- /dev/null
+++ b/qa/suites/krbd/unmap/tasks/unmap.yaml
@@ -0,0 +1,5 @@
+tasks:
+- cram:
+    clients:
+      client.0:
+      - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/unmap.t
diff --git a/qa/suites/krbd/unmap/xfs.yaml b/qa/suites/krbd/unmap/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/krbd/unmap/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/marginal/basic/% b/qa/suites/marginal/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/marginal/basic/clusters/fixed-3.yaml b/qa/suites/marginal/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..0038432
--- /dev/null
+++ b/qa/suites/marginal/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/qa/suites/marginal/basic/fs/btrfs.yaml b/qa/suites/marginal/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/marginal/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..4f25d80
--- /dev/null
+++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..a0d2e76
--- /dev/null
+++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/marginal/fs-misc/% b/qa/suites/marginal/fs-misc/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/marginal/fs-misc/clusters/two_clients.yaml b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml
new file mode 100644
index 0000000..2258bef
--- /dev/null
+++ b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.1]
+- [client.0]
diff --git a/qa/suites/marginal/fs-misc/fs/btrfs.yaml b/qa/suites/marginal/fs-misc/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/marginal/fs-misc/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/marginal/fs-misc/tasks/locktest.yaml b/qa/suites/marginal/fs-misc/tasks/locktest.yaml
new file mode 100644
index 0000000..444bb1f
--- /dev/null
+++ b/qa/suites/marginal/fs-misc/tasks/locktest.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- locktest: [client.0, client.1]
diff --git a/qa/suites/marginal/mds_restart/% b/qa/suites/marginal/mds_restart/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/marginal/mds_restart/clusters/one_mds.yaml b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml
new file mode 100644
index 0000000..9e11c02
--- /dev/null
+++ b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+- [mds.a]
+- [client.0]
diff --git a/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml
new file mode 100644
index 0000000..d086d4c
--- /dev/null
+++ b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      mds:
+        mds log segment size: 16384
+        mds log max segments: 1
+- restart:
+    exec:
+     client.0:
+       - test-backtraces.py
diff --git a/qa/suites/marginal/multimds/% b/qa/suites/marginal/multimds/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml
new file mode 100644
index 0000000..088d9f0
--- /dev/null
+++ b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
diff --git a/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml
new file mode 100644
index 0000000..be824f0
--- /dev/null
+++ b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
diff --git a/qa/suites/marginal/multimds/fs/btrfs.yaml b/qa/suites/marginal/multimds/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/marginal/multimds/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml
new file mode 100644
index 0000000..55d8beb
--- /dev/null
+++ b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
+- ceph-fuse:
diff --git a/qa/suites/marginal/multimds/mounts/kclient.yaml b/qa/suites/marginal/multimds/mounts/kclient.yaml
new file mode 100644
index 0000000..c18db8f
--- /dev/null
+++ b/qa/suites/marginal/multimds/mounts/kclient.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- kclient:
diff --git a/qa/suites/marginal/multimds/tasks/workunit_misc.yaml b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml
new file mode 100644
index 0000000..aa62b9e
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - fs/misc
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..4c1fcc1
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml
new file mode 100644
index 0000000..41b2bc8
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..ddb18fb
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml
new file mode 100644
index 0000000..7efa1ad
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsync-tester.sh
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml
new file mode 100644
index 0000000..dfb3abe
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        fuse_default_permissions: 1
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..3aa5f88
--- /dev/null
+++ b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+- ceph-fuse:
+- exec:
+    client.0:
+      - dd if=/dev/zero of=./foo count=100
+      - sleep 2
+      - truncate --size 0 ./foo
diff --git a/qa/suites/marginal/multimds/thrash/exports.yaml b/qa/suites/marginal/multimds/thrash/exports.yaml
new file mode 100644
index 0000000..240b46d
--- /dev/null
+++ b/qa/suites/marginal/multimds/thrash/exports.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds thrash exports: 1
diff --git a/qa/suites/marginal/multimds/thrash/normal.yaml b/qa/suites/marginal/multimds/thrash/normal.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..e1d3c7b
--- /dev/null
+++ b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3, client.0]
+- [client.1]
diff --git a/qa/suites/mixed-clients/basic/fs/btrfs.yaml b/qa/suites/mixed-clients/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/mixed-clients/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
new file mode 100644
index 0000000..bb347be
--- /dev/null
+++ b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
@@ -0,0 +1,26 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+    branch: dumpling
+- ceph:
+- parallel:
+   - user-workload
+   - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+         client.0:
+           - suites/iozone.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+         client.1:
+           - suites/dbench.sh
diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
new file mode 100644
index 0000000..2c32a61
--- /dev/null
+++ b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
@@ -0,0 +1,26 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+    branch: dumpling
+- ceph:
+- parallel:
+   - user-workload
+   - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+         client.0:
+           - suites/blogbench.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+         client.1:
+           - kernel_untar_build.sh
diff --git a/qa/suites/multimds/basic/% b/qa/suites/multimds/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/multimds/basic/ceph/base.yaml b/qa/suites/multimds/basic/ceph/base.yaml
new file mode 100644
index 0000000..50b60b5
--- /dev/null
+++ b/qa/suites/multimds/basic/ceph/base.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
diff --git a/qa/suites/multimds/basic/clusters/3-mds.yaml b/qa/suites/multimds/basic/clusters/3-mds.yaml
new file mode 100644
index 0000000..c655b90
--- /dev/null
+++ b/qa/suites/multimds/basic/clusters/3-mds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/qa/suites/multimds/basic/clusters/9-mds.yaml b/qa/suites/multimds/basic/clusters/9-mds.yaml
new file mode 100644
index 0000000..ed554c9
--- /dev/null
+++ b/qa/suites/multimds/basic/clusters/9-mds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/qa/suites/multimds/basic/debug/mds_client.yaml b/qa/suites/multimds/basic/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/multimds/basic/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/multimds/basic/fs/btrfs.yaml b/qa/suites/multimds/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/multimds/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/multimds/basic/inline/no.yaml b/qa/suites/multimds/basic/inline/no.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/multimds/basic/inline/yes.yaml b/qa/suites/multimds/basic/inline/yes.yaml
new file mode 100644
index 0000000..ae5222f
--- /dev/null
+++ b/qa/suites/multimds/basic/inline/yes.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph mds set inline_data true --yes-i-really-mean-it
diff --git a/qa/suites/multimds/basic/mount/cfuse.yaml b/qa/suites/multimds/basic/mount/cfuse.yaml
new file mode 100644
index 0000000..e3c34a1
--- /dev/null
+++ b/qa/suites/multimds/basic/mount/cfuse.yaml
@@ -0,0 +1,2 @@
+tasks:
+- ceph-fuse:
diff --git a/qa/suites/multimds/basic/mount/kclient.yaml b/qa/suites/multimds/basic/mount/kclient.yaml
new file mode 100644
index 0000000..f00f16a
--- /dev/null
+++ b/qa/suites/multimds/basic/mount/kclient.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- kclient:
diff --git a/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml b/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml
new file mode 100644
index 0000000..8dbc24a
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
+tasks:
+- workunit:
+    clients:
+      all:
+        - kernel_untar_build.sh
diff --git a/qa/suites/multimds/basic/tasks/misc.yaml b/qa/suites/multimds/basic/tasks/misc.yaml
new file mode 100644
index 0000000..6c8327b
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/misc.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    timeout: 5h
+    clients:
+      all:
+        - fs/misc
diff --git a/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml b/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml
new file mode 100644
index 0000000..c9de5c3
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - fs/test_o_trunc.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_blogbench.yaml b/qa/suites/multimds/basic/tasks/suites_blogbench.yaml
new file mode 100644
index 0000000..4c1fcc1
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_blogbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_dbench.yaml b/qa/suites/multimds/basic/tasks/suites_dbench.yaml
new file mode 100644
index 0000000..41b2bc8
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_dbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_ffsb.yaml b/qa/suites/multimds/basic/tasks/suites_ffsb.yaml
new file mode 100644
index 0000000..4a2a627
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_ffsb.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_fsstress.yaml b/qa/suites/multimds/basic/tasks/suites_fsstress.yaml
new file mode 100644
index 0000000..ddb18fb
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_fsx.yaml b/qa/suites/multimds/basic/tasks/suites_fsx.yaml
new file mode 100644
index 0000000..8b2b1ab
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_fsx.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_fsync.yaml b/qa/suites/multimds/basic/tasks/suites_fsync.yaml
new file mode 100644
index 0000000..7efa1ad
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_fsync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/fsync-tester.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_iogen.yaml b/qa/suites/multimds/basic/tasks/suites_iogen.yaml
new file mode 100644
index 0000000..d45d4ea
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_iogen.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/iogen.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_iozone.yaml b/qa/suites/multimds/basic/tasks/suites_iozone.yaml
new file mode 100644
index 0000000..9270f3c
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_iozone.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_pjd.yaml b/qa/suites/multimds/basic/tasks/suites_pjd.yaml
new file mode 100644
index 0000000..de21f7c
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_pjd.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 1
+        debug client: 20
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml b/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml
new file mode 100644
index 0000000..ac5c9b1
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+tasks:
+- exec:
+    client.0:
+      - dd if=/dev/zero of=./foo count=100
+      - sleep 2
+      - truncate --size 0 ./foo
diff --git a/qa/suites/multimds/basic/tasks/trivial_sync.yaml b/qa/suites/multimds/basic/tasks/trivial_sync.yaml
new file mode 100644
index 0000000..36e7411
--- /dev/null
+++ b/qa/suites/multimds/basic/tasks/trivial_sync.yaml
@@ -0,0 +1,4 @@
+tasks:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/multimds/libcephfs/% b/qa/suites/multimds/libcephfs/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/multimds/libcephfs/ceph/base.yaml b/qa/suites/multimds/libcephfs/ceph/base.yaml
new file mode 100644
index 0000000..50b60b5
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/ceph/base.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
diff --git a/qa/suites/multimds/libcephfs/clusters/3-mds.yaml b/qa/suites/multimds/libcephfs/clusters/3-mds.yaml
new file mode 100644
index 0000000..1f46e7f
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/clusters/3-mds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5, client.0]
+
diff --git a/qa/suites/multimds/libcephfs/clusters/9-mds.yaml b/qa/suites/multimds/libcephfs/clusters/9-mds.yaml
new file mode 100644
index 0000000..252a63e
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/clusters/9-mds.yaml
@@ -0,0 +1,3 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5, client.0]
diff --git a/qa/suites/multimds/libcephfs/debug/mds_client.yaml b/qa/suites/multimds/libcephfs/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/multimds/libcephfs/fs/btrfs.yaml b/qa/suites/multimds/libcephfs/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/multimds/libcephfs/inline/no.yaml b/qa/suites/multimds/libcephfs/inline/no.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/multimds/libcephfs/inline/yes.yaml b/qa/suites/multimds/libcephfs/inline/yes.yaml
new file mode 100644
index 0000000..ae5222f
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/inline/yes.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph mds set inline_data true --yes-i-really-mean-it
diff --git a/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml b/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..0b1d41f
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs/test.sh
diff --git a/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml b/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml
new file mode 100644
index 0000000..4330d50
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs-java/test.sh
diff --git a/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml b/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml
new file mode 100644
index 0000000..cd87f28
--- /dev/null
+++ b/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml
@@ -0,0 +1,6 @@
+tasks:
+- mds_creation_failure:
+- ceph-fuse:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
diff --git a/qa/suites/multimds/verify/% b/qa/suites/multimds/verify/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/multimds/verify/ceph/base.yaml b/qa/suites/multimds/verify/ceph/base.yaml
new file mode 100644
index 0000000..50b60b5
--- /dev/null
+++ b/qa/suites/multimds/verify/ceph/base.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
diff --git a/qa/suites/multimds/verify/clusters/3-mds.yaml b/qa/suites/multimds/verify/clusters/3-mds.yaml
new file mode 100644
index 0000000..7ebdeba
--- /dev/null
+++ b/qa/suites/multimds/verify/clusters/3-mds.yaml
@@ -0,0 +1,3 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5, client.0]
diff --git a/qa/suites/multimds/verify/clusters/9-mds.yaml b/qa/suites/multimds/verify/clusters/9-mds.yaml
new file mode 100644
index 0000000..252a63e
--- /dev/null
+++ b/qa/suites/multimds/verify/clusters/9-mds.yaml
@@ -0,0 +1,3 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5, client.0]
diff --git a/qa/suites/multimds/verify/debug/mds_client.yaml b/qa/suites/multimds/verify/debug/mds_client.yaml
new file mode 100644
index 0000000..c6fec3f
--- /dev/null
+++ b/qa/suites/multimds/verify/debug/mds_client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        debug ms: 1
+        debug mds: 20
+      client:
+        debug ms: 1
+        debug client: 20
\ No newline at end of file
diff --git a/qa/suites/multimds/verify/fs/btrfs.yaml b/qa/suites/multimds/verify/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/multimds/verify/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml
new file mode 100644
index 0000000..5cf329f
--- /dev/null
+++ b/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 10
\ No newline at end of file
diff --git a/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..ad96b4c
--- /dev/null
+++ b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..5908d95
--- /dev/null
+++ b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml b/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..0b1d41f
--- /dev/null
+++ b/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs/test.sh
diff --git a/qa/suites/multimds/verify/validater/lockdep.yaml b/qa/suites/multimds/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/qa/suites/multimds/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        lockdep: true
diff --git a/qa/suites/multimds/verify/validater/valgrind.yaml b/qa/suites/multimds/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..ec9adbb
--- /dev/null
+++ b/qa/suites/multimds/verify/validater/valgrind.yaml
@@ -0,0 +1,16 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+      mds: [--tool=memcheck]
+  ceph-fuse:
+    client.0:
+      valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
diff --git a/qa/suites/powercycle/osd/% b/qa/suites/powercycle/osd/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml
new file mode 100644
index 0000000..d5503a4
--- /dev/null
+++ b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.0, mon.1, mon.2, mds.0, client.0]
+- [osd.0]
+- [osd.1]
+- [osd.2]
diff --git a/qa/suites/powercycle/osd/fs/btrfs.yaml b/qa/suites/powercycle/osd/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/powercycle/osd/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/powercycle/osd/fs/xfs.yaml b/qa/suites/powercycle/osd/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/powercycle/osd/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/powercycle/osd/powercycle/default.yaml b/qa/suites/powercycle/osd/powercycle/default.yaml
new file mode 100644
index 0000000..b632e83
--- /dev/null
+++ b/qa/suites/powercycle/osd/powercycle/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- thrashosds:
+    chance_down: 1.0
+    powercycle: true
+    timeout: 600
diff --git a/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml
new file mode 100644
index 0000000..3b1a892
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      client.0:
+        admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 60
+- admin_socket:
+    client.0:
+      objecter_requests:
+        test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..87f8f57
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml
@@ -0,0 +1,12 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - kernel_untar_build.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..683d3f5
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - fs/misc
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..9f3fa7b
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/ffsb.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..5908d95
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..9403151
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..c6043e2
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsync-tester.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..930bf4a
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..f3efafa
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+tasks:
+- ceph-fuse:
+- exec:
+    client.0:
+      - dd if=/dev/zero of=./foo count=100
+      - sleep 2
+      - truncate --size 0 ./foo
diff --git a/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..e654998
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/powercycle/osd/tasks/radosbench.yaml b/qa/suites/powercycle/osd/tasks/radosbench.yaml
new file mode 100644
index 0000000..1788ff7
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/radosbench.yaml
@@ -0,0 +1,26 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
diff --git a/qa/suites/powercycle/osd/tasks/readwrite.yaml b/qa/suites/powercycle/osd/tasks/readwrite.yaml
new file mode 100644
index 0000000..c53e52b
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/readwrite.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
diff --git a/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml
new file mode 100644
index 0000000..1ffe4e1
--- /dev/null
+++ b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/rados/basic/% b/qa/suites/rados/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/basic/clusters/+ b/qa/suites/rados/basic/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/basic/clusters/fixed-2.yaml b/qa/suites/rados/basic/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rados/basic/clusters/openstack.yaml b/qa/suites/rados/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..e4d8423
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/basic/fs/btrfs.yaml b/qa/suites/rados/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/basic/fs/xfs.yaml b/qa/suites/rados/basic/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/basic/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/basic/msgr-failures/few.yaml b/qa/suites/rados/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rados/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rados/basic/msgr-failures/many.yaml b/qa/suites/rados/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..038c3a7
--- /dev/null
+++ b/qa/suites/rados/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 1500
diff --git a/qa/suites/rados/basic/msgr/async.yaml b/qa/suites/rados/basic/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/basic/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/basic/msgr/random.yaml b/qa/suites/rados/basic/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/basic/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/basic/msgr/simple.yaml b/qa/suites/rados/basic/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/basic/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/basic/rados.yaml b/qa/suites/rados/basic/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/basic/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/qa/suites/rados/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..acfc597
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - reached quota
+    - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
+        - rados/test_pool_quota.sh
+
diff --git a/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/qa/suites/rados/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..34f7cbb
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      client.0:
+        - cls
diff --git a/qa/suites/rados/basic/tasks/rados_python.yaml b/qa/suites/rados/basic/tasks/rados_python.yaml
new file mode 100644
index 0000000..0032053
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_python.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+- workunit:
+    clients:
+      client.0:
+        - rados/test_python.sh
diff --git a/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
new file mode 100644
index 0000000..ae2e5fd
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      client.0:
+        - rados/stress_watch.sh
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
new file mode 100644
index 0000000..9432367
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-big.sh
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 0000000..7d882ca
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-mix.sh
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
new file mode 100644
index 0000000..69c06b7
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-mostlyread.sh
diff --git a/qa/suites/rados/basic/tasks/repair_test.yaml b/qa/suites/rados/basic/tasks/repair_test.yaml
new file mode 100644
index 0000000..277a5a2
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/repair_test.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist: ['candidate had a stat error', 'candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', '!= known data_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size', ' [...]
+    conf:
+      osd:
+        filestore debug inject read err : true
+tasks:
+- install:
+- ceph:
+- repair_test:
+
diff --git a/qa/suites/rados/basic/tasks/scrub_test.yaml b/qa/suites/rados/basic/tasks/scrub_test.yaml
new file mode 100644
index 0000000..e3b07b6
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/scrub_test.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - '!= best guess digest'
+    - '!= best guess data_digest'
+    - '!= best guess omap_digest'
+    - '!= known digest'
+    - '!= known data_digest'
+    - '!= known omap_digest'
+    - deep-scrub 0 missing, 1 inconsistent objects
+    - deep-scrub 1 errors
+    - repair 0 missing, 1 inconsistent objects
+    - repair 1 errors, 1 fixed
+    - shard [0-9]+ missing
+    - deep-scrub 1 missing, 0 inconsistent objects
+tasks:
+- install:
+- ceph:
+- scrub_test:
diff --git a/qa/suites/rados/monthrash/% b/qa/suites/rados/monthrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/monthrash/ceph/ceph.yaml b/qa/suites/rados/monthrash/ceph/ceph.yaml
new file mode 100644
index 0000000..a2c0efc
--- /dev/null
+++ b/qa/suites/rados/monthrash/ceph/ceph.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 25
+        paxos service trim min: 5
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/monthrash/clusters/3-mons.yaml b/qa/suites/rados/monthrash/clusters/3-mons.yaml
new file mode 100644
index 0000000..b3a2caf
--- /dev/null
+++ b/qa/suites/rados/monthrash/clusters/3-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/monthrash/clusters/9-mons.yaml b/qa/suites/rados/monthrash/clusters/9-mons.yaml
new file mode 100644
index 0000000..5433211
--- /dev/null
+++ b/qa/suites/rados/monthrash/clusters/9-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
+- [mon.f, mon.g, mon.h, mon.i, osd.3, osd.4, osd.5, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/monthrash/fs/xfs.yaml b/qa/suites/rados/monthrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/monthrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/monthrash/msgr-failures/few.yaml b/qa/suites/rados/monthrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
new file mode 100644
index 0000000..03b7e37
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: mon
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/monthrash/msgr/async.yaml b/qa/suites/rados/monthrash/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/monthrash/msgr/random.yaml b/qa/suites/rados/monthrash/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/monthrash/msgr/simple.yaml b/qa/suites/rados/monthrash/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/monthrash/rados.yaml b/qa/suites/rados/monthrash/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/monthrash/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
new file mode 100644
index 0000000..2867f2d
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
@@ -0,0 +1,6 @@
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
+    thrash_store: true
+    thrash_many: true
diff --git a/qa/suites/rados/monthrash/thrashers/many.yaml b/qa/suites/rados/monthrash/thrashers/many.yaml
new file mode 100644
index 0000000..fe52bb2
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/many.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        mon client ping interval: 4
+        mon client ping timeout: 12
+tasks:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
+    thrash_many: true
+    freeze_mon_duration: 20
+    freeze_mon_probability: 10
diff --git a/qa/suites/rados/monthrash/thrashers/one.yaml b/qa/suites/rados/monthrash/thrashers/one.yaml
new file mode 100644
index 0000000..2ce44c8
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/one.yaml
@@ -0,0 +1,4 @@
+tasks:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
diff --git a/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/qa/suites/rados/monthrash/thrashers/sync-many.yaml
new file mode 100644
index 0000000..9868f18
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/sync-many.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        paxos min: 10
+        paxos trim min: 10
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
+    thrash_many: true
diff --git a/qa/suites/rados/monthrash/thrashers/sync.yaml b/qa/suites/rados/monthrash/thrashers/sync.yaml
new file mode 100644
index 0000000..1e7054c
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/sync.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        paxos min: 10
+        paxos trim min: 10
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
diff --git a/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
new file mode 100644
index 0000000..c0f0f2e
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
@@ -0,0 +1,56 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - slow request
+tasks:
+- exec:
+    client.0:
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
diff --git a/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/qa/suites/rados/monthrash/workloads/rados_5925.yaml
new file mode 100644
index 0000000..b49937f
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_5925.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    client.0:
+      - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
diff --git a/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..b536557
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+    conf:
+      global:
+        debug objecter: 20
+        debug rados: 20
+        debug ms: 1
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
new file mode 100644
index 0000000..31465cf
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - mon/pool_ops.sh
+        - mon/crush_ops.sh
+        - mon/osd.sh
+        - mon/caps.sh
+
diff --git a/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/rados/multimon/% b/qa/suites/rados/multimon/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/multimon/clusters/21.yaml b/qa/suites/rados/multimon/clusters/21.yaml
new file mode 100644
index 0000000..646ff15
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/21.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0]
+- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t]
+- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/3.yaml b/qa/suites/rados/multimon/clusters/3.yaml
new file mode 100644
index 0000000..e30dc76
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/3.yaml
@@ -0,0 +1,6 @@
+roles:
+- [mon.a, mon.b, mon.c, osd.0, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/6.yaml b/qa/suites/rados/multimon/clusters/6.yaml
new file mode 100644
index 0000000..b16e326
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/6.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, mon.e, osd.0]
+- [mon.b, mon.d, mon.f, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/9.yaml b/qa/suites/rados/multimon/clusters/9.yaml
new file mode 100644
index 0000000..c2c7b49
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/9.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, osd.0]
+- [mon.b, mon.e, mon.h]
+- [mon.c, mon.f, mon.i, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/qa/suites/rados/multimon/fs/xfs.yaml b/qa/suites/rados/multimon/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/multimon/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/multimon/msgr-failures/few.yaml b/qa/suites/rados/multimon/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rados/multimon/msgr-failures/many.yaml b/qa/suites/rados/multimon/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/rados/multimon/msgr/async.yaml b/qa/suites/rados/multimon/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/multimon/msgr/random.yaml b/qa/suites/rados/multimon/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/multimon/msgr/simple.yaml b/qa/suites/rados/multimon/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/multimon/rados.yaml b/qa/suites/rados/multimon/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/multimon/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
new file mode 100644
index 0000000..e86bdde
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - slow request
+    - .*clock.*skew.*
+    - clocks not synchronized
+- mon_clock_skew_check:
+    expect-skew: false
diff --git a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
new file mode 100644
index 0000000..2953e0d
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      mon.b:
+        clock offset: 10
+tasks:
+- install:
+- ceph:
+    wait-for-healthy: false
+    log-whitelist:
+    - slow request
+    - .*clock.*skew.*
+    - clocks not synchronized
+- mon_clock_skew_check:
+    expect-skew: true
diff --git a/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/qa/suites/rados/multimon/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..94721ea
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_recovery.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- mon_recovery:
diff --git a/qa/suites/rados/objectstore/alloc-hint.yaml b/qa/suites/rados/objectstore/alloc-hint.yaml
new file mode 100644
index 0000000..cc60866
--- /dev/null
+++ b/qa/suites/rados/objectstore/alloc-hint.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        filestore xfs extsize: true
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_alloc_hint.sh
diff --git a/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml b/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
new file mode 100644
index 0000000..e062459
--- /dev/null
+++ b/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
@@ -0,0 +1,16 @@
+roles:
+- [mon.0, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 6
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- ceph_objectstore_tool:
+    objects: 20
diff --git a/qa/suites/rados/objectstore/filejournal.yaml b/qa/suites/rados/objectstore/filejournal.yaml
new file mode 100644
index 0000000..7e401d1
--- /dev/null
+++ b/qa/suites/rados/objectstore/filejournal.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- exec:
+    client.0:
+      - ceph_test_filejournal
diff --git a/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml b/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
new file mode 100644
index 0000000..53a93f6
--- /dev/null
+++ b/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      global:
+        journal aio: true
+- filestore_idempotent:
diff --git a/qa/suites/rados/objectstore/filestore-idempotent.yaml b/qa/suites/rados/objectstore/filestore-idempotent.yaml
new file mode 100644
index 0000000..64dd3a7
--- /dev/null
+++ b/qa/suites/rados/objectstore/filestore-idempotent.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- filestore_idempotent:
diff --git a/qa/suites/rados/objectstore/fusestore.yaml b/qa/suites/rados/objectstore/fusestore.yaml
new file mode 100644
index 0000000..78f61c2
--- /dev/null
+++ b/qa/suites/rados/objectstore/fusestore.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - objectstore/test_fuse.sh
+
diff --git a/qa/suites/rados/objectstore/keyvaluedb.yaml b/qa/suites/rados/objectstore/keyvaluedb.yaml
new file mode 100644
index 0000000..6921c5d
--- /dev/null
+++ b/qa/suites/rados/objectstore/keyvaluedb.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
+      - rm -rf $TESTDIR/kvtest
diff --git a/qa/suites/rados/objectstore/objectcacher-stress.yaml b/qa/suites/rados/objectstore/objectcacher-stress.yaml
new file mode 100644
index 0000000..386d12c
--- /dev/null
+++ b/qa/suites/rados/objectstore/objectcacher-stress.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all:
+        - osdc/stress_objectcacher.sh
diff --git a/qa/suites/rados/objectstore/objectstore.yaml b/qa/suites/rados/objectstore/objectstore.yaml
new file mode 100644
index 0000000..f6137a3
--- /dev/null
+++ b/qa/suites/rados/objectstore/objectstore.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- exec:
+    client.0:
+      - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ceph_test_objectstore --gtest_filter=-*/2
+      - rm -rf $TESTDIR/ostest
diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/singleton-nomsgr/all/11429.yaml b/qa/suites/rados/singleton-nomsgr/all/11429.yaml
new file mode 100644
index 0000000..93e15b3
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/11429.yaml
@@ -0,0 +1,135 @@
+# we don't have el7 packages for old releases
+# http://tracker.ceph.com/issues/15139
+os_type: ubuntu
+os_version: "14.04"
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: v0.80.8
+- print: '**** done installing firefly'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- full_sequential:
+  - ceph_manager.create_pool:
+      args: ['toremove']
+      kwargs:
+        pg_num: 4096
+  - sleep:
+      duration: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 120
+      size: 1
+      pool: toremove
+      create_pool: false
+      cleanup: false
+  - ceph_manager.remove_pool:
+      args: ['toremove']
+  - exec:
+      client.0:
+        - rados -p rbd ls -
+  - exec:
+      osd.0:
+        - ceph daemon osd.0 config set filestore_blackhole true
+  - ceph.restart:
+      daemons:
+        - osd.0
+        - osd.1
+        - osd.2
+  - exec:
+      osd.0:
+        - sudo grep -c 'load_pgs. skipping PG' /var/log/ceph/ceph-osd.0.log
+  - ceph_manager.wait_for_clean: null
+  - ceph_manager.create_pool:
+      args: ['newpool']
+  - loop:
+      count: 100
+      body:
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 2]
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 1]
+  - ceph_manager.wait_for_clean: null
+  - loop:
+      count: 100
+      body:
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 2]
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 1]
+  - sleep:
+      duration: 10
+  - ceph_manager.wait_for_clean: null
+  - print: '**** done creating zombie pgs'
+
+  - install.upgrade:
+      mon.a:
+        branch: hammer
+  - ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+  - ceph_manager.wait_for_clean: null
+  - ceph.restart: [osd.0, osd.1, osd.2]
+  - ceph_manager.wait_for_clean: null
+  - exec:
+      osd.0:
+        - sudo grep -c 'Skipping the pg for now' /var/log/ceph/ceph-osd.0.log
+  - print: '**** done verifying hammer upgrade'
+
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - exec:
+      osd.0:
+        - sleep 300 # http://tracker.ceph.com/issues/17808
+        - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - ceph_manager.wait_for_clean: null
+  - ceph.restart: [osd.0, osd.1, osd.2]
+  - ceph_manager.wait_for_clean: null
+  - exec:
+      osd.0:
+        - sudo grep -c 'unable to peek at' /var/log/ceph/ceph-osd.0.log
+  - radosbench:
+      clients: [client.0]
+      time: 5
+      size: 1
+  - ceph.restart: [osd.0, osd.1, osd.2]
+  - ceph_manager.wait_for_clean: null
+  - print: '**** done verifying final upgrade'
diff --git a/qa/suites/rados/singleton-nomsgr/all/16113.yaml b/qa/suites/rados/singleton-nomsgr/all/16113.yaml
new file mode 100644
index 0000000..5922789
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/16113.yaml
@@ -0,0 +1,103 @@
+os_type: ubuntu
+os_version: "14.04"
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+        osd pool default size: 2
+        osd pool default min size: 1
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode
+    - wrongly marked me down
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: hammer
+- print: '**** done installing hammer'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- ceph_manager.create_pool:
+    args: ['test']
+    kwargs:
+      pg_num: 1024
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- sequential:
+  - radosbench:
+      pool: test
+      size: 1
+      time: 100
+      cleanup: false
+      create_pool: false
+- install.upgrade:
+    mon.a: null
+- ceph.restart:
+    daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- exec:
+    osd.0:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+- ceph.healthy:
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- exec:
+    mon.a:
+      - ceph osd set sortbitwise
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- sequential:
+  - radosbench:
+      pool: test
+      size: 1
+      time: 400
+      cleanup: false
+      create_pool: false
+  - sleep:
+      duration: 30
+  - ceph_manager.kill_osd:
+      kwargs:
+        osd: 0
+  - sleep:
+      duration: 30
+  - ceph_manager.revive_osd:
+      kwargs:
+        osd: 0
+  - sleep:
+      duration: 30
+  - ceph_manager.wait_for_clean: null
+- sleep:
+    duration: 30
+- ceph_manager.wait_for_clean: null
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 0000000..3e2bf0b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,42 @@
+roles:
+- [mon.0, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+    - ceph osd pool create data_cache 4
+    - ceph osd tier add cephfs_data data_cache
+    - ceph osd tier cache-mode data_cache writeback
+    - ceph osd tier set-overlay cephfs_data data_cache
+    - ceph osd pool set data_cache hit_set_type bloom
+    - ceph osd pool set data_cache hit_set_count 8
+    - ceph osd pool set data_cache hit_set_period 3600
+    - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+      - sudo chmod 777 $TESTDIR/mnt.0/
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - truncate --size 0 $TESTDIR/mnt.0/foo
+      - ls -al $TESTDIR/mnt.0/foo
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - cp $TESTDIR/mnt.0/foo /tmp/foo
+      - sync
+      - rados -p data_cache ls -
+      - sleep 10
+      - rados -p data_cache ls -
+      - rados -p data_cache cache-flush-evict-all
+      - rados -p data_cache ls -
+      - sleep 1
+- exec:
+    client.1:
+      - hexdump -C /tmp/foo | head
+      - hexdump -C $TESTDIR/mnt.1/foo | head
+      - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 0000000..d0a4db0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 0000000..4a9de25
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+    - ceph osd pool create base-pool 4
+    - ceph osd pool create cache-pool 4
+    - ceph osd tier add base-pool cache-pool
+    - ceph osd tier cache-mode cache-pool writeback
+    - ceph osd tier set-overlay base-pool cache-pool
+    - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+    - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+    - rbd snap create base-pool/bar@snap
+    - rados -p base-pool cache-flush-evict-all
+    - rbd export base-pool/bar $TESTDIR/bar
+    - rbd export base-pool/bar@snap $TESTDIR/snap
+    - cmp $TESTDIR/foo $TESTDIR/bar
+    - cmp $TESTDIR/foo $TESTDIR/snap
+    - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 0000000..6b46bc5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,26 @@
+# verify #13098 fix
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - is full
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create ec-ca 1 1
+      - ceph osd pool create ec 1 1 erasure default
+      - ceph osd tier add ec ec-ca
+      - ceph osd tier cache-mode ec-ca readproxy
+      - ceph osd tier set-overlay ec ec-ca
+      - ceph osd pool set ec-ca hit_set_type bloom
+      - ceph osd pool set-quota ec-ca max_bytes 20480000
+      - ceph osd pool set-quota ec max_bytes 20480000
+      - ceph osd pool set ec-ca target_max_bytes 20480000
+      - timeout 30 rados -p ec-ca bench 30 write || true
diff --git a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml
new file mode 100644
index 0000000..b9c3146
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml
@@ -0,0 +1,97 @@
+os_type: ubuntu
+os_version: "14.04"
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: hammer
+- print: '**** done installing hammer'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- ceph_manager.create_pool:
+    args: ['test']
+    kwargs:
+      pg_num: 1
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- create_verify_lfn_objects:
+    pool: 'test'
+    prefix: 'hammer-x'
+    namespace:
+    - 'long_namespace__________________________________________________'
+    - ''
+    - 'namespace'
+    num_objects: 5
+    name_length: [400, 800, 1600]
+- sequential:
+  - create_verify_lfn_objects:
+      pool: 'test'
+      prefix: 'hammer-mixed'
+      namespace:
+      - 'long_namespace__________________________________________________'
+      - ''
+      - 'namespace'
+      num_objects: 5
+      name_length: [400, 800, 1600]
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1]
+  - ceph_manager.wait_for_clean: null
+  - ceph_manager.do_pg_scrub:
+      args: ['test', 0, 'scrub']
+- ceph_manager.do_pg_scrub:
+    args: ['test', 0, 'scrub']
+- create_verify_lfn_objects:
+    pool: 'test'
+    prefix: 'mixed-x'
+    namespace:
+    - 'long_namespace__________________________________________________'
+    - ''
+    - 'namespace'
+    num_objects: 5
+    name_length: [400, 800, 1600]
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- exec:
+    mon.a:
+    - sleep 60
+    - ceph osd set require_jewel_osds
+- ceph_manager.wait_for_clean: null
+- ceph_manager.do_pg_scrub:
+    args: ['test', 0, 'scrub']
diff --git a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml
new file mode 100644
index 0000000..d7de28e
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml
@@ -0,0 +1,97 @@
+os_type: ubuntu
+os_version: "14.04"
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: infernalis
+- print: '**** done installing infernalis'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- ceph_manager.create_pool:
+    args: ['test']
+    kwargs:
+      pg_num: 1
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- create_verify_lfn_objects:
+    pool: 'test'
+    prefix: 'infernalis-x'
+    namespace:
+    - 'long_namespace__________________________________________________'
+    - ''
+    - 'namespace'
+    num_objects: 5
+    name_length: [400, 800, 1600]
+- sequential:
+  - create_verify_lfn_objects:
+      pool: 'test'
+      prefix: 'infernalis-mixed'
+      namespace:
+      - 'long_namespace__________________________________________________'
+      - ''
+      - 'namespace'
+      num_objects: 5
+      name_length: [400, 800, 1600]
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1]
+  - ceph_manager.wait_for_clean: null
+  - ceph_manager.do_pg_scrub:
+      args: ['test', 0, 'scrub']
+- ceph_manager.do_pg_scrub:
+    args: ['test', 0, 'scrub']
+- create_verify_lfn_objects:
+    pool: 'test'
+    prefix: 'mixed-x'
+    namespace:
+    - 'long_namespace__________________________________________________'
+    - ''
+    - 'namespace'
+    num_objects: 5
+    name_length: [400, 800, 1600]
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- exec:
+    mon.a:
+    - sleep 60
+    - ceph osd set require_jewel_osds
+- ceph_manager.wait_for_clean: null
+- ceph_manager.do_pg_scrub:
+    args: ['test', 0, 'scrub']
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 0000000..1f9a695
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+          - ceph_test_async_driver
+          - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 0000000..78f6e96
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 64
+      - rados -p foo bench 60 write -b 1024 --no-cleanup
+      - sudo ceph osd pool set foo size 3
+      - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
diff --git a/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml b/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
new file mode 100644
index 0000000..c6bb76f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
@@ -0,0 +1,21 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+        debug deliberately leak memory: true
+        osd max object name len: 460
+        osd max object namespace len: 64
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+roles:
+- [mon.0, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    expect_valgrind_errors: true
diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 0000000..5a61e21
--- /dev/null
+++ b/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,24 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - client.a
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+    osd.0:
+      version:
+      git_version:
+      help:
+      config show:
+      config set filestore_dump_file /tmp/foo:
+      perf dump:
+      perf schema:
+      get_heap_property tcmalloc.max_total_thread_cache_bytes:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/qa/suites/rados/singleton/all/cephtool.yaml b/qa/suites/rados/singleton/all/cephtool.yaml
new file mode 100644
index 0000000..b0a8863
--- /dev/null
+++ b/qa/suites/rados/singleton/all/cephtool.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - had wrong client addr
+    - had wrong cluster addr
+    - must scrub before tier agent can activate
+- workunit:
+    clients:
+      all:
+        - cephtool
+        - mon/pool_ops.sh
diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 0000000..3103703
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.0
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 0000000..dd00b87
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.0
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 0000000..f153de6
--- /dev/null
+++ b/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+- dump_stuck:
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound-upgrade.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound-upgrade.yaml
new file mode 100644
index 0000000..9228b30
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound-upgrade.yaml
@@ -0,0 +1,30 @@
+os_type: centos
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: infernalis
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode
+- install.upgrade:
+    mon.a:
+- print: "upgraded mon.a and friends"
+- ceph.restart:
+    daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+- ec_lost_unfound:
+    parallel_bench: false
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 0000000..7c4b5f0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+- ec_lost_unfound:
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete-upgrade.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete-upgrade.yaml
new file mode 100644
index 0000000..61ef7d8
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete-upgrade.yaml
@@ -0,0 +1,29 @@
+os_type: centos
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+- - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+    branch: infernalis
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode
+- install.upgrade:
+    mon.a:
+- print: "upgraded mon.a and friends"
+- ceph.restart:
+    daemons: [mon.a, mon.b, mon.c, osd.0, osd.1]
+- rep_lost_unfound_delete:
+    parallel_bench: false
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 0000000..fbc24e0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+- rep_lost_unfound_delete:
diff --git a/qa/suites/rados/singleton/all/lost-unfound-upgrade.yaml b/qa/suites/rados/singleton/all/lost-unfound-upgrade.yaml
new file mode 100644
index 0000000..0040071
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-upgrade.yaml
@@ -0,0 +1,29 @@
+os_type: centos
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+- - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+    branch: infernalis
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode
+- install.upgrade:
+    mon.a:
+- print: "upgraded mon.a and friends"
+- ceph.restart:
+    daemons: [mon.a, mon.b, mon.c, osd.0, osd.1]
+- lost_unfound:
+    parallel_bench: false
diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 0000000..31649db
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+- lost_unfound:
diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 0000000..b905691
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.0
+  - mon.1
+  - mon.2
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - mon/test_mon_config_key.py
diff --git a/qa/suites/rados/singleton/all/mon-thrasher.yaml b/qa/suites/rados/singleton/all/mon-thrasher.yaml
new file mode 100644
index 0000000..5caddda
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-thrasher.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
+- workunit:
+    clients:
+      all:
+        - mon/workloadgen.sh
+    env:
+      LOADGEN_NUM_OSDS: "5"
+      VERBOSE: "1"
+      DURATION: "600"
diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 0000000..c9263db
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      osd:
+        osd min pg log entries: 5
+- osd_backfill:
diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 0000000..5983b7a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      osd:
+        osd min pg log entries: 5
+- osd_recovery.test_incomplete_pgs:
diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 0000000..977fa63
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      osd:
+        osd min pg log entries: 5
+- osd_recovery:
diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 0000000..9eaaa94
--- /dev/null
+++ b/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.0
+  - mon.1
+  - mon.2
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size: 1
+    log-whitelist:
+    - objects unfound and apparently lost
+- peer:
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 0000000..efb9fa1
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - slow request
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 128 128
+      - sleep 5
+      - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+      - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+    client.0:
+      - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- exec:
+    client.0:
+      - sudo ceph tell osd.0 flush_pg_stats
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 0000000..0760ebd
--- /dev/null
+++ b/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - had wrong client addr
+    - had wrong cluster addr
+    - reached quota
+- workunit:
+    clients:
+      all:
+        - rados/test_rados_tool.sh
diff --git a/qa/suites/rados/singleton/all/reg11184.yaml b/qa/suites/rados/singleton/all/reg11184.yaml
new file mode 100644
index 0000000..8bc502a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/reg11184.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.0
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- reg11184:
diff --git a/qa/suites/rados/singleton/all/rest-api.yaml b/qa/suites/rados/singleton/all/rest-api.yaml
new file mode 100644
index 0000000..ce39a6b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/rest-api.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.0
+  - mon.1
+  - mon.2
+  - osd.0
+  - osd.1
+  - osd.2
+  - mds.a
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - had wrong client addr
+    conf:
+      client.rest0:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+- rest-api: [client.0]
+- workunit:
+    clients:
+      all:
+        - rest/test.py
diff --git a/qa/suites/rados/singleton/all/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados.yaml
new file mode 100644
index 0000000..af96310
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .5
+- workunit:
+    clients:
+      all:
+      - rados/load-gen-mix-small.sh
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 0000000..02c228d
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,66 @@
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - slow request
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+    mon.a:
+      - while true
+      - do sleep 30
+      - echo proxy
+      - sudo ceph osd tier cache-mode cache proxy
+      - sleep 10
+      - sudo ceph osd pool set cache cache_target_full_ratio .001
+      - echo cache-try-flush-evict-all
+      - rados -p cache cache-try-flush-evict-all
+      - sleep 5
+      - echo cache-flush-evict-all
+      - rados -p cache cache-flush-evict-all
+      - sleep 5
+      - echo remove overlay
+      - sudo ceph osd tier remove-overlay base
+      - sleep 20
+      - echo add writeback overlay
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd pool set cache cache_target_full_ratio .8
+      - sudo ceph osd tier set-overlay base cache
+      - sleep 30
+      - sudo ceph osd tier cache-mode cache readproxy
+      - done
+- rados:
+    clients: [client.0]
+    pools: [base]
+    max_seconds: 600
+    ops: 400000
+    objects: 10000
+    size: 1024
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 0000000..48900f3
--- /dev/null
+++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.0
+  - mon.1
+  - mon.2
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size: 1
+      client:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+    log-whitelist:
+    - objects unfound and apparently lost
+- watch_notify_same_primary:
+    clients: [client.0]
diff --git a/qa/suites/rados/singleton/fs/xfs.yaml b/qa/suites/rados/singleton/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/singleton/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/rados/singleton/msgr/async.yaml b/qa/suites/rados/singleton/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/singleton/msgr/random.yaml b/qa/suites/rados/singleton/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/singleton/msgr/simple.yaml b/qa/suites/rados/singleton/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/singleton/rados.yaml b/qa/suites/rados/singleton/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash-erasure-code-big/% b/qa/suites/rados/thrash-erasure-code-big/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/+ b/qa/suites/rados/thrash-erasure-code-big/cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
new file mode 100644
index 0000000..11317e6
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
@@ -0,0 +1,5 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, mon.b]
+- [osd.6, osd.7, osd.8, mon.c]
+- [osd.9, osd.10, osd.11]
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
new file mode 100644
index 0000000..e4d8423
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-big/fs/btrfs.yaml b/qa/suites/rados/thrash-erasure-code-big/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/thrash-erasure-code-big/fs/xfs.yaml b/qa/suites/rados/thrash-erasure-code-big/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/thrash-erasure-code-big/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms tcp read timeout: 5
diff --git a/qa/suites/rados/thrash-erasure-code-big/msgr-failures/few.yaml b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+      osd:
+        osd heartbeat use min delay socket: true
diff --git a/qa/suites/rados/thrash-erasure-code-big/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/thrash-erasure-code-big/rados.yaml b/qa/suites/rados/thrash-erasure-code-big/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
new file mode 100644
index 0000000..e5728c9
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
@@ -0,0 +1,18 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - slow request
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
new file mode 100644
index 0000000..1a6e900
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      mon:
+        mon osd pool ec fast read: 1
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
new file mode 100644
index 0000000..67720fe
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
@@ -0,0 +1,22 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 2
+      osd:
+        osd map cache size: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - osd_map_cache_size
+- thrashosds:
+    timeout: 1800
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_test_map_discontinuity: 0.5
+    min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..99906ba
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+    min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
new file mode 100644
index 0000000..1117cdd
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
+    min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 100644
index 0000000..3463a01
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: lrcprofile
+      plugin: lrc
+      k: 4
+      m: 2
+      l: 3
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code-isa/% b/qa/suites/rados/thrash-erasure-code-isa/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/suites/rados/thrash-erasure-code-isa/clusters/+ b/qa/suites/rados/thrash-erasure-code-isa/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-isa/clusters/fixed-2.yaml b/qa/suites/rados/thrash-erasure-code-isa/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-isa/clusters/openstack.yaml b/qa/suites/rados/thrash-erasure-code-isa/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-isa/fs/btrfs.yaml b/qa/suites/rados/thrash-erasure-code-isa/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/thrash-erasure-code-isa/fs/xfs.yaml b/qa/suites/rados/thrash-erasure-code-isa/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms tcp read timeout: 5
diff --git a/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/few.yaml b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+      osd:
+        osd heartbeat use min delay socket: true
diff --git a/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/thrash-erasure-code-isa/rados.yaml b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash-erasure-code-isa/supported/centos_7.3.yaml b/qa/suites/rados/thrash-erasure-code-isa/supported/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/supported/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/rados/thrash-erasure-code-isa/supported/ubuntu_14.04.yaml b/qa/suites/rados/thrash-erasure-code-isa/supported/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/supported/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashers/default.yaml
new file mode 100644
index 0000000..fabfc4f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers/default.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers/mapgap.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashers/mapgap.yaml
new file mode 100644
index 0000000..26c3032
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers/mapgap.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 2
+      osd:
+        osd map cache size: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - osd_map_cache_size
+- thrashosds:
+    timeout: 1800
+    chance_pgnum_grow: 0.25
+    chance_pgpnum_fix: 0.25
+    chance_test_map_discontinuity: 2
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..20c84b1
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+        journal throttle high multiple: 2
+        journal throttle max multiple: 10
+        filestore queue throttle high multiple: 2
+        filestore queue throttle max multiple: 10
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+openstack:
+- volumes:
+    size: 50
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashers/pggrow.yaml
new file mode 100644
index 0000000..8b1cf46
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+        filestore odsync write: true
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 100644
index 0000000..8d7c497
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: isaprofile
+      plugin: isa
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code-shec/% b/qa/suites/rados/thrash-erasure-code-shec/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/+ b/qa/suites/rados/thrash-erasure-code-shec/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
new file mode 100644
index 0000000..7f051dc
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
@@ -0,0 +1,5 @@
+roles: 
+- [mon.a, osd.0, osd.4, osd.8, osd.12] 
+- [mon.b, osd.1, osd.5, osd.9, osd.13] 
+- [mon.c, osd.2, osd.6, osd.10, osd.14] 
+- [osd.3, osd.7, osd.11, osd.15, client.0] 
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-shec/fs/xfs.yaml b/qa/suites/rados/thrash-erasure-code-shec/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms tcp read timeout: 5
diff --git a/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/few.yaml b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+      osd:
+        osd heartbeat use min delay socket: true
diff --git a/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/thrash-erasure-code-shec/rados.yaml b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
new file mode 100644
index 0000000..e5728c9
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
@@ -0,0 +1,18 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - slow request
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 100644
index 0000000..696baed
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: shecprofile
+      plugin: shec
+      k: 4
+      m: 3
+      c: 2
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code/% b/qa/suites/rados/thrash-erasure-code/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code/clusters/+ b/qa/suites/rados/thrash-erasure-code/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash-erasure-code/clusters/fixed-2.yaml b/qa/suites/rados/thrash-erasure-code/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code/clusters/openstack.yaml b/qa/suites/rados/thrash-erasure-code/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rados/thrash-erasure-code/fs/btrfs.yaml b/qa/suites/rados/thrash-erasure-code/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/thrash-erasure-code/fs/xfs.yaml b/qa/suites/rados/thrash-erasure-code/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/thrash-erasure-code/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-erasure-code/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms tcp read timeout: 5
diff --git a/qa/suites/rados/thrash-erasure-code/msgr-failures/few.yaml b/qa/suites/rados/thrash-erasure-code/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+      osd:
+        osd heartbeat use min delay socket: true
diff --git a/qa/suites/rados/thrash-erasure-code/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-erasure-code/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/thrash-erasure-code/rados.yaml b/qa/suites/rados/thrash-erasure-code/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
new file mode 100644
index 0000000..fade054
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
@@ -0,0 +1,17 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
new file mode 100644
index 0000000..1a6e900
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      mon:
+        mon osd pool ec fast read: 1
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/mapgap.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/mapgap.yaml
new file mode 100644
index 0000000..c37147f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/mapgap.yaml
@@ -0,0 +1,22 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 2
+      osd:
+        osd map cache size: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - osd_map_cache_size
+- thrashosds:
+    timeout: 1800
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_test_map_discontinuity: 0.5
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..9ba1b9e
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
new file mode 100644
index 0000000..744761d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
+    min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 100644
index 0000000..4fa8d9f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure21profile
+      plugin: jerasure
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..3c31a8b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
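
For reference, the stripe_width arithmetic described in the comment of the
hunk above can be reproduced with a minimal sketch. The 32-byte per-chunk
alignment used below is an assumption chosen to match the 1376-byte figure in
that comment, not a value taken from the Ceph source; the 4096-byte default
stripe width is the one the comment itself cites.

    # Minimal sketch of the stripe_width rounding described in the comment
    # above. ALIGNMENT is an assumed value for illustration only.
    ALIGNMENT = 32          # assumed per-chunk alignment in bytes
    DEFAULT_WIDTH = 4096    # default stripe_width cited in the comment

    def ec_stripe_width(k, width=DEFAULT_WIDTH, alignment=ALIGNMENT):
        """Chunk size is width/k rounded up to the alignment; the effective
        stripe_width is then chunk * k."""
        chunk = -(-width // k)                       # ceil(width / k)
        chunk = -(-chunk // alignment) * alignment   # round up to alignment
        return chunk * k

    print(ec_stripe_width(3))   # 1376 * 3 = 4128, as the comment notes
    print(ec_stripe_width(2))   # 2048 * 2 = 4096, the default is preserved

The two print statements show why k=3 is the interesting case here: with k=2
the rounding is a no-op and the default stripe width is unchanged.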
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
new file mode 100644
index 0000000..3c2ff7a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
@@ -0,0 +1,27 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+      unique_pool: true
+      ec_pool: true
+  - radosbench:
+      clients: [client.0]
+      time: 150
+      unique_pool: true
+      ec_pool: true
+  - radosbench:
+      clients: [client.0]
+      time: 150
+      unique_pool: true
+      ec_pool: true
+  - radosbench:
+      clients: [client.0]
+      time: 150
+      unique_pool: true
+      ec_pool: true
+  - radosbench:
+      clients: [client.0]
+      time: 150
+      unique_pool: true
+      ec_pool: true
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
new file mode 100644
index 0000000..e732ec6
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
@@ -0,0 +1,21 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400000
+    max_seconds: 600
+    max_in_flight: 64
+    objects: 1024
+    size: 16384
+    ec_pool: true
+    fast_read: true
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
new file mode 100644
index 0000000..a8ac397
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
@@ -0,0 +1,20 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400000
+    max_seconds: 600
+    max_in_flight: 64
+    objects: 1024
+    size: 16384
+    ec_pool: true
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash/% b/qa/suites/rados/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
new file mode 100644
index 0000000..d710aee
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 2
+        osd_pool_default_min_size: 1
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
new file mode 100644
index 0000000..42b854e
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 2
+        osd_pool_default_min_size: 2
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
new file mode 100644
index 0000000..0257906
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
@@ -0,0 +1,8 @@
+overrides:
+  thrashosds:
+    min_in: 4
+  ceph:
+    conf:
+      global:
+        osd_pool_default_size: 3
+        osd_pool_default_min_size: 2
diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
new file mode 100644
index 0000000..d50d965
--- /dev/null
+++ b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_min_pg_log_entries: 150
+        osd_max_pg_log_entries: 300
diff --git a/qa/suites/rados/thrash/clusters/+ b/qa/suites/rados/thrash/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/thrash/clusters/fixed-2.yaml b/qa/suites/rados/thrash/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rados/thrash/clusters/openstack.yaml b/qa/suites/rados/thrash/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rados/thrash/fs/btrfs.yaml b/qa/suites/rados/thrash/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/thrash/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/thrash/fs/xfs.yaml b/qa/suites/rados/thrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rados/thrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rados/thrash/hobj-sort.yaml b/qa/suites/rados/thrash/hobj-sort.yaml
new file mode 100644
index 0000000..4140094
--- /dev/null
+++ b/qa/suites/rados/thrash/hobj-sort.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd debug randomize hobject sort order: true
diff --git a/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms tcp read timeout: 5
diff --git a/qa/suites/rados/thrash/msgr-failures/few.yaml b/qa/suites/rados/thrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+      osd:
+        osd heartbeat use min delay socket: true
diff --git a/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: osd
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
diff --git a/qa/suites/rados/thrash/msgr/async.yaml b/qa/suites/rados/thrash/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/thrash/msgr/random.yaml b/qa/suites/rados/thrash/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/thrash/msgr/simple.yaml b/qa/suites/rados/thrash/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/thrash/rados.yaml b/qa/suites/rados/thrash/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/thrash/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/thrash/thrashers/default.yaml b/qa/suites/rados/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..fabfc4f
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/default.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash/thrashers/mapgap.yaml b/qa/suites/rados/thrash/thrashers/mapgap.yaml
new file mode 100644
index 0000000..26c3032
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/mapgap.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 2
+      osd:
+        osd map cache size: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - osd_map_cache_size
+- thrashosds:
+    timeout: 1800
+    chance_pgnum_grow: 0.25
+    chance_pgpnum_fix: 0.25
+    chance_test_map_discontinuity: 2
diff --git a/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/qa/suites/rados/thrash/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..20c84b1
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd max backfills: 1
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+        journal throttle high multiple: 2
+        journal throttle max multiple: 10
+        filestore queue throttle high multiple: 2
+        filestore queue throttle max multiple: 10
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+openstack:
+- volumes:
+    size: 50
diff --git a/qa/suites/rados/thrash/thrashers/pggrow.yaml b/qa/suites/rados/thrash/thrashers/pggrow.yaml
new file mode 100644
index 0000000..8b1cf46
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    conf:
+      osd:
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+        filestore odsync write: true
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
new file mode 100644
index 0000000..8c9764a
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      client.0:
+        admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 150
+- admin_socket:
+    client.0:
+      objecter_requests:
+        test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
new file mode 100644
index 0000000..1a54cd6
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
@@ -0,0 +1,29 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd m=1 k=2
+      - sudo ceph osd pool create base 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 5000
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 10000
+    objects: 6600
+    max_seconds: 1200
+    size: 1024
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
new file mode 100644
index 0000000..efa8419
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
@@ -0,0 +1,29 @@
+overrides:
+  ceph:
+    crush_tunables: firefly
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
+      - sudo ceph osd pool set cache min_read_recency_for_promote 2
+      - sudo ceph osd pool set cache min_write_recency_for_promote 2
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
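The exec lists above are plain ceph CLI calls that put a writeback cache tier in front of the base pool. For anyone replaying them by hand, a hedged sketch of the inverse clean-up in the same exec style (the subcommands are standard ceph/rados commands, but the exact sequencing is an assumption, and the cache should first be switched out of writeback mode, e.g. to forward, before evicting):

tasks:
- exec:
    client.0:
      - sudo rados -p cache cache-flush-evict-all   # drain objects out of the cache pool
      - sudo ceph osd tier remove-overlay base      # stop routing client I/O through the cache
      - sudo ceph osd tier remove base cache        # detach the cache pool from the base pool
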
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
new file mode 100644
index 0000000..0f5462b
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
@@ -0,0 +1,33 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache readproxy
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 3600
+      - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 4000
+    objects: 500
+    pool_snaps: true
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
+      flush: 50
+      try_flush: 50
+      evict: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
new file mode 100644
index 0000000..bd0321b
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
@@ -0,0 +1,35 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 3600
+      - sudo ceph osd pool set cache target_max_objects 250
+      - sudo ceph osd pool set cache min_read_recency_for_promote 0
+      - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 4000
+    objects: 500
+    pool_snaps: true
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
+      flush: 50
+      try_flush: 50
+      evict: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
new file mode 100644
index 0000000..ee68490
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
@@ -0,0 +1,33 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 3600
+      - sudo ceph osd pool set cache target_max_objects 250
+      - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
+      flush: 50
+      try_flush: 50
+      evict: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml
new file mode 100644
index 0000000..8087e1c
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache.yaml
@@ -0,0 +1,30 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create base 4
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add base cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay base cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 3600
+      - sudo ceph osd pool set cache min_read_recency_for_promote 0
+      - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+    clients: [client.0]
+    pools: [base]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
+      flush: 50
+      try_flush: 50
+      evict: 50
diff --git a/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
new file mode 100644
index 0000000..b5f6dca
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
@@ -0,0 +1,14 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    pool_snaps: true
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..66f1eda
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+    crush_tunables: hammer
+    conf:
+      client:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/rados/thrash/workloads/radosbench.yaml b/qa/suites/rados/thrash/workloads/radosbench.yaml
new file mode 100644
index 0000000..6f10e4d
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/radosbench.yaml
@@ -0,0 +1,24 @@
+overrides:
+  ceph:
+    conf:
+      client.0:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
diff --git a/qa/suites/rados/thrash/workloads/readwrite.yaml b/qa/suites/rados/thrash/workloads/readwrite.yaml
new file mode 100644
index 0000000..8429090
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/readwrite.yaml
@@ -0,0 +1,12 @@
+overrides:
+  ceph:
+    crush_tunables: optimal
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
diff --git a/qa/suites/rados/thrash/workloads/rgw_snaps.yaml b/qa/suites/rados/thrash/workloads/rgw_snaps.yaml
new file mode 100644
index 0000000..8200cc3
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/rgw_snaps.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rgw:
+    default_idle_timeout: 3600
+    client.0: null
+- thrash_pool_snaps:
+    pools:
+    - .rgw.buckets
+    - .rgw.root
+    - .rgw.control
+    - .rgw
+    - .users.uid
+    - .users.email
+    - .users
+- s3readwrite:
+    client.0:
+      rgw_server: client.0
+      readwrite:
+        bucket: rwtest
+        readers: 10
+        writers: 3
+        duration: 300
+        files:
+          num: 10
+          size: 2000
+          stddev: 500
diff --git a/qa/suites/rados/thrash/workloads/small-objects.yaml b/qa/suites/rados/thrash/workloads/small-objects.yaml
new file mode 100644
index 0000000..d8545b9
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/small-objects.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    crush_tunables: legacy
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 400000
+    max_seconds: 600
+    max_in_flight: 64
+    objects: 1024
+    size: 16384
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
new file mode 100644
index 0000000..606dcae
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_fadvise_dontneed: true
+    op_weights:
+      write: 100
diff --git a/qa/suites/rados/upgrade/% b/qa/suites/rados/upgrade/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/% b/qa/suites/rados/upgrade/hammer-x-singleton/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/+ b/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/openstack.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/openstack.yaml
new file mode 100644
index 0000000..e4d8423
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/start.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/start.yaml
new file mode 100644
index 0000000..1209365
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/0-cluster/start.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/1-hammer-install/hammer.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..5c5012a
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/1-hammer-install/hammer.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done  install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/2-partial-upgrade/firsthalf.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..cd0414d
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,8 @@
+os_type: centos
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/3-thrash/default.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/3-thrash/default.yaml
new file mode 100644
index 0000000..6976399
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/3-thrash/default.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/4-mon/mona.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/4-mon/mona.yaml
new file mode 100644
index 0000000..7c75c10
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/4-mon/mona.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/+ b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-cls.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-cls.yaml
new file mode 100644
index 0000000..1bcd1b8
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-cls.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-import-export.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..882d979
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/rbd-import-export.yaml
@@ -0,0 +1,9 @@
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/readwrite.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/readwrite.yaml
new file mode 100644
index 0000000..922e61f
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/readwrite.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/snaps-few-objects.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..33c820d
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/5-workload/snaps-few-objects.yaml
@@ -0,0 +1,14 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/6-next-mon/monb.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/6-next-mon/monb.yaml
new file mode 100644
index 0000000..22e87c7
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/6-next-mon/monb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/+ b/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/radosbench.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/radosbench.yaml
new file mode 100644
index 0000000..b5121f0
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/radosbench.yaml
@@ -0,0 +1,18 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/rbd_api.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/rbd_api.yaml
new file mode 100644
index 0000000..19a24c2
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/7-workload/rbd_api.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+     branch: hammer
+     clients:
+        client.0:
+           - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/8-next-mon/monc.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/8-next-mon/monc.yaml
new file mode 100644
index 0000000..6125368
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/8-next-mon/monc.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/+ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..7714312
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rbd-python.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rbd-python.yaml
new file mode 100644
index 0000000..0ea5639
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rbd-python.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rgw-swift.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rgw-swift.yaml
new file mode 100644
index 0000000..8f14160
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/rgw-swift.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rgw: 
+    client.0:
+    default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/snaps-many-objects.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..39b44f6
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/snaps-many-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/test_cache-pool-snaps.yaml b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/test_cache-pool-snaps.yaml
new file mode 100644
index 0000000..844b47a
--- /dev/null
+++ b/qa/suites/rados/upgrade/hammer-x-singleton/9-workload/test_cache-pool-snaps.yaml
@@ -0,0 +1,36 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - must scrub before tier agent can activate
+tasks:
+workunit:
+  sequential:
+    - exec:
+        client.0:
+          - sudo ceph osd pool create base 4
+          - sudo ceph osd pool create cache 4
+          - sudo ceph osd tier add base cache
+          - sudo ceph osd tier cache-mode cache writeback
+          - sudo ceph osd tier set-overlay base cache
+          - sudo ceph osd pool set cache hit_set_type bloom
+          - sudo ceph osd pool set cache hit_set_count 8
+          - sudo ceph osd pool set cache hit_set_period 3600
+          - sudo ceph osd pool set cache target_max_objects 250
+    - rados:
+        clients: [client.0]
+        pools: [base]
+        ops: 4000
+        objects: 500
+        pool_snaps: true
+        op_weights:
+          read: 100
+          write: 100
+          delete: 50
+          copy_from: 50
+          flush: 50
+          try_flush: 50
+          evict: 50
+          snap_create: 50
+          snap_remove: 50
+          rollback: 50
+    - print: "**** done test_cache-pool-snaps 9-workload"
diff --git a/qa/suites/rados/upgrade/rados.yaml b/qa/suites/rados/upgrade/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/upgrade/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/verify/% b/qa/suites/rados/verify/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/verify/1thrash/default.yaml b/qa/suites/rados/verify/1thrash/default.yaml
new file mode 100644
index 0000000..9435b14
--- /dev/null
+++ b/qa/suites/rados/verify/1thrash/default.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/verify/1thrash/none.yaml b/qa/suites/rados/verify/1thrash/none.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rados/verify/1thrash/none.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/verify/clusters/+ b/qa/suites/rados/verify/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rados/verify/clusters/fixed-2.yaml b/qa/suites/rados/verify/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rados/verify/clusters/openstack.yaml b/qa/suites/rados/verify/clusters/openstack.yaml
new file mode 100644
index 0000000..e4d8423
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/qa/suites/rados/verify/fs/btrfs.yaml b/qa/suites/rados/verify/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rados/verify/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rados/verify/msgr-failures/few.yaml b/qa/suites/rados/verify/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rados/verify/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rados/verify/msgr/async.yaml b/qa/suites/rados/verify/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/qa/suites/rados/verify/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/verify/msgr/random.yaml b/qa/suites/rados/verify/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/qa/suites/rados/verify/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/qa/suites/rados/verify/msgr/simple.yaml b/qa/suites/rados/verify/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/qa/suites/rados/verify/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/qa/suites/rados/verify/rados.yaml b/qa/suites/rados/verify/rados.yaml
new file mode 100644
index 0000000..6d9aa9d
--- /dev/null
+++ b/qa/suites/rados/verify/rados.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify cached snaps: true
diff --git a/qa/suites/rados/verify/tasks/mon_recovery.yaml b/qa/suites/rados/verify/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..6986303
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/mon_recovery.yaml
@@ -0,0 +1,2 @@
+tasks:
+- mon_recovery:
diff --git a/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/qa/suites/rados/verify/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..11e3858
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/rados_api_tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+    conf:
+      client:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+        debug monc: 20
+tasks:
+- workunit:
+    timeout: 6h
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/qa/suites/rados/verify/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..853da39
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/rados_cls_all.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls
diff --git a/qa/suites/rados/verify/validater/lockdep.yaml b/qa/suites/rados/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/qa/suites/rados/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        lockdep: true
diff --git a/qa/suites/rados/verify/validater/valgrind.yaml b/qa/suites/rados/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..dc63731
--- /dev/null
+++ b/qa/suites/rados/verify/validater/valgrind.yaml
@@ -0,0 +1,13 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+      mds: [--tool=memcheck]
diff --git a/qa/suites/rbd/basic/% b/qa/suites/rbd/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/basic/base/install.yaml b/qa/suites/rbd/basic/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rbd/basic/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rbd/basic/cachepool/none.yaml b/qa/suites/rbd/basic/cachepool/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/basic/cachepool/small.yaml b/qa/suites/rbd/basic/cachepool/small.yaml
new file mode 100644
index 0000000..8262be3
--- /dev/null
+++ b/qa/suites/rbd/basic/cachepool/small.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
diff --git a/qa/suites/rbd/basic/clusters/+ b/qa/suites/rbd/basic/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/basic/clusters/fixed-1.yaml b/qa/suites/rbd/basic/clusters/fixed-1.yaml
new file mode 100644
index 0000000..3768607
--- /dev/null
+++ b/qa/suites/rbd/basic/clusters/fixed-1.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
diff --git a/qa/suites/rbd/basic/clusters/openstack.yaml b/qa/suites/rbd/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rbd/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/basic/fs/xfs.yaml b/qa/suites/rbd/basic/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/basic/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/basic/msgr-failures/few.yaml b/qa/suites/rbd/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rbd/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rbd/basic/msgr-failures/many.yaml b/qa/suites/rbd/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/rbd/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml
new file mode 100644
index 0000000..a987685
--- /dev/null
+++ b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml
new file mode 100644
index 0000000..a37db05
--- /dev/null
+++ b/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/run_cli_tests.sh
+
diff --git a/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml
new file mode 100644
index 0000000..9ccd57c
--- /dev/null
+++ b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
diff --git a/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml
new file mode 100644
index 0000000..d2c80ad
--- /dev/null
+++ b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_lock_fence.sh
diff --git a/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml
new file mode 100644
index 0000000..263b784
--- /dev/null
+++ b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
diff --git a/qa/suites/rbd/cli/% b/qa/suites/rbd/cli/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/cli/base/install.yaml b/qa/suites/rbd/cli/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rbd/cli/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rbd/cli/cachepool/none.yaml b/qa/suites/rbd/cli/cachepool/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/cli/cachepool/small.yaml b/qa/suites/rbd/cli/cachepool/small.yaml
new file mode 100644
index 0000000..8262be3
--- /dev/null
+++ b/qa/suites/rbd/cli/cachepool/small.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
diff --git a/qa/suites/rbd/cli/clusters/+ b/qa/suites/rbd/cli/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/cli/clusters/fixed-1.yaml b/qa/suites/rbd/cli/clusters/fixed-1.yaml
new file mode 100644
index 0000000..3768607
--- /dev/null
+++ b/qa/suites/rbd/cli/clusters/fixed-1.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
diff --git a/qa/suites/rbd/cli/clusters/openstack.yaml b/qa/suites/rbd/cli/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rbd/cli/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/cli/features/defaults.yaml b/qa/suites/rbd/cli/features/defaults.yaml
new file mode 100644
index 0000000..fd42254
--- /dev/null
+++ b/qa/suites/rbd/cli/features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 2
+        rbd default features: 61
diff --git a/qa/suites/rbd/cli/features/format-1.yaml b/qa/suites/rbd/cli/features/format-1.yaml
new file mode 100644
index 0000000..9c53208
--- /dev/null
+++ b/qa/suites/rbd/cli/features/format-1.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 1
diff --git a/qa/suites/rbd/cli/features/journaling.yaml b/qa/suites/rbd/cli/features/journaling.yaml
new file mode 100644
index 0000000..322a728
--- /dev/null
+++ b/qa/suites/rbd/cli/features/journaling.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 2
+        rbd default features: 125
diff --git a/qa/suites/rbd/cli/features/layering.yaml b/qa/suites/rbd/cli/features/layering.yaml
new file mode 100644
index 0000000..420e3d5
--- /dev/null
+++ b/qa/suites/rbd/cli/features/layering.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 2
+        rbd default features: 1
diff --git a/qa/suites/rbd/cli/fs/xfs.yaml b/qa/suites/rbd/cli/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/cli/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/cli/msgr-failures/few.yaml b/qa/suites/rbd/cli/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rbd/cli/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rbd/cli/msgr-failures/many.yaml b/qa/suites/rbd/cli/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/rbd/cli/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/rbd/cli/workloads/rbd_cli_copy.yaml b/qa/suites/rbd/cli/workloads/rbd_cli_copy.yaml
new file mode 100644
index 0000000..2f99f89
--- /dev/null
+++ b/qa/suites/rbd/cli/workloads/rbd_cli_copy.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/copy.sh
diff --git a/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml b/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..b08f261
--- /dev/null
+++ b/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/import_export.sh
diff --git a/qa/suites/rbd/librbd/% b/qa/suites/rbd/librbd/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/librbd/cache/none.yaml b/qa/suites/rbd/librbd/cache/none.yaml
new file mode 100644
index 0000000..42fd9c9
--- /dev/null
+++ b/qa/suites/rbd/librbd/cache/none.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: false
diff --git a/qa/suites/rbd/librbd/cache/writeback.yaml b/qa/suites/rbd/librbd/cache/writeback.yaml
new file mode 100644
index 0000000..86fe06a
--- /dev/null
+++ b/qa/suites/rbd/librbd/cache/writeback.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: true
diff --git a/qa/suites/rbd/librbd/cache/writethrough.yaml b/qa/suites/rbd/librbd/cache/writethrough.yaml
new file mode 100644
index 0000000..6dc29e1
--- /dev/null
+++ b/qa/suites/rbd/librbd/cache/writethrough.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: true
+        rbd cache max dirty: 0
diff --git a/qa/suites/rbd/librbd/cachepool/none.yaml b/qa/suites/rbd/librbd/cachepool/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/librbd/cachepool/small.yaml b/qa/suites/rbd/librbd/cachepool/small.yaml
new file mode 100644
index 0000000..8262be3
--- /dev/null
+++ b/qa/suites/rbd/librbd/cachepool/small.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
diff --git a/qa/suites/rbd/librbd/clusters/+ b/qa/suites/rbd/librbd/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/librbd/clusters/fixed-3.yaml b/qa/suites/rbd/librbd/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/rbd/librbd/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rbd/librbd/clusters/openstack.yaml b/qa/suites/rbd/librbd/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rbd/librbd/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/librbd/copy-on-read/off.yaml b/qa/suites/rbd/librbd/copy-on-read/off.yaml
new file mode 100644
index 0000000..638d14a
--- /dev/null
+++ b/qa/suites/rbd/librbd/copy-on-read/off.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd clone copy on read: false
diff --git a/qa/suites/rbd/librbd/copy-on-read/on.yaml b/qa/suites/rbd/librbd/copy-on-read/on.yaml
new file mode 100644
index 0000000..ce99e7e
--- /dev/null
+++ b/qa/suites/rbd/librbd/copy-on-read/on.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd clone copy on read: true
diff --git a/qa/suites/rbd/librbd/fs/xfs.yaml b/qa/suites/rbd/librbd/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/librbd/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/librbd/msgr-failures/few.yaml b/qa/suites/rbd/librbd/msgr-failures/few.yaml
new file mode 100644
index 0000000..a8bc683
--- /dev/null
+++ b/qa/suites/rbd/librbd/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+    log-whitelist:
+    - wrongly marked me down
diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml
new file mode 100644
index 0000000..188ddc5
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..ee1de61
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..eda2b5e
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "125"
diff --git a/qa/suites/rbd/librbd/workloads/fsx.yaml b/qa/suites/rbd/librbd/workloads/fsx.yaml
new file mode 100644
index 0000000..6d8cd5f
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/fsx.yaml
@@ -0,0 +1,4 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 20000
diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml
new file mode 100644
index 0000000..a7b3ce7
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..40b2312
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "61"
diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..d0e905f
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "125"
diff --git a/qa/suites/rbd/librbd/workloads/rbd_fio.yaml b/qa/suites/rbd/librbd/workloads/rbd_fio.yaml
new file mode 100644
index 0000000..ff788c6
--- /dev/null
+++ b/qa/suites/rbd/librbd/workloads/rbd_fio.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fio:
+    client.0:
+      fio-io-size: 80%
+      formats: [2]
+      features: [[layering],[layering,exclusive-lock,object-map]]
+      io-engine: rbd
+      test-clone-io: 1
+      rw: randrw
+      runtime: 900
diff --git a/qa/suites/rbd/maintenance/% b/qa/suites/rbd/maintenance/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/maintenance/base/install.yaml b/qa/suites/rbd/maintenance/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rbd/maintenance/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rbd/maintenance/clusters/+ b/qa/suites/rbd/maintenance/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/maintenance/clusters/fixed-3.yaml b/qa/suites/rbd/maintenance/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/rbd/maintenance/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rbd/maintenance/clusters/openstack.yaml b/qa/suites/rbd/maintenance/clusters/openstack.yaml
new file mode 100644
index 0000000..f879958
--- /dev/null
+++ b/qa/suites/rbd/maintenance/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 30000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/maintenance/qemu/xfstests.yaml b/qa/suites/rbd/maintenance/qemu/xfstests.yaml
new file mode 100644
index 0000000..bd0f3e4
--- /dev/null
+++ b/qa/suites/rbd/maintenance/qemu/xfstests.yaml
@@ -0,0 +1,13 @@
+tasks:
+- parallel:
+    - io_workload
+    - op_workload
+io_workload:
+  sequential:
+    - qemu:
+        client.0:
+          clone: true
+          type: block
+          num_rbd: 2
+          test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh
+exclude_arch: armv7l
diff --git a/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml b/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml
new file mode 100644
index 0000000..d7e1c1e
--- /dev/null
+++ b/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml
@@ -0,0 +1,8 @@
+op_workload:
+  sequential:
+    - workunit:
+        clients:
+          client.0:
+          - rbd/qemu_dynamic_features.sh
+        env:
+          IMAGE_NAME: client.0.1-clone
diff --git a/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml b/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml
new file mode 100644
index 0000000..308158f
--- /dev/null
+++ b/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml
@@ -0,0 +1,8 @@
+op_workload:
+  sequential:
+    - workunit:
+        clients:
+          client.0:
+          - rbd/qemu_rebuild_object_map.sh
+        env:
+          IMAGE_NAME: client.0.1-clone
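The maintenance suite's qemu/xfstests.yaml and the workload facets above fit together through named task sequences: the parallel task refers to io_workload and op_workload, which the facets define at the top level, so the qemu xfstests I/O runs alongside the dynamic-features or object-map-rebuild workunit. A sketch of the workload part of one composed job under that assumption (install/ceph steps from base/install.yaml omitted):

tasks:
- parallel:
    - io_workload
    - op_workload
io_workload:
  sequential:
    - qemu:
        client.0:
          clone: true
          type: block
          num_rbd: 2
          test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh
op_workload:
  sequential:
    - workunit:
        clients:
          client.0:
          - rbd/qemu_rebuild_object_map.sh
        env:
          IMAGE_NAME: client.0.1-clone
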
diff --git a/qa/suites/rbd/maintenance/xfs.yaml b/qa/suites/rbd/maintenance/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/maintenance/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/mirror/% b/qa/suites/rbd/mirror/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/mirror/base/install.yaml b/qa/suites/rbd/mirror/base/install.yaml
new file mode 100644
index 0000000..365c3a8
--- /dev/null
+++ b/qa/suites/rbd/mirror/base/install.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: run two ceph clusters and install rbd-mirror
+tasks:
+- install:
+    extra_packages: [rbd-mirror]
+- ceph:
+    cluster: cluster1
+- ceph:
+    cluster: cluster2
diff --git a/qa/suites/rbd/mirror/cluster/+ b/qa/suites/rbd/mirror/cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/mirror/cluster/2-node.yaml b/qa/suites/rbd/mirror/cluster/2-node.yaml
new file mode 100644
index 0000000..e88b12b
--- /dev/null
+++ b/qa/suites/rbd/mirror/cluster/2-node.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: 2 ceph clusters with 3 mons and 3 osds each
+roles:
+- - cluster1.mon.a
+  - cluster1.mon.b
+  - cluster1.osd.0
+  - cluster1.osd.1
+  - cluster1.osd.2
+  - cluster2.mon.c
+  - cluster1.client.0
+  - cluster2.client.0
+- - cluster1.mon.c
+  - cluster2.mon.a
+  - cluster2.mon.b
+  - cluster2.osd.0
+  - cluster2.osd.1
+  - cluster2.osd.2
+  - cluster1.client.mirror
+  - cluster2.client.mirror
diff --git a/qa/suites/rbd/mirror/cluster/openstack.yaml b/qa/suites/rbd/mirror/cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rbd/mirror/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/mirror/fs/xfs.yaml b/qa/suites/rbd/mirror/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/mirror/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/mirror/msgr-failures/few.yaml b/qa/suites/rbd/mirror/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rbd/mirror/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rbd/mirror/msgr-failures/many.yaml b/qa/suites/rbd/mirror/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/rbd/mirror/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml b/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml
new file mode 100644
index 0000000..5a7a5f5
--- /dev/null
+++ b/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: run one rbd-mirror daemon per cluster
+overrides:
+  ceph:
+    conf:
+      client.mirror:
+        # override to make these names predictable
+        admin socket: /var/run/ceph/$cluster-$name.asok
+        pid file: /var/run/ceph/$cluster-$name.pid
+tasks:
+- rbd-mirror:
+    client: cluster1.client.mirror
+- rbd-mirror:
+    client: cluster2.client.mirror
diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml
new file mode 100644
index 0000000..cdc4864
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
+    env:
+      # override workunit setting of CEPH_ARGS='--cluster'
+      CEPH_ARGS: ''
+      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
+      RBD_MIRROR_USE_RBD_MIRROR: '1'
+    timeout: 6h
diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml
new file mode 100644
index 0000000..2e16642
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror: [rbd/rbd_mirror.sh]
+    env:
+      # override workunit setting of CEPH_ARGS='--cluster'
+      CEPH_ARGS: ''
+      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
+      RBD_MIRROR_USE_RBD_MIRROR: '1'
diff --git a/qa/suites/rbd/qemu/% b/qa/suites/rbd/qemu/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/qemu/cache/none.yaml b/qa/suites/rbd/qemu/cache/none.yaml
new file mode 100644
index 0000000..42fd9c9
--- /dev/null
+++ b/qa/suites/rbd/qemu/cache/none.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: false
diff --git a/qa/suites/rbd/qemu/cache/writeback.yaml b/qa/suites/rbd/qemu/cache/writeback.yaml
new file mode 100644
index 0000000..86fe06a
--- /dev/null
+++ b/qa/suites/rbd/qemu/cache/writeback.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: true
diff --git a/qa/suites/rbd/qemu/cache/writethrough.yaml b/qa/suites/rbd/qemu/cache/writethrough.yaml
new file mode 100644
index 0000000..6dc29e1
--- /dev/null
+++ b/qa/suites/rbd/qemu/cache/writethrough.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        rbd cache: true
+        rbd cache max dirty: 0
diff --git a/qa/suites/rbd/qemu/cachepool/ec-cache.yaml b/qa/suites/rbd/qemu/cachepool/ec-cache.yaml
new file mode 100644
index 0000000..554aba3
--- /dev/null
+++ b/qa/suites/rbd/qemu/cachepool/ec-cache.yaml
@@ -0,0 +1,14 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd m=1 k=2
+      - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
+      - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
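
The erasure-code profile above uses k=2 data chunks plus m=1 coding chunk, so the data pool stores objects at 1.5x overhead and tolerates the loss of any single OSD; the remaining commands layer a small replicated cache pool in front of it in writeback mode. A quick way to sanity-check the result on such a cluster (a sketch, not part of the suite):

    sudo ceph osd erasure-code-profile get teuthologyprofile
    # the rbd pool should show "cache" as its tier / overlay
    sudo ceph osd dump | grep pool
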
diff --git a/qa/suites/rbd/qemu/cachepool/none.yaml b/qa/suites/rbd/qemu/cachepool/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/qemu/cachepool/small.yaml b/qa/suites/rbd/qemu/cachepool/small.yaml
new file mode 100644
index 0000000..8262be3
--- /dev/null
+++ b/qa/suites/rbd/qemu/cachepool/small.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
diff --git a/qa/suites/rbd/qemu/clusters/+ b/qa/suites/rbd/qemu/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/qemu/clusters/fixed-3.yaml b/qa/suites/rbd/qemu/clusters/fixed-3.yaml
new file mode 100644
index 0000000..8e622d2
--- /dev/null
+++ b/qa/suites/rbd/qemu/clusters/fixed-3.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rbd/qemu/clusters/openstack.yaml b/qa/suites/rbd/qemu/clusters/openstack.yaml
new file mode 100644
index 0000000..f879958
--- /dev/null
+++ b/qa/suites/rbd/qemu/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 30000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/qemu/features/defaults.yaml b/qa/suites/rbd/qemu/features/defaults.yaml
new file mode 100644
index 0000000..fd42254
--- /dev/null
+++ b/qa/suites/rbd/qemu/features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 2
+        rbd default features: 61
diff --git a/qa/suites/rbd/qemu/features/journaling.yaml b/qa/suites/rbd/qemu/features/journaling.yaml
new file mode 100644
index 0000000..322a728
--- /dev/null
+++ b/qa/suites/rbd/qemu/features/journaling.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default format: 2
+        rbd default features: 125
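
The feature masks in this file and in defaults.yaml above are sums of RBD feature bits: layering (1) + exclusive-lock (4) + object-map (8) + fast-diff (16) + deep-flatten (32) = 61, and adding journaling (64) gives 125. For reference, a hedged sketch of creating an image with the same feature set by name instead of by mask (the pool/image name is illustrative):

    rbd create --size 1024 \
        --image-feature layering --image-feature exclusive-lock \
        --image-feature object-map --image-feature fast-diff \
        --image-feature deep-flatten --image-feature journaling \
        rbd/testimg
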
diff --git a/qa/suites/rbd/qemu/fs/xfs.yaml b/qa/suites/rbd/qemu/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/qemu/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/qemu/msgr-failures/few.yaml b/qa/suites/rbd/qemu/msgr-failures/few.yaml
new file mode 100644
index 0000000..a8bc683
--- /dev/null
+++ b/qa/suites/rbd/qemu/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+    log-whitelist:
+    - wrongly marked me down
diff --git a/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml b/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml
new file mode 100644
index 0000000..e06a587
--- /dev/null
+++ b/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+    all:
+      clone: true
+      test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/bonnie.sh
+exclude_arch: armv7l
diff --git a/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml b/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml
new file mode 100644
index 0000000..a78801d
--- /dev/null
+++ b/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+    all:
+      clone: true
+      test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/fsstress.sh
+exclude_arch: armv7l
diff --git a/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled b/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled
new file mode 100644
index 0000000..c436ba1
--- /dev/null
+++ b/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+    all:
+      test: http://git.ceph.com/?p={repo};a=blob_plain;h={branch};f=qa/workunits/suites/iozone.sh
+      image_size: 20480
+exclude_arch: armv7l
diff --git a/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml b/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml
new file mode 100644
index 0000000..670fd5f
--- /dev/null
+++ b/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml
@@ -0,0 +1,8 @@
+tasks:
+- qemu:
+    all:
+      clone: true
+      type: block
+      num_rbd: 2
+      test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh
+exclude_arch: armv7l
diff --git a/qa/suites/rbd/singleton/% b/qa/suites/rbd/singleton/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/singleton/all/admin_socket.yaml b/qa/suites/rbd/singleton/all/admin_socket.yaml
new file mode 100644
index 0000000..6394cc7
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/admin_socket.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all: [rbd/test_admin_socket.sh]
diff --git a/qa/suites/rbd/singleton/all/formatted-output.yaml b/qa/suites/rbd/singleton/all/formatted-output.yaml
new file mode 100644
index 0000000..8bed75c
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/formatted-output.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- cram:
+    clients:
+      client.0:
+      - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/formatted-output.t
diff --git a/qa/suites/rbd/singleton/all/merge_diff.yaml b/qa/suites/rbd/singleton/all/merge_diff.yaml
new file mode 100644
index 0000000..76a999d
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/merge_diff.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all: [rbd/merge_diff.sh]
diff --git a/qa/suites/rbd/singleton/all/permissions.yaml b/qa/suites/rbd/singleton/all/permissions.yaml
new file mode 100644
index 0000000..bb7e12b
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/permissions.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all: [rbd/permissions.sh]
diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml
new file mode 100644
index 0000000..bf67dba
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml
@@ -0,0 +1,13 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: false
+- workunit:
+    clients:
+      all: [rbd/qemu-iotests.sh]
diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml
new file mode 100644
index 0000000..823e672
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml
@@ -0,0 +1,13 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: true
+- workunit:
+    clients:
+      all: [rbd/qemu-iotests.sh]
diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml
new file mode 100644
index 0000000..64667d9
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml
@@ -0,0 +1,14 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: true
+        rbd cache max dirty: 0
+- workunit:
+    clients:
+      all: [rbd/qemu-iotests.sh]
diff --git a/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml
new file mode 100644
index 0000000..434575d
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd validate pool: false
+- workunit:
+    clients:
+      all:
+        - mon/rbd_snaps_ops.sh
+
diff --git a/qa/suites/rbd/singleton/all/rbd_mirror.yaml b/qa/suites/rbd/singleton/all/rbd_mirror.yaml
new file mode 100644
index 0000000..9caf644
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/rbd_mirror.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all: [rbd/test_rbd_mirror.sh]
diff --git a/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml b/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml
new file mode 100644
index 0000000..0053e66
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml
@@ -0,0 +1,7 @@
+roles:
+- [client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all: [rbd/test_rbdmap_RBDMAPFILE.sh]
diff --git a/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml
new file mode 100644
index 0000000..a28875c
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: false
+- workunit:
+    clients:
+      all: [rbd/read-flags.sh]
diff --git a/qa/suites/rbd/singleton/all/read-flags-writeback.yaml b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml
new file mode 100644
index 0000000..4f2bd19
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: true
+- workunit:
+    clients:
+      all: [rbd/read-flags.sh]
diff --git a/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml
new file mode 100644
index 0000000..543a849
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd cache: true
+        rbd cache max dirty: 0
+- workunit:
+    clients:
+      all: [rbd/read-flags.sh]
diff --git a/qa/suites/rbd/singleton/all/verify_pool.yaml b/qa/suites/rbd/singleton/all/verify_pool.yaml
new file mode 100644
index 0000000..92b1102
--- /dev/null
+++ b/qa/suites/rbd/singleton/all/verify_pool.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all: [rbd/verify_pool.sh]
diff --git a/qa/suites/rbd/singleton/openstack.yaml b/qa/suites/rbd/singleton/openstack.yaml
new file mode 100644
index 0000000..21eca2b
--- /dev/null
+++ b/qa/suites/rbd/singleton/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 30 # GB
diff --git a/qa/suites/rbd/thrash/% b/qa/suites/rbd/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/thrash/base/install.yaml b/qa/suites/rbd/thrash/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rbd/thrash/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rbd/thrash/clusters/+ b/qa/suites/rbd/thrash/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/thrash/clusters/fixed-2.yaml b/qa/suites/rbd/thrash/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rbd/thrash/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rbd/thrash/clusters/openstack.yaml b/qa/suites/rbd/thrash/clusters/openstack.yaml
new file mode 100644
index 0000000..39e43d0
--- /dev/null
+++ b/qa/suites/rbd/thrash/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 8000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/thrash/fs/xfs.yaml b/qa/suites/rbd/thrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/thrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/thrash/msgr-failures/few.yaml b/qa/suites/rbd/thrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rbd/thrash/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rbd/thrash/thrashers/cache.yaml b/qa/suites/rbd/thrash/thrashers/cache.yaml
new file mode 100644
index 0000000..17544f3
--- /dev/null
+++ b/qa/suites/rbd/thrash/thrashers/cache.yaml
@@ -0,0 +1,18 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
+- thrashosds:
+    timeout: 1200
diff --git a/qa/suites/rbd/thrash/thrashers/default.yaml b/qa/suites/rbd/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..89c9bdf
--- /dev/null
+++ b/qa/suites/rbd/thrash/thrashers/default.yaml
@@ -0,0 +1,8 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+tasks:
+- thrashosds:
+    timeout: 1200
diff --git a/qa/suites/rbd/thrash/workloads/journal.yaml b/qa/suites/rbd/thrash/workloads/journal.yaml
new file mode 100644
index 0000000..4dae106
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/journal.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/journal.sh
diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml
new file mode 100644
index 0000000..ee1de61
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml
new file mode 100644
index 0000000..cfa0a25
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml
@@ -0,0 +1,12 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd clone copy on read: true
diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml
new file mode 100644
index 0000000..eda2b5e
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "125"
diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml
new file mode 100644
index 0000000..188ddc5
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml
new file mode 100644
index 0000000..98e0b39
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd cache: true
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml
new file mode 100644
index 0000000..463ba99
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd cache: true
+        rbd cache max dirty: 0
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml
new file mode 100644
index 0000000..0c284ca
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd cache: true
+        rbd clone copy on read: true
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml
new file mode 100644
index 0000000..13e9a78
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml
@@ -0,0 +1,5 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+    journal_replay: True
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_nbd.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_nbd.yaml
new file mode 100644
index 0000000..b6e9d5b
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_nbd.yaml
@@ -0,0 +1,15 @@
+os_type: ubuntu
+overrides:
+  install:
+    ceph:
+      extra_packages: [rbd-nbd]
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+    nbd: True
+    holebdy: 512
+    punch_holes: true
+    readbdy: 512
+    truncbdy: 512
+    writebdy: 512
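
Setting "nbd: True" makes the fsx task drive the image through an rbd-nbd block device instead of plain librbd, which is why the rbd-nbd package is added via extra_packages. Done by hand, the mapping looks roughly like this (image name and device number are illustrative):

    sudo rbd-nbd map rbd/testimg      # prints the device it mapped, e.g. /dev/nbd0
    sudo rbd-nbd unmap /dev/nbd0
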
diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml
new file mode 100644
index 0000000..968665e
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    ops: 6000
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd cache: false
diff --git a/qa/suites/rbd/thrash/workloads/rbd_nbd.yaml b/qa/suites/rbd/thrash/workloads/rbd_nbd.yaml
new file mode 100644
index 0000000..897d07c
--- /dev/null
+++ b/qa/suites/rbd/thrash/workloads/rbd_nbd.yaml
@@ -0,0 +1,10 @@
+os_type: ubuntu
+overrides:
+  install:
+    ceph:
+      extra_packages: [rbd-nbd]
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/rbd-nbd.sh
diff --git a/qa/suites/rbd/valgrind/% b/qa/suites/rbd/valgrind/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/valgrind/base/install.yaml b/qa/suites/rbd/valgrind/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/qa/suites/rbd/valgrind/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rbd/valgrind/clusters/+ b/qa/suites/rbd/valgrind/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rbd/valgrind/clusters/fixed-1.yaml b/qa/suites/rbd/valgrind/clusters/fixed-1.yaml
new file mode 100644
index 0000000..3768607
--- /dev/null
+++ b/qa/suites/rbd/valgrind/clusters/fixed-1.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
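
"osd crush chooseleaf type: 0" tells CRUSH to separate replicas across OSDs rather than hosts, which is what lets this single-node cluster satisfy "osd pool default size: 2". A sketch of inspecting the effect on a running cluster:

    # replicas may land on the same host because chooseleaf acts at the osd level
    ceph osd crush rule dump
    ceph osd pool get rbd size
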
diff --git a/qa/suites/rbd/valgrind/clusters/openstack.yaml b/qa/suites/rbd/valgrind/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/rbd/valgrind/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rbd/valgrind/fs/xfs.yaml b/qa/suites/rbd/valgrind/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rbd/valgrind/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rbd/valgrind/validator/memcheck.yaml b/qa/suites/rbd/valgrind/validator/memcheck.yaml
new file mode 100644
index 0000000..64e7143
--- /dev/null
+++ b/qa/suites/rbd/valgrind/validator/memcheck.yaml
@@ -0,0 +1,10 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  rbd_fsx:
+    valgrind: ["--tool=memcheck"]
+  workunit:
+    env:
+      VALGRIND: "memcheck"
diff --git a/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml
new file mode 100644
index 0000000..188ddc5
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..ee1de61
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
diff --git a/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..eda2b5e
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "125"
diff --git a/qa/suites/rbd/valgrind/workloads/fsx.yaml b/qa/suites/rbd/valgrind/workloads/fsx.yaml
new file mode 100644
index 0000000..5c745a2
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/fsx.yaml
@@ -0,0 +1,4 @@
+tasks:
+- rbd_fsx:
+    clients: [client.0]
+    size: 134217728
diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml
new file mode 100644
index 0000000..a7b3ce7
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..40b2312
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "61"
diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..d0e905f
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "125"
diff --git a/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml b/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml
new file mode 100644
index 0000000..4a2ee40
--- /dev/null
+++ b/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_rbd_mirror.sh
diff --git a/qa/suites/rest/basic/tasks/rest_test.yaml b/qa/suites/rest/basic/tasks/rest_test.yaml
new file mode 100644
index 0000000..1adc953
--- /dev/null
+++ b/qa/suites/rest/basic/tasks/rest_test.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+  - client.0
+
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    log-whitelist:
+    - wrongly marked me down
+    conf:
+      client.rest0:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+- rest-api: [client.0]
+- workunit:
+    clients:
+      client.0:
+         - rest/test.py
diff --git a/qa/suites/rgw/multifs/% b/qa/suites/rgw/multifs/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rgw/multifs/clusters/fixed-2.yaml b/qa/suites/rgw/multifs/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rgw/multifs/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rgw/multifs/frontend/apache.yaml b/qa/suites/rgw/multifs/frontend/apache.yaml
new file mode 100644
index 0000000..53ebf75
--- /dev/null
+++ b/qa/suites/rgw/multifs/frontend/apache.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: apache
diff --git a/qa/suites/rgw/multifs/frontend/civetweb.yaml b/qa/suites/rgw/multifs/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/qa/suites/rgw/multifs/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: civetweb 
diff --git a/qa/suites/rgw/multifs/fs/btrfs.yaml b/qa/suites/rgw/multifs/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rgw/multifs/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rgw/multifs/fs/xfs.yaml b/qa/suites/rgw/multifs/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rgw/multifs/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rgw/multifs/overrides.yaml b/qa/suites/rgw/multifs/overrides.yaml
new file mode 100644
index 0000000..bc1e82e
--- /dev/null
+++ b/qa/suites/rgw/multifs/overrides.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    frontend: civetweb
diff --git a/qa/suites/rgw/multifs/rgw_pool_type/ec-cache.yaml b/qa/suites/rgw/multifs/rgw_pool_type/ec-cache.yaml
new file mode 100644
index 0000000..6462fbe
--- /dev/null
+++ b/qa/suites/rgw/multifs/rgw_pool_type/ec-cache.yaml
@@ -0,0 +1,6 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/multifs/rgw_pool_type/ec-profile.yaml b/qa/suites/rgw/multifs/rgw_pool_type/ec-profile.yaml
new file mode 100644
index 0000000..52798f8
--- /dev/null
+++ b/qa/suites/rgw/multifs/rgw_pool_type/ec-profile.yaml
@@ -0,0 +1,10 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    erasure_code_profile:
+      name: testprofile
+      k: 3
+      m: 1
+      ruleset-failure-domain: osd
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/multifs/rgw_pool_type/ec.yaml b/qa/suites/rgw/multifs/rgw_pool_type/ec.yaml
new file mode 100644
index 0000000..7c99b7f
--- /dev/null
+++ b/qa/suites/rgw/multifs/rgw_pool_type/ec.yaml
@@ -0,0 +1,5 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/multifs/rgw_pool_type/replicated.yaml b/qa/suites/rgw/multifs/rgw_pool_type/replicated.yaml
new file mode 100644
index 0000000..c91709e
--- /dev/null
+++ b/qa/suites/rgw/multifs/rgw_pool_type/replicated.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: false
diff --git a/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml
new file mode 100644
index 0000000..c518d0e
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml
@@ -0,0 +1,10 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+    clients:
+      client.0:
+        - rgw/s3_bucket_quota.pl
diff --git a/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml
new file mode 100644
index 0000000..b042aa8
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml
@@ -0,0 +1,10 @@
+# Amazon::S3 is not available on el7
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+    clients:
+      client.0:
+        - rgw/s3_multipart_upload.pl
diff --git a/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml
new file mode 100644
index 0000000..c7efaa1
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3readwrite:
+    client.0:
+      rgw_server: client.0
+      readwrite:
+        bucket: rwtest
+        readers: 10
+        writers: 3
+        duration: 300
+        files:
+          num: 10
+          size: 2000
+          stddev: 500 
diff --git a/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml
new file mode 100644
index 0000000..47b3c18
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3roundtrip:
+    client.0:
+      rgw_server: client.0
+      roundtrip:
+        bucket: rttest
+        readers: 10
+        writers: 3
+        duration: 300
+        files:
+          num: 10
+          size: 2000
+          stddev: 500 
diff --git a/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..91dddaf
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      force-branch: ceph-jewel
+      rgw_server: client.0
diff --git a/qa/suites/rgw/multifs/tasks/rgw_swift.yaml b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..569741b
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- swift:
+    client.0:
+      rgw_server: client.0
diff --git a/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml
new file mode 100644
index 0000000..ef9d6df
--- /dev/null
+++ b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml
@@ -0,0 +1,10 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+    clients:
+      client.0:
+        - rgw/s3_user_quota.pl
diff --git a/qa/suites/rgw/singleton/% b/qa/suites/rgw/singleton/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml b/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml
new file mode 100644
index 0000000..3100691
--- /dev/null
+++ b/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml
@@ -0,0 +1,64 @@
+roles:
+- [mon.a, osd.0, osd.1, osd.2, osd.3, client.0, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        debug ms: 1
+        rgw gc obj min wait: 15
+        rgw data log window: 30
+      osd:
+        debug ms: 1
+        debug objclass : 20
+      client.0:
+        rgw region: region0
+        rgw zone: r0z0
+        rgw region root pool: .rgw.region.0
+        rgw zone root pool: .rgw.zone.0
+        rgw gc pool: .rgw.gc.0
+        rgw user uid pool: .users.uid.0
+        rgw user keys pool: .users.0
+        rgw log data: True
+        rgw log meta: True
+      client.1:
+        rgw region: region0
+        rgw zone: r0z1
+        rgw region root pool: .rgw.region.0
+        rgw zone root pool: .rgw.zone.1
+        rgw gc pool: .rgw.gc.1
+        rgw user uid pool: .users.uid.1
+        rgw user keys pool: .users.1
+        rgw log data: False
+        rgw log meta: False
+- rgw:
+    realm:
+      realm0
+    regions:
+      region0:
+        api name: api1
+        is master: True
+        master zone: r0z0
+        zones: [r0z0, r0z1]
+    client.0:
+      system user:
+        name: client0-system-user
+        access key: 0te6NH5mcdcq0Tc5i8i2
+        secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+    client.1:
+      system user:
+        name: client1-system-user
+        access key: 1te6NH5mcdcq0Tc5i8i3
+        secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- radosgw-agent:
+    client.0:
+      max-entries: 10
+      src: client.0
+      dest: client.1
+- sleep:
+    duration: 30
+- radosgw-admin:
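
The rgw task sets up the system users defined above in each zone; done by hand, the equivalent is roughly the following radosgw-admin call (the display name is illustrative, the keys are the ones from this file):

    radosgw-admin user create --uid=client0-system-user \
        --display-name="client0 system user" \
        --access-key=0te6NH5mcdcq0Tc5i8i2 \
        --secret=Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv \
        --system
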
diff --git a/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml b/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml
new file mode 100644
index 0000000..93b7551
--- /dev/null
+++ b/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml
@@ -0,0 +1,67 @@
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mon.c, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+tasks:
+- install: 
+- ceph:
+    conf:
+      client:
+        debug ms: 1
+        rgw gc obj min wait: 15
+      osd:
+        debug ms: 1
+        debug objclass : 20
+      client.0: 
+        rgw region: region0
+        rgw zone: r0z1
+        rgw region root pool: .rgw.region.0
+        rgw zone root pool: .rgw.zone.0
+        rgw gc pool: .rgw.gc.0
+        rgw user uid pool: .users.uid.0
+        rgw user keys pool: .users.0
+        rgw log data: True
+        rgw log meta: True
+      client.1: 
+        rgw region: region1
+        rgw zone: r1z1
+        rgw region root pool: .rgw.region.1
+        rgw zone root pool: .rgw.zone.1
+        rgw gc pool: .rgw.gc.1
+        rgw user uid pool: .users.uid.1
+        rgw user keys pool: .users.1
+        rgw log data: False
+        rgw log meta: False
+- rgw:
+    realm:
+      realm0
+    regions:
+      region0:
+        api name: api1
+        is master: True
+        master zone: r0z1
+        zones: [r0z1]
+      region1:
+        api name: api1
+        is master: False
+        master zone: r1z1
+        zones: [r1z1]
+    client.0:
+      system user:
+        name: client0-system-user
+        access key: 0te6NH5mcdcq0Tc5i8i2
+        secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+    client.1:
+      system user:
+        name: client1-system-user
+        access key: 1te6NH5mcdcq0Tc5i8i3
+        secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- radosgw-agent:
+    client.0:
+      src: client.0
+      dest: client.1
+      metadata-only: true
+- radosgw-admin:
diff --git a/qa/suites/rgw/singleton/all/radosgw-admin.yaml b/qa/suites/rgw/singleton/all/radosgw-admin.yaml
new file mode 100644
index 0000000..e63d3ec
--- /dev/null
+++ b/qa/suites/rgw/singleton/all/radosgw-admin.yaml
@@ -0,0 +1,19 @@
+roles:
+- [mon.a, osd.0, client.0, osd.1, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        debug ms: 1
+        rgw gc obj min wait: 15
+      osd:
+        debug ms: 1
+        debug objclass : 20
+- rgw:
+    client.0:
+- radosgw-admin:
diff --git a/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml b/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml
new file mode 100644
index 0000000..5da1135
--- /dev/null
+++ b/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml
@@ -0,0 +1,81 @@
+overrides:
+  s3readwrite:
+    s3:
+      user_id: s3readwrite-test-user
+      display_name: test user for the s3readwrite tests
+      email: tester at inktank
+      access_key: 2te6NH5mcdcq0Tc5i8i4
+      secret_key: Qy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXx
+    readwrite:
+      deterministic_file_names: True
+      duration: 30
+      bucket: testbucket
+      files:
+        num: 10
+        size: 2000
+        stddev: 500
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mon.c, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+
+tasks:
+- install:
+- ceph:
+    conf:
+        client:
+          rgw region: default
+          rgw zone: r1z1
+          rgw region root pool: .rgw
+          rgw zone root pool: .rgw
+          rgw domain root: .rgw
+          rgw gc pool: .rgw.gc
+          rgw user uid pool: .users.uid
+          rgw user keys pool: .users
+- rgw:
+    realm:
+      realm0
+    regions:
+      default:
+        api name: api1
+        is master: true
+        master zone: r1z1
+        zones: [r1z1]
+    client.0:
+      system user:
+        name: nr-system
+        access key: 0te6NH5mcdcq0Tc5i8i2
+        secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+- s3readwrite:
+    client.0:
+      extra_args: ['--no-cleanup']
+      s3:
+        delete_user: False
+      readwrite:
+        writers: 1
+        readers: 0
+- rgw:
+    realm:
+      realm0
+    regions:
+      default:
+        api name: api1
+        is master: true
+        master zone: r1z1
+        zones: [r1z1]
+    client.1:
+      system user:
+        name: r2-system
+        access key: 1te6NH5mcdcq0Tc5i8i3
+        secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- s3readwrite:
+    client.1:
+      s3:
+        create_user: False
+      readwrite:
+        writers: 0
+        readers: 2
+
diff --git a/qa/suites/rgw/singleton/frontend/apache.yaml b/qa/suites/rgw/singleton/frontend/apache.yaml
new file mode 100644
index 0000000..53ebf75
--- /dev/null
+++ b/qa/suites/rgw/singleton/frontend/apache.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: apache
diff --git a/qa/suites/rgw/singleton/frontend/civetweb.yaml b/qa/suites/rgw/singleton/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/qa/suites/rgw/singleton/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: civetweb 
diff --git a/qa/suites/rgw/singleton/fs/xfs.yaml b/qa/suites/rgw/singleton/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rgw/singleton/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rgw/singleton/overrides.yaml b/qa/suites/rgw/singleton/overrides.yaml
new file mode 100644
index 0000000..bc1e82e
--- /dev/null
+++ b/qa/suites/rgw/singleton/overrides.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    frontend: civetweb
diff --git a/qa/suites/rgw/singleton/rgw_pool_type/ec-cache.yaml b/qa/suites/rgw/singleton/rgw_pool_type/ec-cache.yaml
new file mode 100644
index 0000000..6462fbe
--- /dev/null
+++ b/qa/suites/rgw/singleton/rgw_pool_type/ec-cache.yaml
@@ -0,0 +1,6 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/singleton/rgw_pool_type/ec-profile.yaml b/qa/suites/rgw/singleton/rgw_pool_type/ec-profile.yaml
new file mode 100644
index 0000000..52798f8
--- /dev/null
+++ b/qa/suites/rgw/singleton/rgw_pool_type/ec-profile.yaml
@@ -0,0 +1,10 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    erasure_code_profile:
+      name: testprofile
+      k: 3
+      m: 1
+      ruleset-failure-domain: osd
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/singleton/rgw_pool_type/ec.yaml b/qa/suites/rgw/singleton/rgw_pool_type/ec.yaml
new file mode 100644
index 0000000..7c99b7f
--- /dev/null
+++ b/qa/suites/rgw/singleton/rgw_pool_type/ec.yaml
@@ -0,0 +1,5 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/singleton/rgw_pool_type/replicated.yaml b/qa/suites/rgw/singleton/rgw_pool_type/replicated.yaml
new file mode 100644
index 0000000..c91709e
--- /dev/null
+++ b/qa/suites/rgw/singleton/rgw_pool_type/replicated.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: false
diff --git a/qa/suites/rgw/singleton/xfs.yaml b/qa/suites/rgw/singleton/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/rgw/singleton/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/rgw/verify/% b/qa/suites/rgw/verify/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/rgw/verify/clusters/fixed-2.yaml b/qa/suites/rgw/verify/clusters/fixed-2.yaml
new file mode 100644
index 0000000..861e509
--- /dev/null
+++ b/qa/suites/rgw/verify/clusters/fixed-2.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
diff --git a/qa/suites/rgw/verify/frontend/apache.yaml b/qa/suites/rgw/verify/frontend/apache.yaml
new file mode 100644
index 0000000..53ebf75
--- /dev/null
+++ b/qa/suites/rgw/verify/frontend/apache.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: apache
diff --git a/qa/suites/rgw/verify/frontend/civetweb.yaml b/qa/suites/rgw/verify/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/qa/suites/rgw/verify/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: civetweb 
diff --git a/qa/suites/rgw/verify/fs/btrfs.yaml b/qa/suites/rgw/verify/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/rgw/verify/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/rgw/verify/msgr-failures/few.yaml b/qa/suites/rgw/verify/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/rgw/verify/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/rgw/verify/overrides.yaml b/qa/suites/rgw/verify/overrides.yaml
new file mode 100644
index 0000000..bc1e82e
--- /dev/null
+++ b/qa/suites/rgw/verify/overrides.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    frontend: civetweb
diff --git a/qa/suites/rgw/verify/rgw_pool_type/ec-cache.yaml b/qa/suites/rgw/verify/rgw_pool_type/ec-cache.yaml
new file mode 100644
index 0000000..6462fbe
--- /dev/null
+++ b/qa/suites/rgw/verify/rgw_pool_type/ec-cache.yaml
@@ -0,0 +1,6 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/verify/rgw_pool_type/ec-profile.yaml b/qa/suites/rgw/verify/rgw_pool_type/ec-profile.yaml
new file mode 100644
index 0000000..52798f8
--- /dev/null
+++ b/qa/suites/rgw/verify/rgw_pool_type/ec-profile.yaml
@@ -0,0 +1,10 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    erasure_code_profile:
+      name: testprofile
+      k: 3
+      m: 1
+      ruleset-failure-domain: osd
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/verify/rgw_pool_type/ec.yaml b/qa/suites/rgw/verify/rgw_pool_type/ec.yaml
new file mode 100644
index 0000000..7c99b7f
--- /dev/null
+++ b/qa/suites/rgw/verify/rgw_pool_type/ec.yaml
@@ -0,0 +1,5 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+  s3tests:
+    slow_backend: true
diff --git a/qa/suites/rgw/verify/rgw_pool_type/replicated.yaml b/qa/suites/rgw/verify/rgw_pool_type/replicated.yaml
new file mode 100644
index 0000000..c91709e
--- /dev/null
+++ b/qa/suites/rgw/verify/rgw_pool_type/replicated.yaml
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: false
diff --git a/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml b/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..29ff9de
--- /dev/null
+++ b/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml
@@ -0,0 +1,12 @@
+tasks:
+- install:
+    flavor: notcmalloc
+    debuginfo: true
+- ceph:
+- rgw:
+    client.0:
+      valgrind: [--tool=memcheck]
+- s3tests:
+    client.0:
+      force-branch: ceph-jewel
+      rgw_server: client.0
diff --git a/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml b/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml
new file mode 100644
index 0000000..7c9b0a2
--- /dev/null
+++ b/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml
@@ -0,0 +1,63 @@
+tasks:
+- install:
+    flavor: notcmalloc
+    debuginfo: true
+- ceph:
+    conf:
+      client.0: 
+        rgw region:  zero
+        rgw zone: r0z1
+        rgw region root pool: .rgw.region.0
+        rgw zone root pool: .rgw.zone.0
+        rgw gc pool: .rgw.gc.0
+        rgw user uid pool: .users.uid.0
+        rgw user keys pool: .users.0
+        rgw log data: True
+        rgw log meta: True
+      client.1: 
+        rgw region: one
+        rgw zone: r1z1
+        rgw region root pool: .rgw.region.1
+        rgw zone root pool: .rgw.zone.1
+        rgw gc pool: .rgw.gc.1
+        rgw user uid pool: .users.uid.1
+        rgw user keys pool: .users.1
+        rgw log data: False
+        rgw log meta: False
+- rgw:
+    default_idle_timeout: 300
+    realm:
+      realm0
+    regions:
+      zero:
+        api name: api1
+        is master: True
+        master zone: r0z1
+        zones: [r0z1]
+      one:
+        api name: api1
+        is master: False
+        master zone: r1z1
+        zones: [r1z1]
+    client.0:
+      valgrind: [--tool=memcheck]
+      system user:
+        name: client0-system-user
+        access key: 1te6NH5mcdcq0Tc5i8i2
+        secret key: 1y4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+    client.1:
+      valgrind: [--tool=memcheck]
+      system user:
+        name: client1-system-user
+        access key: 0te6NH5mcdcq0Tc5i8i2
+        secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+- radosgw-agent:
+    client.0:
+      src: client.0
+      dest: client.1
+      metadata-only: true
+- s3tests:
+    client.0:
+      force-branch: ceph-jewel
+      idle_timeout: 300
+      rgw_server: client.0
diff --git a/qa/suites/rgw/verify/tasks/rgw_swift.yaml b/qa/suites/rgw/verify/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..dba0354
--- /dev/null
+++ b/qa/suites/rgw/verify/tasks/rgw_swift.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+    flavor: notcmalloc
+    debuginfo: true
+- ceph:
+- rgw:
+    client.0:
+      valgrind: [--tool=memcheck]
+- swift:
+    client.0:
+      rgw_server: client.0
diff --git a/qa/suites/rgw/verify/validater/lockdep.yaml b/qa/suites/rgw/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..941fe12
--- /dev/null
+++ b/qa/suites/rgw/verify/validater/lockdep.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        lockdep: true
+      mon:
+        lockdep: true
diff --git a/qa/suites/rgw/verify/validater/valgrind.yaml b/qa/suites/rgw/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..dc63731
--- /dev/null
+++ b/qa/suites/rgw/verify/validater/valgrind.yaml
@@ -0,0 +1,13 @@
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+      mds: [--tool=memcheck]
diff --git a/qa/suites/samba/% b/qa/suites/samba/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/samba/clusters/samba-basic.yaml b/qa/suites/samba/clusters/samba-basic.yaml
new file mode 100644
index 0000000..86c71b8
--- /dev/null
+++ b/qa/suites/samba/clusters/samba-basic.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
+- [samba.0, client.0, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
diff --git a/qa/suites/samba/fs/btrfs.yaml b/qa/suites/samba/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/samba/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/samba/install/install.yaml b/qa/suites/samba/install/install.yaml
new file mode 100644
index 0000000..c53f9c5
--- /dev/null
+++ b/qa/suites/samba/install/install.yaml
@@ -0,0 +1,9 @@
+# we currently can't install Samba on RHEL; need a gitbuilder and code updates
+os_type: ubuntu
+
+tasks:
+- install:
+- install:
+    project: samba
+    extra_packages: ['samba']
+- ceph:
diff --git a/qa/suites/samba/mount/fuse.yaml b/qa/suites/samba/mount/fuse.yaml
new file mode 100644
index 0000000..d00ffdb
--- /dev/null
+++ b/qa/suites/samba/mount/fuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse: [client.0]
+- samba:
+    samba.0:
+      ceph: "{testdir}/mnt.0"
+
diff --git a/qa/suites/samba/mount/kclient.yaml b/qa/suites/samba/mount/kclient.yaml
new file mode 100644
index 0000000..8baa09f
--- /dev/null
+++ b/qa/suites/samba/mount/kclient.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+kernel:
+  client:
+    branch: testing
+tasks:
+- kclient: [client.0]
+- samba:
+    samba.0:
+      ceph: "{testdir}/mnt.0"
+
diff --git a/qa/suites/samba/mount/native.yaml b/qa/suites/samba/mount/native.yaml
new file mode 100644
index 0000000..09b8c1c
--- /dev/null
+++ b/qa/suites/samba/mount/native.yaml
@@ -0,0 +1,2 @@
+tasks:
+- samba:
diff --git a/qa/suites/samba/mount/noceph.yaml b/qa/suites/samba/mount/noceph.yaml
new file mode 100644
index 0000000..3cad474
--- /dev/null
+++ b/qa/suites/samba/mount/noceph.yaml
@@ -0,0 +1,5 @@
+tasks:
+- localdir: [client.0]
+- samba:
+    samba.0:
+      ceph: "{testdir}/mnt.0"
diff --git a/qa/suites/samba/workload/cifs-dbench.yaml b/qa/suites/samba/workload/cifs-dbench.yaml
new file mode 100644
index 0000000..c13c1c0
--- /dev/null
+++ b/qa/suites/samba/workload/cifs-dbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cifs-mount:
+    client.1:
+      share: ceph
+- workunit:
+    clients:
+      client.1:
+        - suites/dbench.sh
diff --git a/qa/suites/samba/workload/cifs-fsstress.yaml b/qa/suites/samba/workload/cifs-fsstress.yaml
new file mode 100644
index 0000000..ff003af
--- /dev/null
+++ b/qa/suites/samba/workload/cifs-fsstress.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cifs-mount:
+    client.1:
+      share: ceph
+- workunit:
+    clients:
+      client.1:
+        - suites/fsstress.sh
diff --git a/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled
new file mode 100644
index 0000000..ab9ff8a
--- /dev/null
+++ b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled
@@ -0,0 +1,9 @@
+tasks:
+- cifs-mount:
+    client.1:
+      share: ceph
+- workunit:
+    clients:
+      client.1:
+        - kernel_untar_build.sh
+
diff --git a/qa/suites/samba/workload/smbtorture.yaml b/qa/suites/samba/workload/smbtorture.yaml
new file mode 100644
index 0000000..823489a
--- /dev/null
+++ b/qa/suites/samba/workload/smbtorture.yaml
@@ -0,0 +1,39 @@
+tasks:
+- pexec:
+    client.1:
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
+#      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
+#      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
+#      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
diff --git a/qa/suites/smoke/1node/% b/qa/suites/smoke/1node/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/smoke/1node/clusters/+ b/qa/suites/smoke/1node/clusters/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/smoke/1node/clusters/fixed-1.yaml b/qa/suites/smoke/1node/clusters/fixed-1.yaml
new file mode 100644
index 0000000..3768607
--- /dev/null
+++ b/qa/suites/smoke/1node/clusters/fixed-1.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
+        osd crush chooseleaf type: 0
+        osd pool default pg num:  128
+        osd pool default pgp num:  128
+roles:
+- [mon.a, osd.0, osd.1, osd.2, client.0]
diff --git a/qa/suites/smoke/1node/clusters/openstack.yaml b/qa/suites/smoke/1node/clusters/openstack.yaml
new file mode 100644
index 0000000..39e43d0
--- /dev/null
+++ b/qa/suites/smoke/1node/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 8000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/smoke/1node/tasks/ceph-deploy.yaml b/qa/suites/smoke/1node/tasks/ceph-deploy.yaml
new file mode 100644
index 0000000..5a30923
--- /dev/null
+++ b/qa/suites/smoke/1node/tasks/ceph-deploy.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: |
+   Run ceph-deploy cli tests on one node
+   and verify the cli works and the cluster can reach
+   HEALTH_OK state (implicitly verifying the daemons via init).
+tasks:
+- ceph_deploy.single_node_test: null
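
A note on the empty '%' and '+' entries above: by the teuthology suite-builder
convention, '%' marks a directory whose facet subdirectories are combined as a
cross product, and '+' collapses clusters/ into a single facet, so smoke/1node
should expand to a single job whose YAML is roughly the concatenation sketched
here (a reading of the builder, not part of the diff; openstack block elided):

roles:
- [mon.a, osd.0, osd.1, osd.2, client.0]
overrides:
  ceph-deploy:
    conf:
      global:
        osd pool default size: 2
        osd crush chooseleaf type: 0
        osd pool default pg num: 128
        osd pool default pgp num: 128
tasks:
- ceph_deploy.single_node_test: null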
diff --git a/qa/suites/smoke/basic/% b/qa/suites/smoke/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml b/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml
new file mode 100644
index 0000000..499c84c
--- /dev/null
+++ b/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/smoke/basic/fs/btrfs.yaml b/qa/suites/smoke/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/smoke/basic/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..2ee4177
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/blogbench.sh
diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..cd12eae
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..c4be4cd
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- ceph-fuse: [client.0]
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..d042daa
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      mds:
+        debug mds: 20
+        debug ms: 1
+      client:
+        debug client: 20
+        debug ms: 1
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml
new file mode 100644
index 0000000..29ccf46
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- kclient:
+- workunit:
+    clients:
+      all:
+        - direct_io
diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..01d7470
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/dbench.sh
diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..42d6b97
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsstress.sh
diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..6818a2a
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/pjd.sh
diff --git a/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..4996d33
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,18 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 1
+        debug client: 20
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs/test.sh
diff --git a/qa/suites/smoke/basic/tasks/mon_thrash.yaml b/qa/suites/smoke/basic/tasks/mon_thrash.yaml
new file mode 100644
index 0000000..1ef7919
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/mon_thrash.yaml
@@ -0,0 +1,23 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+    conf:
+      global:
+        ms inject delay max: 1
+        ms inject delay probability: 0.005
+        ms inject delay type: mon
+        ms inject internal delays: 0.002
+        ms inject socket failures: 2500
+tasks:
+- install: null
+- ceph:
+    fs: xfs
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
+    thrash_many: true
+- workunit:
+    clients:
+      client.0:
+      - rados/test.sh
diff --git a/qa/suites/smoke/basic/tasks/rados_api_tests.yaml b/qa/suites/smoke/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..e84459b
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install: null
+- ceph:
+    fs: ext4
+    log-whitelist:
+    - reached quota
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
+    timeout: 1200
+- workunit:
+    clients:
+      client.0:
+      - rados/test.sh
diff --git a/qa/suites/smoke/basic/tasks/rados_bench.yaml b/qa/suites/smoke/basic/tasks/rados_bench.yaml
new file mode 100644
index 0000000..9e90158
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_bench.yaml
@@ -0,0 +1,36 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject delay max: 1
+        ms inject delay probability: 0.005
+        ms inject delay type: osd
+        ms inject internal delays: 0.002
+        ms inject socket failures: 2500
+tasks:
+- install: null
+- ceph:
+    fs: xfs
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
+    timeout: 1200
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
diff --git a/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
new file mode 100644
index 0000000..a060fbc
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
@@ -0,0 +1,41 @@
+tasks:
+- install: null
+- ceph:
+    fs: btrfs
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    chance_pgnum_grow: 2
+    chance_pgpnum_fix: 1
+    timeout: 1200
+- exec:
+    client.0:
+    - sudo ceph osd pool create base 4
+    - sudo ceph osd pool create cache 4
+    - sudo ceph osd tier add base cache
+    - sudo ceph osd tier cache-mode cache writeback
+    - sudo ceph osd tier set-overlay base cache
+    - sudo ceph osd pool set cache hit_set_type bloom
+    - sudo ceph osd pool set cache hit_set_count 8
+    - sudo ceph osd pool set cache hit_set_period 3600
+    - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+    clients:
+    - client.0
+    objects: 500
+    op_weights:
+      copy_from: 50
+      delete: 50
+      evict: 50
+      flush: 50
+      read: 100
+      rollback: 50
+      snap_create: 50
+      snap_remove: 50
+      try_flush: 50
+      write: 100
+    ops: 4000
+    pool_snaps: true
+    pools:
+    - base
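
The exec block above stands up a writeback cache tier in front of the 'base'
pool before the rados task drives snapshot and flush/evict traffic through it.
A minimal sketch of inspecting the resulting tier by hand, assuming the same
sudo/admin access the exec step already uses (not part of the suite):

- exec:
    client.0:
    - sudo ceph osd dump | grep -E 'tier|cache'
    - sudo ceph osd pool get cache hit_set_type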
diff --git a/qa/suites/smoke/basic/tasks/rados_cls_all.yaml b/qa/suites/smoke/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..7f18a7e
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      client.0:
+        - cls
diff --git a/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml b/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml
new file mode 100644
index 0000000..d9282bf
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml
@@ -0,0 +1,31 @@
+tasks:
+- install: null
+- ceph:
+    fs: xfs
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+    timeout: 1200
+- rados:
+    clients:
+    - client.0
+    ec_pool: true
+    max_in_flight: 64
+    max_seconds: 600
+    objects: 1024
+    op_weights:
+      append: 100
+      copy_from: 50
+      delete: 50
+      read: 100
+      rmattr: 25
+      rollback: 50
+      setattr: 25
+      snap_create: 50
+      snap_remove: 50
+      write: 0
+    ops: 400000
+    size: 16384
diff --git a/qa/suites/smoke/basic/tasks/rados_python.yaml b/qa/suites/smoke/basic/tasks/rados_python.yaml
new file mode 100644
index 0000000..399967c
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_python.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+    log-whitelist:
+      - wrongly marked me down
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - rados/test_python.sh
diff --git a/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 0000000..0d472a3
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    fs: ext4
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-mix.sh
diff --git a/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml
new file mode 100644
index 0000000..a0dda21
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..e9f38d3
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
diff --git a/qa/suites/smoke/basic/tasks/rbd_fsx.yaml b/qa/suites/smoke/basic/tasks/rbd_fsx.yaml
new file mode 100644
index 0000000..ed737a3
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rbd_fsx.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd cache: true
+      global:
+        ms inject socket failures: 5000
+tasks:
+- install: null
+- ceph:
+    fs: xfs
+- thrashosds:
+    timeout: 1200
+- rbd_fsx:
+    clients:
+    - client.0
+    ops: 2000
diff --git a/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml
new file mode 100644
index 0000000..7ed61d0
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- ceph-fuse:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "1"
diff --git a/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..e4961dd
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml
@@ -0,0 +1,18 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+      client:
+        rbd default features: 5
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- rbd:
+    all:
+      image_size: 20480
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml b/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml
new file mode 100644
index 0000000..49f28b5
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml
@@ -0,0 +1,13 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+    frontend: civetweb
+tasks:
+- install:
+- ceph:
+    fs: btrfs
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
diff --git a/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml b/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..7321891
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
diff --git a/qa/suites/smoke/basic/tasks/rgw_swift.yaml b/qa/suites/smoke/basic/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..57c7226
--- /dev/null
+++ b/qa/suites/smoke/basic/tasks/rgw_swift.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+    fs: ext4
+- rgw: [client.0]
+- swift:
+    client.0:
+      rgw_server: client.0
diff --git a/qa/suites/smoke/systemd/distro/centos.yaml b/qa/suites/smoke/systemd/distro/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/smoke/systemd/distro/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/stress/bench/% b/qa/suites/stress/bench/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml b/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml
new file mode 100644
index 0000000..499c84c
--- /dev/null
+++ b/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml
new file mode 100644
index 0000000..eafec39
--- /dev/null
+++ b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - snaps
diff --git a/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..a0d2e76
--- /dev/null
+++ b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+    clients:
+      all:
+        - suites/fsx.sh
diff --git a/qa/suites/stress/thrash/% b/qa/suites/stress/thrash/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/stress/thrash/clusters/16-osd.yaml b/qa/suites/stress/thrash/clusters/16-osd.yaml
new file mode 100644
index 0000000..373dd40
--- /dev/null
+++ b/qa/suites/stress/thrash/clusters/16-osd.yaml
@@ -0,0 +1,18 @@
+roles:
+- [mon.0, mds.a, osd.0]
+- [mon.1, osd.1]
+- [mon.2, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [osd.8]
+- [osd.9]
+- [osd.10]
+- [osd.11]
+- [osd.12]
+- [osd.13]
+- [osd.14]
+- [osd.15]
+- [client.0]
diff --git a/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml
new file mode 100644
index 0000000..d8ff594
--- /dev/null
+++ b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml
@@ -0,0 +1,3 @@
+roles:
+- [mon.0, mds.a, osd.0, osd.1, osd.2]
+- [mon.1, mon.2, client.0]
diff --git a/qa/suites/stress/thrash/clusters/8-osd.yaml b/qa/suites/stress/thrash/clusters/8-osd.yaml
new file mode 100644
index 0000000..3b13105
--- /dev/null
+++ b/qa/suites/stress/thrash/clusters/8-osd.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.0, mds.a, osd.0]
+- [mon.1, osd.1]
+- [mon.2, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [client.0]
diff --git a/qa/suites/stress/thrash/fs/btrfs.yaml b/qa/suites/stress/thrash/fs/btrfs.yaml
new file mode 100644
index 0000000..0b3f6fa
--- /dev/null
+++ b/qa/suites/stress/thrash/fs/btrfs.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd sloppy crc: true
+        osd op thread timeout: 60
diff --git a/qa/suites/stress/thrash/fs/none.yaml b/qa/suites/stress/thrash/fs/none.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/stress/thrash/fs/xfs.yaml b/qa/suites/stress/thrash/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/stress/thrash/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/stress/thrash/thrashers/default.yaml b/qa/suites/stress/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..14d7725
--- /dev/null
+++ b/qa/suites/stress/thrash/thrashers/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
diff --git a/qa/suites/stress/thrash/thrashers/fast.yaml b/qa/suites/stress/thrash/thrashers/fast.yaml
new file mode 100644
index 0000000..eea9c06
--- /dev/null
+++ b/qa/suites/stress/thrash/thrashers/fast.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    op_delay: 1
+    chance_down: 10
diff --git a/qa/suites/stress/thrash/thrashers/more-down.yaml b/qa/suites/stress/thrash/thrashers/more-down.yaml
new file mode 100644
index 0000000..e39098b
--- /dev/null
+++ b/qa/suites/stress/thrash/thrashers/more-down.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+- thrashosds:
+    chance_down: 50
diff --git a/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml
new file mode 100644
index 0000000..912f12d
--- /dev/null
+++ b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/bonnie.sh
diff --git a/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml
new file mode 100644
index 0000000..18a6051
--- /dev/null
+++ b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+        - suites/iozone.sh
diff --git a/qa/suites/stress/thrash/workloads/radosbench.yaml b/qa/suites/stress/thrash/workloads/radosbench.yaml
new file mode 100644
index 0000000..3940870
--- /dev/null
+++ b/qa/suites/stress/thrash/workloads/radosbench.yaml
@@ -0,0 +1,4 @@
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 1800
diff --git a/qa/suites/stress/thrash/workloads/readwrite.yaml b/qa/suites/stress/thrash/workloads/readwrite.yaml
new file mode 100644
index 0000000..c53e52b
--- /dev/null
+++ b/qa/suites/stress/thrash/workloads/readwrite.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
diff --git a/qa/suites/teuthology/buildpackages/% b/qa/suites/teuthology/buildpackages/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/buildpackages/distros/centos_7.3.yaml b/qa/suites/teuthology/buildpackages/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/teuthology/buildpackages/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/teuthology/buildpackages/distros/ubuntu_14.04.yaml b/qa/suites/teuthology/buildpackages/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/teuthology/buildpackages/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/teuthology/buildpackages/tasks/branch.yaml b/qa/suites/teuthology/buildpackages/tasks/branch.yaml
new file mode 100644
index 0000000..b93c5d0
--- /dev/null
+++ b/qa/suites/teuthology/buildpackages/tasks/branch.yaml
@@ -0,0 +1,10 @@
+roles:
+    - [mon.0, client.0]
+tasks:
+    - install:
+        # branch has precedence over sha1
+        branch: hammer
+        sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
+    - exec:
+        client.0:
+          - ceph --version | grep 'version 0.94'
diff --git a/qa/suites/teuthology/buildpackages/tasks/default.yaml b/qa/suites/teuthology/buildpackages/tasks/default.yaml
new file mode 100644
index 0000000..cb583c7
--- /dev/null
+++ b/qa/suites/teuthology/buildpackages/tasks/default.yaml
@@ -0,0 +1,14 @@
+roles:
+    - [client.0]
+tasks:
+    - install:
+        tag: v0.94.1
+    - exec:
+        client.0:
+          - ceph --version | grep 'version 0.94.1'
+    - install.upgrade:
+        client.0:
+          tag: v0.94.3
+    - exec:
+        client.0:
+          - ceph --version | grep 'version 0.94.3'
diff --git a/qa/suites/teuthology/buildpackages/tasks/tag.yaml b/qa/suites/teuthology/buildpackages/tasks/tag.yaml
new file mode 100644
index 0000000..126749c
--- /dev/null
+++ b/qa/suites/teuthology/buildpackages/tasks/tag.yaml
@@ -0,0 +1,11 @@
+roles:
+    - [mon.0, client.0]
+tasks:
+    - install:
+        # tag has precedence over branch and sha1
+        tag: v0.94.1
+        branch: firefly
+        sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
+    - exec:
+        client.0:
+          - ceph --version | grep 'version 0.94.1'
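
Together with tasks/branch.yaml above, this pins down the precedence the
install task applies: tag wins over branch, and branch wins over sha1. For
completeness, a hedged sketch of the remaining case, pinning purely by sha1
(not a file in this suite; the expected version assumes that sha1 really is a
dumpling commit, as the comments above say):

roles:
    - [mon.0, client.0]
tasks:
    - install:
        # neither tag nor branch given, so the sha1 alone selects the build
        sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
    - exec:
        client.0:
          - ceph --version | grep 'version 0.67'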
diff --git a/qa/suites/teuthology/ceph/% b/qa/suites/teuthology/ceph/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/ceph/clusters/single.yaml b/qa/suites/teuthology/ceph/clusters/single.yaml
new file mode 100644
index 0000000..beba397
--- /dev/null
+++ b/qa/suites/teuthology/ceph/clusters/single.yaml
@@ -0,0 +1,2 @@
+roles:
+    - [mon.0, client.0]
diff --git a/qa/suites/teuthology/ceph/distros/centos_7.3.yaml b/qa/suites/teuthology/ceph/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/teuthology/ceph/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/teuthology/ceph/distros/ubuntu_14.04.yaml b/qa/suites/teuthology/ceph/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/teuthology/ceph/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/teuthology/ceph/tasks/teuthology.yaml b/qa/suites/teuthology/ceph/tasks/teuthology.yaml
new file mode 100644
index 0000000..00081c8
--- /dev/null
+++ b/qa/suites/teuthology/ceph/tasks/teuthology.yaml
@@ -0,0 +1,3 @@
+tasks:
+    - install:
+    - tests:
diff --git a/qa/suites/teuthology/integration.yaml b/qa/suites/teuthology/integration.yaml
new file mode 100644
index 0000000..8a7f1c7
--- /dev/null
+++ b/qa/suites/teuthology/integration.yaml
@@ -0,0 +1,2 @@
+tasks:
+- teuthology_integration:
diff --git a/qa/suites/teuthology/multi-cluster/% b/qa/suites/teuthology/multi-cluster/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/multi-cluster/all/ceph.yaml b/qa/suites/teuthology/multi-cluster/all/ceph.yaml
new file mode 100644
index 0000000..6a93b9a
--- /dev/null
+++ b/qa/suites/teuthology/multi-cluster/all/ceph.yaml
@@ -0,0 +1,23 @@
+roles:
+- - ceph.mon.a
+  - ceph.mon.b
+  - backup.osd.0
+  - backup.osd.1
+  - backup.osd.2
+  - backup.client.0
+- - backup.mon.a
+  - ceph.osd.0
+  - ceph.osd.1
+  - ceph.osd.2
+  - ceph.client.0
+  - client.1
+  - osd.3
+tasks:
+- install:
+- ceph:
+    cluster: backup
+- ceph:
+- workunit:
+    clients:
+      ceph.client.0: [true.sh]
+      backup.client.0: [true.sh]
diff --git a/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml b/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml
new file mode 100644
index 0000000..c25a214
--- /dev/null
+++ b/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml
@@ -0,0 +1,20 @@
+roles:
+- - backup.mon.a
+  - backup.mon.b
+  - backup.osd.0
+  - backup.osd.1
+  - backup.osd.2
+- - backup.mon.c
+  - backup.osd.3
+  - backup.osd.4
+  - backup.osd.5
+  - backup.client.0
+tasks:
+- install:
+- ceph:
+    cluster: backup
+- thrashosds:
+    cluster: backup
+- workunit:
+    clients:
+      all: [true.sh]
diff --git a/qa/suites/teuthology/multi-cluster/all/upgrade.yaml b/qa/suites/teuthology/multi-cluster/all/upgrade.yaml
new file mode 100644
index 0000000..c8a742f
--- /dev/null
+++ b/qa/suites/teuthology/multi-cluster/all/upgrade.yaml
@@ -0,0 +1,49 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - ceph.mon.a
+  - ceph.mon.b
+  - backup.osd.0
+  - backup.osd.1
+  - backup.osd.2
+  - backup.client.0
+- - backup.mon.a
+  - ceph.osd.0
+  - ceph.osd.1
+  - ceph.osd.2
+  - ceph.client.0
+  - client.1
+  - osd.3
+tasks:
+- install:
+    branch: infernalis
+- ceph:
+    cluster: backup
+- ceph:
+- workunit:
+    clients:
+      backup.client.0: [true.sh]
+      ceph.client.0: [true.sh]
+- install.upgrade:
+    ceph.mon.a:
+      branch: jewel
+    backup.mon.a:
+      branch: jewel
+- ceph.restart: [ceph.mon.a, ceph.mon.b, ceph.osd.0, ceph.osd.1, ceph.osd.2, osd.3]
+- exec:
+    ceph.client.0:
+    - ceph --version | grep -F 'version 10.'
+    client.1:
+    - ceph --cluster backup --version | grep -F 'version 10.'
+    backup.client.0:
+    # cli upgraded
+    - ceph --cluster backup --id 0 --version | grep -F 'version 10.'
+    - ceph --version | grep -F 'version 10.'
+    # backup cluster mon not upgraded
+    - ceph --cluster backup --id 0 tell mon.a version | grep -F 'version 9.2.'
+    - ceph tell mon.a version | grep -F 'version 10.'
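
The final exec only makes sense because the ceph.restart step above restarts
the 'ceph' cluster daemons (plus osd.3) and leaves the backup cluster's
daemons alone, so the backup monitor keeps running the infernalis 9.2 binary
even though jewel packages are now installed on its host. A hedged sketch of
the extra step that would bring it to jewel as well, deliberately absent from
this suite:

- ceph.restart: [backup.mon.a]
- exec:
    backup.client.0:
    - ceph --cluster backup --id 0 tell mon.a version | grep -F 'version 10.'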
diff --git a/qa/suites/teuthology/multi-cluster/all/workunit.yaml b/qa/suites/teuthology/multi-cluster/all/workunit.yaml
new file mode 100644
index 0000000..4c32d9a
--- /dev/null
+++ b/qa/suites/teuthology/multi-cluster/all/workunit.yaml
@@ -0,0 +1,21 @@
+roles:
+- - backup.mon.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+  - backup.client.0
+- - mon.a
+  - backup.osd.0
+  - backup.osd.1
+  - backup.osd.2
+  - client.1
+  - backup.client.1
+tasks:
+- install:
+- workunit:
+    clients:
+      all: [true.sh]
+- workunit:
+    clients:
+      backup.client.1: [true.sh]
diff --git a/qa/suites/teuthology/multi-cluster/fs/xfs.yaml b/qa/suites/teuthology/multi-cluster/fs/xfs.yaml
new file mode 100644
index 0000000..b4a8291
--- /dev/null
+++ b/qa/suites/teuthology/multi-cluster/fs/xfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd sloppy crc: true
diff --git a/qa/suites/teuthology/no-ceph/% b/qa/suites/teuthology/no-ceph/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/no-ceph/clusters/single.yaml b/qa/suites/teuthology/no-ceph/clusters/single.yaml
new file mode 100644
index 0000000..beba397
--- /dev/null
+++ b/qa/suites/teuthology/no-ceph/clusters/single.yaml
@@ -0,0 +1,2 @@
+roles:
+    - [mon.0, client.0]
diff --git a/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml b/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml
new file mode 100644
index 0000000..1391458
--- /dev/null
+++ b/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml
@@ -0,0 +1,2 @@
+tasks:
+    - tests:
diff --git a/qa/suites/teuthology/nop/% b/qa/suites/teuthology/nop/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/nop/all/nop.yaml b/qa/suites/teuthology/nop/all/nop.yaml
new file mode 100644
index 0000000..4a5b227
--- /dev/null
+++ b/qa/suites/teuthology/nop/all/nop.yaml
@@ -0,0 +1,3 @@
+tasks:
+    - nop:
+
diff --git a/qa/suites/teuthology/rgw/% b/qa/suites/teuthology/rgw/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/teuthology/rgw/distros/centos_7.3.yaml b/qa/suites/teuthology/rgw/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/teuthology/rgw/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/teuthology/rgw/distros/ubuntu_14.04.yaml b/qa/suites/teuthology/rgw/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/teuthology/rgw/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml
new file mode 100644
index 0000000..59a2a3f
--- /dev/null
+++ b/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml
@@ -0,0 +1,23 @@
+# this runs s3tests against rgw, using civetweb
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+    branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
+      force-branch: master
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    ec-data-pool: false
+    frontend: civetweb
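
Whether s3tests passes is the real signal here, but for debugging it can help
to look at the gateway state from client.0 after the run; a sketch of such an
extra step (not part of the suite; radosgw-admin reads the RADOS pools
directly, so this inspects users and buckets rather than the civetweb
frontend itself):

- exec:
    client.0:
    - sudo radosgw-admin user list
    - sudo radosgw-admin bucket list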
diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml
new file mode 100644
index 0000000..79a1a50
--- /dev/null
+++ b/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml
@@ -0,0 +1,23 @@
+# this runs s3tests against rgw, using mod_fastcgi
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+    branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
+      force-branch: master
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    ec-data-pool: false
+    frontend: apache
diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml
new file mode 100644
index 0000000..fc8c77f
--- /dev/null
+++ b/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml
@@ -0,0 +1,25 @@
+# this runs s3tests against rgw, using mod_proxy_fcgi
+# the choice between uds or tcp with mod_proxy_fcgi depends on the distro
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+    branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
+      force-branch: master
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        debug rgw: 20
+  rgw:
+    ec-data-pool: false
+    frontend: apache
+    use_fcgi: true
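
Relative to s3tests-fastcgi.yaml above, the only functional change is in the
rgw override: the frontend stays apache, but use_fcgi asks the rgw task to
wire it up through mod_proxy_fcgi, and (per the comment) the uds-versus-tcp
choice is then made based on the distro. Isolated, the delta is just:

overrides:
  rgw:
    frontend: apache
    use_fcgi: true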
diff --git a/qa/suites/teuthology/workunits/yes.yaml b/qa/suites/teuthology/workunits/yes.yaml
new file mode 100644
index 0000000..45098db
--- /dev/null
+++ b/qa/suites/teuthology/workunits/yes.yaml
@@ -0,0 +1,8 @@
+roles:
+    - [client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - true.sh
diff --git a/qa/suites/tgt/basic/% b/qa/suites/tgt/basic/%
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/qa/suites/tgt/basic/%
@@ -0,0 +1 @@
+
diff --git a/qa/suites/tgt/basic/clusters/fixed-3.yaml b/qa/suites/tgt/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..0038432
--- /dev/null
+++ b/qa/suites/tgt/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/qa/suites/tgt/basic/fs/btrfs.yaml b/qa/suites/tgt/basic/fs/btrfs.yaml
new file mode 100644
index 0000000..4c7af31
--- /dev/null
+++ b/qa/suites/tgt/basic/fs/btrfs.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    fs: btrfs
+    conf:
+      osd:
+        osd op thread timeout: 60
diff --git a/qa/suites/tgt/basic/msgr-failures/few.yaml b/qa/suites/tgt/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/qa/suites/tgt/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/qa/suites/tgt/basic/msgr-failures/many.yaml b/qa/suites/tgt/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/qa/suites/tgt/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/qa/suites/tgt/basic/tasks/blogbench.yaml b/qa/suites/tgt/basic/tasks/blogbench.yaml
new file mode 100644
index 0000000..f77a78b
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/blogbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/blogbench.sh
diff --git a/qa/suites/tgt/basic/tasks/bonnie.yaml b/qa/suites/tgt/basic/tasks/bonnie.yaml
new file mode 100644
index 0000000..2cbfcf8
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/bonnie.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/bonnie.sh
diff --git a/qa/suites/tgt/basic/tasks/dbench-short.yaml b/qa/suites/tgt/basic/tasks/dbench-short.yaml
new file mode 100644
index 0000000..fcb721a
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/dbench-short.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/dbench-short.sh
diff --git a/qa/suites/tgt/basic/tasks/dbench.yaml b/qa/suites/tgt/basic/tasks/dbench.yaml
new file mode 100644
index 0000000..7f73217
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/dbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/dbench.sh
diff --git a/qa/suites/tgt/basic/tasks/ffsb.yaml b/qa/suites/tgt/basic/tasks/ffsb.yaml
new file mode 100644
index 0000000..f50a3a1
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/ffsb.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/ffsb.sh
diff --git a/qa/suites/tgt/basic/tasks/fio.yaml b/qa/suites/tgt/basic/tasks/fio.yaml
new file mode 100644
index 0000000..e7346ce
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/fio.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/fio.sh
diff --git a/qa/suites/tgt/basic/tasks/fsstress.yaml b/qa/suites/tgt/basic/tasks/fsstress.yaml
new file mode 100644
index 0000000..c77f511
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/fsstress.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/fsstress.sh
diff --git a/qa/suites/tgt/basic/tasks/fsx.yaml b/qa/suites/tgt/basic/tasks/fsx.yaml
new file mode 100644
index 0000000..04732c8
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/fsx.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/fsx.sh
diff --git a/qa/suites/tgt/basic/tasks/fsync-tester.yaml b/qa/suites/tgt/basic/tasks/fsync-tester.yaml
new file mode 100644
index 0000000..ea627b7
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/fsync-tester.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/fsync-tester.sh
diff --git a/qa/suites/tgt/basic/tasks/iogen.yaml b/qa/suites/tgt/basic/tasks/iogen.yaml
new file mode 100644
index 0000000..1065c74
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/iogen.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/iogen.sh
diff --git a/qa/suites/tgt/basic/tasks/iozone-sync.yaml b/qa/suites/tgt/basic/tasks/iozone-sync.yaml
new file mode 100644
index 0000000..ac241a4
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/iozone-sync.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/iozone-sync.sh
diff --git a/qa/suites/tgt/basic/tasks/iozone.yaml b/qa/suites/tgt/basic/tasks/iozone.yaml
new file mode 100644
index 0000000..cf5604c
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/iozone.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/iozone.sh
diff --git a/qa/suites/tgt/basic/tasks/pjd.yaml b/qa/suites/tgt/basic/tasks/pjd.yaml
new file mode 100644
index 0000000..ba5c631
--- /dev/null
+++ b/qa/suites/tgt/basic/tasks/pjd.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+    clients:
+        all:
+            - suites/pjd.sh
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/% b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/0-cluster/start.yaml b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..e2ea18d
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
+    conf:
+      client:
+        rbd default format: 1
+        rbd default features: 1
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/1-install/firefly-client-x.yaml b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/1-install/firefly-client-x.yaml
new file mode 100644
index 0000000..39430da
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/1-install/firefly-client-x.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+   branch: firefly 
+- print: "**** done install firefly"
+- install.upgrade:
+   exclude_packages: ['ceph-test', 'ceph-test-dbg']
+   client.0:
+- print: "**** done install.upgrade client.0"
+- ceph: 
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/2-workload/rbd_cli_import_export.yaml b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..6d4fd41
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,9 @@
+tasks:
+- workunit:
+    branch: firefly
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh"
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/centos_7.2.yaml b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/firefly-client-x/basic/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/% b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..db6f5e2
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..6a4eb99
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+    branch: hammer 
+- print: "**** done install hammer"
+upgrade_workload:
+  sequential:
+  - install.upgrade:
+      exclude_packages: ['ceph-test-dbg']
+      client.0:
+  - print: "**** done install.upgrade client.0"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..6638d14
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,26 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default features: 13
+tasks:
+- exec:
+    client.0:
+    - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+  - upgrade_workload
+- ceph: 
+- print: "**** done ceph"
+- exec:
+    client.0:
+    - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+    - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to hammer ceph_test_librbd_api"
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/test_librbd_api.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..dfaa0e8
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+  - upgrade_workload
+- ceph: 
+- print: "**** done ceph"
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/centos_7.2.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/% b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4d92f80
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,16 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - client.1
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
+    conf:
+      client:
+        rbd default features: 1
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..c91ba3c
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+   branch: hammer
+- print: "**** done install hammer"
+- install.upgrade:
+   exclude_packages: ['ceph-test-dbg']
+   client.1:
+- print: "**** done install.upgrade client.1"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..984dfa0
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/notify_master.sh
+      client.1:
+        - rbd/notify_slave.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/notify_slave.sh
+      client.1:
+        - rbd/notify_master.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/centos_7.2.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/% b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/0-cluster/start.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..8ab0246
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,12 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+- - client.0
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/1-install/infernalis-client-x.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/1-install/infernalis-client-x.yaml
new file mode 100644
index 0000000..b016af0
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/1-install/infernalis-client-x.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+    branch: infernalis 
+- print: "**** done install infernalis"
+upgrade_workload:
+  sequential:
+  - install.upgrade:
+      exclude_packages: ['ceph-test-dbg']
+      client.0:
+  - print: "**** done install.upgrade to -x on client.0"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_api_tests.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..99321c6
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- exec:
+    client.0:
+    - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+  - upgrade_workload
+- ceph: 
+- print: "**** done ceph"
+- exec:
+    client.0:
+    - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+    - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to hammer ceph_test_librbd_api"
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+      - rbd/test_librbd_api.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_cli_import_export.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..5adc5ac
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+  - upgrade_workload
+- ceph: 
+- print: "**** done ceph"
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+      - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/centos_7.2.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/basic/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/% b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/0-cluster/start.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4d92f80
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,16 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - client.1
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
+    conf:
+      client:
+        rbd default features: 1
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/1-install/infernalis-client-x.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/1-install/infernalis-client-x.yaml
new file mode 100644
index 0000000..ebaeeda
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/1-install/infernalis-client-x.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+   branch: infernalis
+- print: "**** done install infernalis"
+- install.upgrade:
+   exclude_packages: ['ceph-test-dbg']
+   client.1:
+- print: "**** done install.upgrade to -x on client.0"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/2-workload/rbd_notification_tests.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/2-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..f83742f
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/2-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+        - rbd/notify_master.sh
+      client.1:
+        - rbd/notify_slave.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+        - rbd/notify_slave.sh
+      client.1:
+        - rbd/notify_master.sh
+    env:
+      RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/centos_7.2.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/centos_7.2.yaml
new file mode 100644
index 0000000..44d2f0e
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/centos_7.2.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.2"
diff --git a/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/client-upgrade/infernalis-client-x/rbd/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/% b/qa/suites/upgrade/firefly-hammer-x/parallel/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..3eb17b6
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+        mon debug unsafe allow tier with nonempty snaps: true
+    log-whitelist:
+      - scrub mismatch
+      - ScrubResult
+      - failed to encode map
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/1-firelfy-hammer-install/firefly-hammer.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/1-firelfy-hammer-install/firefly-hammer.yaml
new file mode 100644
index 0000000..94c54df
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/1-firelfy-hammer-install/firefly-hammer.yaml
@@ -0,0 +1,14 @@
+tasks:
+- install:
+   branch: firefly
+- print: "**** done firefly install"
+- ceph:
+   fs: xfs
+- parallel:
+   - workload
+   - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
+      branch: hammer
+- print: "*** client.0 upgraded to hammer"  
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/+ b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..fb53cb4
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,9 @@
+workload:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+         - rados/test-upgrade-v9.0.1.sh
+         - cls
+   - print: "**** done  2-workload/rados_api.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..2c8570e
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+         - rados/load-gen-big.sh
+   - print: "**** done rados/load-gen-big.sh"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..c48f5cb
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload:
+  sequential:
+  - workunit:
+      branch: firefly
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..49c6237
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload:
+  sequential:
+  - workunit:
+      branch: firefly
+      clients:
+        client.0:
+        - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..54a0a3c
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,10 @@
+upgrade-sequence:
+   sequential:
+   - install.upgrade:
+       mon.a:
+         branch: hammer
+       mon.b:
+         branch: hammer
+   - print: "**** done install.upgrade mon.a & mon.b to branch: hammer"
+   - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+   - print: "**** done ceph.restart all"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..84dca41
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,39 @@
+upgrade-sequence:
+   sequential:
+   - install.upgrade:
+       mon.a:
+         branch: hammer
+   - print: "**** done install.upgrade mon.a to branch: hammer"
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart [mon.a] [osd.0, osd.1] [mds.a]"
+   - exec:
+      mon.b:
+        # is this command valid?
+        - sudo ceph osd crush tunables firefly
+   - install.upgrade:
+       mon.b:
+         branch: hammer
+   - print: "**** done install.upgrade mon.b to branch: hammer"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart [mon.b, mon.c] [osd.2, osd.3]"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/4-firefly-hammer-x-upgrade/firefly-hammer-x.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/4-firefly-hammer-x-upgrade/firefly-hammer-x.yaml
new file mode 100644
index 0000000..37a880b
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/4-firefly-hammer-x-upgrade/firefly-hammer-x.yaml
@@ -0,0 +1,8 @@
+tasks:
+   - parallel:
+     - workload2
+     - upgrade-sequence2
+   - print: "**** done parallel workload2 and upgrade-sequence2"
+   - install.upgrade:
+       client.0:
+   - print: "**** done install.upgrade client.0 to the version from teuthology-suite arg"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/+ b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_api.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_api.yaml
new file mode 100644
index 0000000..70249b9
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_api.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+workload2:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rados/test-upgrade-v9.0.1.sh
+        - cls
+  - print: "**** done rados/test.sh and cls from 5-workload/rados_api.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..0f4d779
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload2:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rados/load-gen-big.sh
+  - print: "**** done rados/load-gen-big.sh 2"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_api.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..264bfec
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload2:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 2"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_python.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..b71f35d
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/5-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload2:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh 2"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..8350d74
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,8 @@
+upgrade-sequence2:
+   sequential:
+   - install.upgrade:
+       mon.a:
+       mon.b:
+   - print: "**** done install.upgrade mon.a and mon.b"
+   - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+   - print: "**** done ceph.restart all"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-by-daemon.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-by-daemon.yaml
new file mode 100644
index 0000000..d953c63
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/6-upgrade-sequence/upgrade-by-daemon.yaml
@@ -0,0 +1,35 @@
+upgrade-sequence2:
+   sequential:
+   - install.upgrade:
+       mon.a:
+   - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** running mixed versions of osds and mons"
+   - exec:
+      mon.b:
+        - sudo ceph osd crush tunables hammer
+   - install.upgrade:
+       mon.b:
+   - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/+ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 100644
index 0000000..4fa8d9f
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure21profile
+      plugin: jerasure
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..3c31a8b
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
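The arithmetic behind the comment above: splitting the default 4096-byte stripe across k=3 data chunks does not divide evenly, so the chunk size is rounded up to an aligned value (1376 bytes here) and the effective stripe width becomes 1376*3 = 4128. A small sketch of that calculation follows; the 32-byte alignment is an assumption used for illustration, and only the resulting 1376/4128 figures come from the comment itself.

    # Worked arithmetic for the k=3 case described above.
    # The 32-byte alignment is an assumed value for illustration.
    def effective_stripe_width(requested_width, k, alignment=32):
        chunk = -(-requested_width // k)        # ceil(requested_width / k)
        chunk += (-chunk) % alignment           # round chunk up to the alignment
        return chunk, chunk * k

    chunk, width = effective_stripe_width(4096, k=3)
    print(chunk, width)                         # 1376 4128, not the requested 4096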
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..e0b0ba1
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.1]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b1c6791
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,6 @@
+tasks:
+  - workunit:
+      clients:
+        client.1:
+        - rados/load-gen-mix.sh
+  - print: "**** done 7-final-workload/rados_loadgenmix.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..89745a5
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,11 @@
+tasks:
+   - sequential:
+      - mon_thrash:
+          revive_delay: 20
+          thrash_delay: 1
+      - workunit:
+          branch: hammer
+          clients:
+            client.1:
+            - rados/test-upgrade-v9.0.1.sh
+      - print: "**** done rados/test-upgrade-v9.0.1.sh - 6-final-workload"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_cls.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..27b10a3
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    clients:
+      client.1:
+      - cls/test_cls_rbd.sh
+- print: "**** done 7-final-workload/rbd_cls.yaml"
+
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..a7ce214
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+     clients:
+        client.1:
+        - rbd/import_export.sh
+     env:
+        RBD_CREATE_ARGS: --new-format
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rgw_s3tests.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rgw_s3tests.yaml
new file mode 100644
index 0000000..22c3a3f
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/7-final-workload/rgw_s3tests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- rgw: [client.1]
+- s3tests:
+    client.1:
+      rgw_server: client.1
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/distros/centos_7.3.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/firefly-hammer-x/parallel/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/firefly-hammer-x/parallel/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/parallel/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/% b/qa/suites/upgrade/firefly-hammer-x/stress-split/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/00-cluster/start.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/00-cluster/start.yaml
new file mode 100644
index 0000000..129635f
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/00-cluster/start.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - failed to encode map
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mon.c
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/01-firefly-install/firefly.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/01-firefly-install/firefly.yaml
new file mode 100644
index 0000000..44e6420
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/01-firefly-install/firefly.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+    branch: firefly
+- ceph:
+    fs: xfs
+- print: "**** done install firefly"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/02-partial-upgrade-hammer/firsthalf.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/02-partial-upgrade-hammer/firsthalf.yaml
new file mode 100644
index 0000000..5d10882
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/02-partial-upgrade-hammer/firsthalf.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install.upgrade:
+    osd.0:
+      branch: hammer
+- print: "**** done install.upgrade hammer"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+- print: "**** done 02-partial-upgrade-hammer"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/03-workload/rbd.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/03-workload/rbd.yaml
new file mode 100644
index 0000000..07541d6
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/03-workload/rbd.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done 03-workload/rbd.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/04-mona-upgrade-hammer/mona.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/04-mona-upgrade-hammer/mona.yaml
new file mode 100644
index 0000000..d3b8556
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/04-mona-upgrade-hammer/mona.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "*** done restart mon.a 04-mona-upgrade-hammer/mona.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/+ b/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/rbd-cls.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/rbd-cls.yaml
new file mode 100644
index 0000000..507c8f9
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/rbd-cls.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done 05-workload/rbd-cls.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/readwrite.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/readwrite.yaml
new file mode 100644
index 0000000..cacfde1
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/05-workload/readwrite.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
+- print: "**** done 05-workload/readwrite.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/06-monb-upgrade-hammer/monb.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/06-monb-upgrade-hammer/monb.yaml
new file mode 100644
index 0000000..f4a6d99
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/06-monb-upgrade-hammer/monb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done restarted mon.b 06-monb-upgrade-hammer/monb.yaml" 
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/+ b/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/radosbench.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/radosbench.yaml
new file mode 100644
index 0000000..7f10f7a
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/radosbench.yaml
@@ -0,0 +1,33 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done 07-workload/radosbench.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/rbd_api.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/rbd_api.yaml
new file mode 100644
index 0000000..68f074e
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/07-workload/rbd_api.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+     clients:
+        client.0:
+           - rbd/test_librbd.sh
+- print: "**** done 07-workload/rbd_api.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/08-monc-upgrade-hammer/monc.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/08-monc-upgrade-hammer/monc.yaml
new file mode 100644
index 0000000..6f2ba5e
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/08-monc-upgrade-hammer/monc.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done 08-monc-upgrade-hammer/monc.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/09-workload/rbd-python.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/09-workload/rbd-python.yaml
new file mode 100644
index 0000000..bd79dc1
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/09-workload/rbd-python.yaml
@@ -0,0 +1,8 @@
+tasks:
+- workunit:
+    #fixes #10577
+    branch: firefly
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done 09-workload/rbd-python.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/10-osds-upgrade-hammer/secondhalf.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/10-osds-upgrade-hammer/secondhalf.yaml
new file mode 100644
index 0000000..75e49a7
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/10-osds-upgrade-hammer/secondhalf.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install.upgrade:
+    osd.3: 
+       branch: hammer
+- print: "**** done install.upgrade hammer"
+- ceph.restart:
+    daemons: [osd.3, osd.4, osd.5]
+- print: "**** done 10-osds-upgrade-hammer/secondhalf.yaml" 
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/11-workload/snaps-few-objects.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/11-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..b9f0271
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/11-workload/snaps-few-objects.yaml
@@ -0,0 +1,14 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+- print: "**** done 11-workload/snaps-few-objects.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/12-partial-upgrade-x/first.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/12-partial-upgrade-x/first.yaml
new file mode 100644
index 0000000..ff690ba
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/12-partial-upgrade-x/first.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install.upgrade:
+    osd.0:
+- print: "**** done install.upgrade -x osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+- print: "**** done 12-partial-upgrade-x/first.yaml" 
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/13-workload/rados_loadgen_big.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/13-workload/rados_loadgen_big.yaml
new file mode 100644
index 0000000..90698cb
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/13-workload/rados_loadgen_big.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/load-gen-big.sh
+- print: "**** done 13-workload/rados_loadgen_big.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/14-mona-upgrade-x/mona.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/14-mona-upgrade-x/mona.yaml
new file mode 100644
index 0000000..d83162c
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/14-mona-upgrade-x/mona.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a 14-mona-upgrade-x/mona.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/15-workload/rbd-import-export.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/15-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..7e6d43b
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/15-workload/rbd-import-export.yaml
@@ -0,0 +1,8 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done 15-workload/rbd-import-export.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/16-monb-upgrade-x/monb.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/16-monb-upgrade-x/monb.yaml
new file mode 100644
index 0000000..9123403
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/16-monb-upgrade-x/monb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 16-monb-upgrade-x/monb.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/17-workload/readwrite.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/17-workload/readwrite.yaml
new file mode 100644
index 0000000..ee49512
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/17-workload/readwrite.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
+- print: "**** done 17-workload/readwrite.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/18-monc-upgrade-x/monc.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/18-monc-upgrade-x/monc.yaml
new file mode 100644
index 0000000..f708237
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/18-monc-upgrade-x/monc.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done 18-monc-upgrade-x/monc.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/19-workload/radosbench.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/19-workload/radosbench.yaml
new file mode 100644
index 0000000..d51088d
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/19-workload/radosbench.yaml
@@ -0,0 +1,33 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done 19-workload/radosbench.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/20-osds-upgrade-x/osds_secondhalf.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/20-osds-upgrade-x/osds_secondhalf.yaml
new file mode 100644
index 0000000..cf23816
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/20-osds-upgrade-x/osds_secondhalf.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install.upgrade:
+    osd.3:
+- print: "**** done install.upgrade osd.3"
+- ceph.restart:
+    daemons: [osd.3, osd.4, osd.5]
+- print: "**** done 20-osds-upgrade-x/osds_secondhalf.yaml"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/+ b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rados_stress_watch.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rados_stress_watch.yaml
new file mode 100644
index 0000000..0e1ba01
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rados_stress_watch.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/stress_watch.sh
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rbd_cls_tests.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rbd_cls_tests.yaml
new file mode 100644
index 0000000..9ccd57c
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rbd_cls_tests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rgw-swift.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..0d79fb6
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/21-final-workload/rgw-swift.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rgw: 
+    client.0:
+    default_idle_timeout: 300
+- swift:
+    client.0:
+      rgw_server: client.0
+
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/centos_7.3.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/firefly-hammer-x/stress-split/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-jewel-x/parallel/distros/centos.yaml b/qa/suites/upgrade/hammer-jewel-x/parallel/distros/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/parallel/distros/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-jewel-x/stress-split/distros/centos.yaml b/qa/suites/upgrade/hammer-jewel-x/stress-split/distros/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/stress-split/distros/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/% b/qa/suites/upgrade/hammer-x/f-h-x-offline/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/0-install.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/0-install.yaml
new file mode 100644
index 0000000..a178d44
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/0-install.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+tasks:
+- install:
+    branch: firefly
+- ceph:
+    fs: xfs
+    log-whitelist:
+    - reached quota
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/1-pre.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/1-pre.yaml
new file mode 100644
index 0000000..fe6c376
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/1-pre.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    branch: firefly
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/2-upgrade.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/2-upgrade.yaml
new file mode 100644
index 0000000..1b5204e
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/2-upgrade.yaml
@@ -0,0 +1,18 @@
+tasks:
+- ceph.stop: [mon.a, mon.b, mon.c]
+- ceph.stop: [osd.0, osd.1, osd.2]
+- install.upgrade:
+    mon.a:
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: false
+- ceph.restart:
+    daemons: [mon.a, mon.b, mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: false
+- exec:
+    mon.a:
+      - ceph osd down 0 1 2
+- sleep:
+    duration: 10
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/3-jewel.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/3-jewel.yaml
new file mode 100644
index 0000000..f21e7fe
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/3-jewel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    osd.0:
+      - ceph osd set sortbitwise
+      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
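Once every daemon is running the new code, the fragment above turns on the Jewel-era settings: the cluster-wide sortbitwise flag and the per-pool use_gmt_hitset option. Below is a sketch of the same step driven from Python rather than a shell loop; it uses only the commands already shown in the fragment.

    # Post-upgrade settings: set sortbitwise, then use_gmt_hitset on every pool.
    import subprocess

    def ceph(*args):
        return subprocess.run(("ceph",) + args, check=True,
                              capture_output=True, text=True).stdout

    ceph("osd", "set", "sortbitwise")
    for pool in ceph("osd", "pool", "ls").split():
        ceph("osd", "pool", "set", pool, "use_gmt_hitset", "true")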
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/4-after.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/4-after.yaml
new file mode 100644
index 0000000..cd11ae6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/4-after.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/README b/qa/suites/upgrade/hammer-x/f-h-x-offline/README
new file mode 100644
index 0000000..98153e0
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/README
@@ -0,0 +1,4 @@
+Verify that we can upgrade straight from firefly to x without ever installing
+hammer.
+
+This would be an offline upgrade, though.. all osds have to be restarted!
diff --git a/qa/suites/upgrade/hammer-x/f-h-x-offline/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/f-h-x-offline/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/f-h-x-offline/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/parallel/% b/qa/suites/upgrade/hammer-x/parallel/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/hammer-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..3f19d79
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
+  - client.2
+  - client.3
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    - wrongly marked me down
+    - soft lockup
+    - detected stalls on CPUs
+    - failed to encode
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+      client:
+        debug client: 20
+        debug ms: 1
+      mds:
+        debug mds: 20
+        debug ms: 1
diff --git a/qa/suites/upgrade/hammer-x/parallel/0-tz-eastern.yaml b/qa/suites/upgrade/hammer-x/parallel/0-tz-eastern.yaml
new file mode 100644
index 0000000..019c761
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/0-tz-eastern.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/New_York | sudo tee /etc/timezone
diff --git a/qa/suites/upgrade/hammer-x/parallel/1-hammer-install/hammer.yaml b/qa/suites/upgrade/hammer-x/parallel/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..e5bf1fc
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/1-hammer-install/hammer.yaml
@@ -0,0 +1,17 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done installing hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+    - workload
+    - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
\ No newline at end of file
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/+ b/qa/suites/upgrade/hammer-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.2 before running workunit
+workload:
+  full_sequential:
+  - sequential:
+    - ceph-fuse:
+    - print: "**** done ceph-fuse 2-workload"
+    - workunit:
+        clients:
+           client.2:
+            - suites/blogbench.sh
+    - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..e4f3ee1
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,20 @@
+workload:
+  full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..d86c2d2
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+    - workunit:
+        branch: hammer
+        clients:
+          client.0:
+            - cls
+    - print: "**** done cls 2-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..50ba808
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+    - workunit:
+        branch: hammer
+        clients:
+          client.0:
+            - rados/load-gen-big.sh
+    - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..997f7ba
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+    - workunit:
+        branch: hammer
+        clients:
+          client.0:
+            - rbd/test_librbd.sh
+    - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..d1046da
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+    - workunit:
+        branch: hammer
+        clients:
+          client.0:
+            - rbd/test_librbd_python.sh
+    - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..ec1a88c
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,17 @@
+upgrade-sequence:
+   sequential:
+   - ceph.restart:
+       daemons: [osd.0, osd.1, osd.2, osd.3]
+       wait-for-healthy: false
+       wait-for-osds-up: true
+   - ceph.restart:
+       daemons: [mon.a, mon.b, mon.c]
+       wait-for-healthy: false
+       wait-for-osds-up: true
+   - print: "**** done ceph.restart do not wait for healthy"
+   - exec:
+       mon.a:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - print: "**** done ceph.healthy"
diff --git a/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml b/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
new file mode 100644
index 0000000..b5acf98
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
@@ -0,0 +1,38 @@
+upgrade-sequence:
+   sequential:
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: false
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** running mixed versions of osds and mons"
+#do we need to use "ceph osd crush tunables hammer" ?
+   - exec:
+      mon.b:
+        - sudo ceph osd crush tunables hammer
+   - print: "**** done ceph osd crush tunables hammer"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: false
+   - sleep:
+       duration: 30
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - sleep:
+       duration: 60
diff --git a/qa/suites/upgrade/hammer-x/parallel/4-jewel.yaml b/qa/suites/upgrade/hammer-x/parallel/4-jewel.yaml
new file mode 100644
index 0000000..f21e7fe
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/4-jewel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    osd.0:
+      - ceph osd set sortbitwise
+      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/+ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+  - ceph-fuse:
+  - print: "**** done ceph-fuse 5-final-workload"
+  - workunit:
+      clients:
+         client.3:
+          - suites/blogbench.sh
+  - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..505c16a
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,14 @@
+tasks:
+  - rados:
+      clients: [client.1]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..d4a8006
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,6 @@
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rados/load-gen-mix.sh
+  - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..cc71c5c
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+tasks:
+  - mon_thrash:
+      revive_delay: 20
+      thrash_delay: 1
+  - print: "**** done mon_thrash 4-final-workload"
+  - workunit:
+      clients:
+        client.1:
+          - rados/test.sh
+  - print: "**** done rados/test.sh 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..ed75230
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,6 @@
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..2c66c28
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rbd_import_export.yaml
@@ -0,0 +1,8 @@
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..01f2932
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/5-final-workload/rgw_swift.yaml
@@ -0,0 +1,10 @@
+overrides:
+  rgw:
+    frontend: civetweb
+tasks:
+  - rgw: [client.1]
+  - print: "**** done rgw 4-final-workload"
+  - swift:
+      client.1:
+        rgw_server: client.1
+  - print: "**** done swift 4-final-workload"
diff --git a/qa/suites/upgrade/hammer-x/parallel/distros/centos_7.3.yaml b/qa/suites/upgrade/hammer-x/parallel/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/parallel/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/parallel/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/parallel/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/% b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/+ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
new file mode 100644
index 0000000..45e60ef
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
@@ -0,0 +1,3 @@
+openstack:
+  - machine:
+      disk: 40 # GB
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
new file mode 100644
index 0000000..a7dbead
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-tz-eastern.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-tz-eastern.yaml
new file mode 100644
index 0000000..019c761
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-tz-eastern.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/New_York | sudo tee /etc/timezone
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-x86_64.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/1-hammer-install/hammer.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..6c2470d
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/1-hammer-install/hammer.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..f92eda6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
new file mode 100644
index 0000000..94bc8d9
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    fs: xfs
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+    sighup_delay: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
new file mode 100644
index 0000000..7c75c10
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..cc62371
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
@@ -0,0 +1,19 @@
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
new file mode 100644
index 0000000..22e87c7
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/8-finish-upgrade/last-osds-and-monc.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/8-finish-upgrade/last-osds-and-monc.yaml
new file mode 100644
index 0000000..4bc6f7e
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/8-finish-upgrade/last-osds-and-monc.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install.upgrade:
+    osd.3:
+- ceph.restart:
+      daemons: [osd.3,osd.4,osd.5]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+- sleep:
+      duration: 10
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- exec:
+    osd.0:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 100644
index 0000000..8d7c497
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: isaprofile
+      plugin: isa
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/% b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/+ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/openstack.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/openstack.yaml
new file mode 100644
index 0000000..45e60ef
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/openstack.yaml
@@ -0,0 +1,3 @@
+openstack:
+  - machine:
+      disk: 40 # GB
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/start.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/start.yaml
new file mode 100644
index 0000000..a7dbead
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-tz-eastern.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-tz-eastern.yaml
new file mode 100644
index 0000000..019c761
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/0-tz-eastern.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/New_York | sudo tee /etc/timezone
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/1-hammer-install/hammer.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..6c2470d
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/1-hammer-install/hammer.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..f92eda6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..42029f3
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+  ceph:
+    fs: xfs
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+    - soft lockup
+    - detected stalls on CPUs
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+    sighup_delay: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/4-mon/mona.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/4-mon/mona.yaml
new file mode 100644
index 0000000..7c75c10
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/4-mon/mona.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-no-shec.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-no-shec.yaml
new file mode 100644
index 0000000..7be49a3
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-no-shec.yaml
@@ -0,0 +1,9 @@
+#
+# The shec plugin cannot be used because some OSDs are not upgraded
+# yet and would crash.
+#
+tasks: 
+- exec:
+    mon.a:
+      - |-
+        sudo ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..cc62371
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
@@ -0,0 +1,19 @@
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/6-next-mon/monb.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/6-next-mon/monb.yaml
new file mode 100644
index 0000000..22e87c7
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/6-next-mon/monb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/8-finish-upgrade/last-osds-and-monc.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/8-finish-upgrade/last-osds-and-monc.yaml
new file mode 100644
index 0000000..4bc6f7e
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/8-finish-upgrade/last-osds-and-monc.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install.upgrade:
+    osd.3:
+- ceph.restart:
+      daemons: [osd.3,osd.4,osd.5]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+- sleep:
+      duration: 10
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- exec:
+    osd.0:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..3c31a8b
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
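The comment at the top of this workload calls out that k=3 gives a stripe width of 4128 rather than the 4096 default. A one-line shell check of that arithmetic, using the 1376-byte chunk size quoted in the comment:

    k=3; chunk=1376
    echo "stripe_width=$((k * chunk))"    # prints stripe_width=4128, not 4096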
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/centos_7.3.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/% b/qa/suites/upgrade/hammer-x/stress-split/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split/0-cluster/+ b/qa/suites/upgrade/hammer-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/hammer-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..45e60ef
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,3 @@
+openstack:
+  - machine:
+      disk: 40 # GB
diff --git a/qa/suites/upgrade/hammer-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/hammer-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..a7dbead
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/hammer-x/stress-split/0-tz-eastern.yaml b/qa/suites/upgrade/hammer-x/stress-split/0-tz-eastern.yaml
new file mode 100644
index 0000000..019c761
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/0-tz-eastern.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/New_York | sudo tee /etc/timezone
diff --git a/qa/suites/upgrade/hammer-x/stress-split/1-hammer-install/hammer.yaml b/qa/suites/upgrade/hammer-x/stress-split/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..6c2470d
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/1-hammer-install/hammer.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/hammer-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..f92eda6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/hammer-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..f9ee589
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - soft lockup
+    - detected stalls on CPUs
+    - failed to encode map e
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    sighup_delay: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/4-mon/mona.yaml b/qa/suites/upgrade/hammer-x/stress-split/4-mon/mona.yaml
new file mode 100644
index 0000000..f8ff647
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/4-mon/mona.yaml
@@ -0,0 +1,6 @@
+stress-tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/5-workload/+ b/qa/suites/upgrade/hammer-x/stress-split/5-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-cls.yaml b/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-cls.yaml
new file mode 100644
index 0000000..d75eb05
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-cls.yaml
@@ -0,0 +1,7 @@
+stress-tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-import-export.yaml b/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..e29ccf9
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/5-workload/rbd-import-export.yaml
@@ -0,0 +1,9 @@
+stress-tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/5-workload/readwrite.yaml b/qa/suites/upgrade/hammer-x/stress-split/5-workload/readwrite.yaml
new file mode 100644
index 0000000..8ee559a
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/5-workload/readwrite.yaml
@@ -0,0 +1,12 @@
+stress-tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 500
+      write_append_excl: false
+      op_weights:
+        read: 45
+        write: 45
+        delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/5-workload/snaps-few-objects.yaml b/qa/suites/upgrade/hammer-x/stress-split/5-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..0136e9e
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/5-workload/snaps-few-objects.yaml
@@ -0,0 +1,15 @@
+stress-tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/6-next-mon/monb.yaml b/qa/suites/upgrade/hammer-x/stress-split/6-next-mon/monb.yaml
new file mode 100644
index 0000000..fbff55f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/6-next-mon/monb.yaml
@@ -0,0 +1,6 @@
+stress-tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/7-workload/+ b/qa/suites/upgrade/hammer-x/stress-split/7-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split/7-workload/radosbench.yaml b/qa/suites/upgrade/hammer-x/stress-split/7-workload/radosbench.yaml
new file mode 100644
index 0000000..8f8ddbe
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/7-workload/radosbench.yaml
@@ -0,0 +1,36 @@
+stress-tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/7-workload/rbd_api.yaml b/qa/suites/upgrade/hammer-x/stress-split/7-workload/rbd_api.yaml
new file mode 100644
index 0000000..434465c
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/7-workload/rbd_api.yaml
@@ -0,0 +1,7 @@
+stress-tasks:
+- workunit:
+     branch: hammer
+     clients:
+        client.0:
+           - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/8-finish-upgrade/last-osds-and-monc.yaml b/qa/suites/upgrade/hammer-x/stress-split/8-finish-upgrade/last-osds-and-monc.yaml
new file mode 100644
index 0000000..4bc6f7e
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/8-finish-upgrade/last-osds-and-monc.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install.upgrade:
+    osd.3:
+- ceph.restart:
+      daemons: [osd.3,osd.4,osd.5]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+- sleep:
+      duration: 10
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- exec:
+    osd.0:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/9-workload/+ b/qa/suites/upgrade/hammer-x/stress-split/9-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/stress-split/9-workload/rbd-python.yaml b/qa/suites/upgrade/hammer-x/stress-split/9-workload/rbd-python.yaml
new file mode 100644
index 0000000..0ea5639
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/9-workload/rbd-python.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/9-workload/rgw-swift.yaml b/qa/suites/upgrade/hammer-x/stress-split/9-workload/rgw-swift.yaml
new file mode 100644
index 0000000..8f14160
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/9-workload/rgw-swift.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rgw: 
+    client.0:
+    default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/9-workload/snaps-many-objects.yaml b/qa/suites/upgrade/hammer-x/stress-split/9-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..39b44f6
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/9-workload/snaps-many-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/upgrade/hammer-x/stress-split/distros/centos_7.3.yaml b/qa/suites/upgrade/hammer-x/stress-split/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/stress-split/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/stress-split/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/stress-split/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/tiering/% b/qa/suites/upgrade/hammer-x/tiering/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/tiering/0-cluster/start.yaml b/qa/suites/upgrade/hammer-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000..028155f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,22 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+- - osd.2
+  - osd.3
+- - client.0
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    - wrongly marked me down
+    - soft lockup
+    - detected stalls on CPUs
+    - failed to encode map
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
diff --git a/qa/suites/upgrade/hammer-x/tiering/1-hammer-install/hammer.yaml b/qa/suites/upgrade/hammer-x/tiering/1-hammer-install/hammer.yaml
new file mode 100644
index 0000000..6c2470d
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/1-hammer-install/hammer.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/% b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000..24e6a33
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    client.0:
+      - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd k=2 m=1
+      - ceph osd pool create base-pool 4 4 erasure teuthologyprofile
diff --git a/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000..5a13581
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    client.0:
+      - ceph osd pool create base-pool 4
diff --git a/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/1-create-cache-tier/create-cache-tier.yaml b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/1-create-cache-tier/create-cache-tier.yaml
new file mode 100644
index 0000000..8933621
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/2-setup-cache-tiering/1-create-cache-tier/create-cache-tier.yaml
@@ -0,0 +1,10 @@
+tasks:
+- exec:
+    client.0:
+      - ceph osd pool create cache-pool 4
+      - ceph osd tier add base-pool cache-pool
+      - ceph osd tier cache-mode cache-pool writeback
+      - ceph osd tier set-overlay base-pool cache-pool
+      - ceph osd pool set cache-pool hit_set_type bloom
+      - ceph osd pool set cache-pool hit_set_count 8
+      - ceph osd pool set cache-pool hit_set_period 5
diff --git a/qa/suites/upgrade/hammer-x/tiering/3-upgrade/upgrade.yaml b/qa/suites/upgrade/hammer-x/tiering/3-upgrade/upgrade.yaml
new file mode 100644
index 0000000..5bbfda2
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/3-upgrade/upgrade.yaml
@@ -0,0 +1,90 @@
+tasks:
+- parallel:
+  - workload-when-upgrading
+  - upgrade-sequence
+- print: "**** done upgrade"
+
+workload-when-upgrading:
+  sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      pools: [base-pool]
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados when upgrading"
+
+upgrade-sequence:
+  sequential:
+  - upgrade-first-half
+  - flip-but-fail
+  - upgrade-second-half
+
+upgrade-first-half:
+  sequential:
+  - install.upgrade:
+      mon.a:
+  - print: "**** done install.upgrade mon.{a,b,c} and osd.{0,1}"
+  - ceph.restart:
+      daemons: [mon.a]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.0]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.1]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.b]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.c]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+
+upgrade-second-half:
+  sequential:
+  - install.upgrade:
+      osd.2:
+  - print: "**** done install.upgrade osd.{2,3}"
+  - ceph.restart:
+      daemons: [osd.2]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - sleep:
+      duration: 60
+  - exec:
+      mon.a:
+        - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - print: "**** HEALTH_OK reached after upgrading last OSD to jewel"
+
+flip-but-fail:
+  sequential:
+  - exec:
+      mon.a:
+        - |-
+          ceph osd set sortbitwise 2>&1 | grep "not all up OSDs have OSD_BITWISE_HOBJ_SORT feature"
+  - print: "**** done flip-but-fail"
diff --git a/qa/suites/upgrade/hammer-x/tiering/4-finish-upgrade/flip-success.yaml b/qa/suites/upgrade/hammer-x/tiering/4-finish-upgrade/flip-success.yaml
new file mode 100644
index 0000000..9482ea5
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/4-finish-upgrade/flip-success.yaml
@@ -0,0 +1,28 @@
+tasks:
+- parallel:
+    - workload-2
+    - flip-and-success
+
+workload-2:
+  sequential:
+  - rados:
+      clients: [client.0]
+      ops: 1000
+      objects: 50
+      pools: [base-pool]
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados after upgrading"
+
+flip-and-success:
+  sequential:
+    - exec:
+        client.0:
+          - ceph osd set sortbitwise
+          - ceph osd pool set cache-pool use_gmt_hitset true
+    - print: "**** done flip-and-success"
diff --git a/qa/suites/upgrade/hammer-x/tiering/distros/centos_7.3.yaml b/qa/suites/upgrade/hammer-x/tiering/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/tiering/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/tiering/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/tiering/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/v0-94-4-stop/+ b/qa/suites/upgrade/hammer-x/v0-94-4-stop/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/centos_7.3.yaml b/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/v0-94-4-stop/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/hammer-x/v0-94-4-stop/ignore.yaml b/qa/suites/upgrade/hammer-x/v0-94-4-stop/ignore.yaml
new file mode 100644
index 0000000..f761b53
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/v0-94-4-stop/ignore.yaml
@@ -0,0 +1,8 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
diff --git a/qa/suites/upgrade/hammer-x/v0-94-4-stop/v0-94-4-stop.yaml b/qa/suites/upgrade/hammer-x/v0-94-4-stop/v0-94-4-stop.yaml
new file mode 100644
index 0000000..896a56b
--- /dev/null
+++ b/qa/suites/upgrade/hammer-x/v0-94-4-stop/v0-94-4-stop.yaml
@@ -0,0 +1,110 @@
+#
+# Test the expected behavior of the
+#
+#    CEPH_FEATURE_HAMMER_0_94_4
+#
+# feature that forbids a cluster with a mix of
+# OSD < v0.94.4 and OSD >= v0.94.4
+#
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+- - osd.2
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- print: "**** Install version lower than v0.94.4"
+- install:
+    tag: v0.94.3
+- ceph:
+    fs: xfs
+
+- print: "*** Upgrade the target that runs osd.0 and osd.1 to -x while the target that runs osd.2 stays v0.94.3"
+- install.upgrade:
+    osd.0:
+- print: "*** Restart the mon.a so that it is post-hammer v0.94.4 and implements the CEPH_FEATURE_HAMMER_0_94_4 feature"
+- ceph.restart:
+    daemons: [mon.a]
+- print: "*** Verify that osd.0 cannot restart because osd.1 and osd.2 are still < v0.94.4"
+- ceph.restart:
+    daemons: [osd.0]
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - |-
+        set -x
+        success=false
+        for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do 
+          if ceph daemon osd.0 log flush ; then
+            if grep "one or more pre-v0.94.4 hammer" /var/log/ceph/ceph-osd.0.log ; then
+              success=true
+              break
+            fi
+          fi
+          sleep $delay
+        done
+        $success || exit 1
+
+- print: "*** Stop all OSDs and restart osd.0 and osd.1 which are >= v0.94.4"
+- ceph.stop:
+    daemons: [osd.0, osd.1, osd.2]
+- exec:
+    mon.a:
+      - |-
+        set -x
+        ceph osd down osd.0
+        ceph osd down osd.1
+        ceph osd down osd.2
+- ceph.restart:
+    daemons: [osd.0, osd.1]
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - |-
+        set -x
+        success=false
+        for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do 
+          if ceph osd dump | grep 'osd.1 up' && ceph osd dump | grep 'osd.0 up' ; then 
+            success=true
+            break
+          fi
+          ceph osd dump
+          sleep $delay
+        done
+        $success || exit 1
+        ceph osd dump | grep 'osd.2 down' || exit 1
+          
+- print: "*** Verify that osd.2 cannot restart because it is < v0.94.4 and all other OSDs are >= v0.94.4"
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - |-
+        set -x
+        success=false
+        for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do 
+          ceph daemon mon.a log flush
+          if grep "disallowing boot of pre-hammer v0.94.4 OSD" /var/log/ceph/*.log ; then
+            success=true
+            break
+          fi
+          sleep $delay
+          ceph osd dump
+        done
+        $success || exit 1
+
+- print: "*** Upgrade the target that runs osd.2 to -x and verify the cluster is back to being healthy"
+- install.upgrade:
+    osd.2:
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+- ceph.healthy:
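The three inline shell loops in this test share one pattern: retry a check with growing delays until it passes or the delays are exhausted. A hedged helper sketch of that pattern follows; the name wait_for is invented here for illustration and is not defined by the suite.

    # retry "$@" with growing delays; return as soon as the command succeeds
    wait_for() {
        for delay in 1 2 4 8 16 32 64 128 256 512 1024; do
            "$@" && return 0
            sleep "$delay"
        done
        return 1
    }

    # example mirroring the osd.0 check above
    wait_for sh -c 'ceph daemon osd.0 log flush &&
      grep "one or more pre-v0.94.4 hammer" /var/log/ceph/ceph-osd.0.log'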
diff --git a/qa/suites/upgrade/infernalis-x/parallel/% b/qa/suites/upgrade/infernalis-x/parallel/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/parallel/0-cluster/+ b/qa/suites/upgrade/infernalis-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/parallel/0-cluster/openstack.yaml b/qa/suites/upgrade/infernalis-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/infernalis-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/infernalis-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..e73804f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,30 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with clients 0-3 on a separate third node.
+   Use xfs beneath the osds.
+   CephFS tests run on clients 2 and 3
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
+  - client.2
+  - client.3
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode map
+    - wrongly marked
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
diff --git a/qa/suites/upgrade/infernalis-x/parallel/1-infernalis-install/infernalis.yaml b/qa/suites/upgrade/infernalis-x/parallel/1-infernalis-install/infernalis.yaml
new file mode 100644
index 0000000..aa02540
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/1-infernalis-install/infernalis.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   install ceph/infernalis latest
+   run workload and upgrade-sequence in parallel
+   upgrade the client node
+tasks:
+- install:
+    branch: infernalis
+- print: "**** done installing infernalis"
+- ceph:
+- print: "**** done ceph"
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+    - workload
+    - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/+ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.2 before running workunit
+workload:
+  full_sequential:
+  - sequential:
+    - ceph-fuse:
+    - print: "**** done ceph-fuse 2-workload"
+    - workunit:
+        clients:
+           client.2:
+            - suites/blogbench.sh
+    - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+   run randomized correctness test for rados operations
+   on an erasure-coded pool
+workload:
+  full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..6342adc
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   object class functional tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: infernalis
+        clients:
+          client.0:
+            - cls
+    - print: "**** done cls 2-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..ee1ff46
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+  full_sequential:
+    - workunit:
+        branch: infernalis
+        clients:
+          client.0:
+            - rados/load-gen-big.sh
+    - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..ef906e3
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd C and C++ api tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: infernalis
+        clients:
+          client.0:
+            - rbd/test_librbd.sh
+    - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..02f112c
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd python api tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: infernalis
+        clients:
+          client.0:
+            - rbd/test_librbd_python.sh
+    - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..f56effd
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+   upgrade the ceph cluster
+upgrade-sequence:
+   sequential:
+   - ceph.restart:
+       daemons: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+       wait-for-healthy: false
+       wait-for-osds-up: true
+   - print: "**** done ceph.restart do not wait for healthy"
+   - exec:
+       mon.a:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - print: "**** done ceph.healthy"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..eaefce7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,43 @@
+meta:
+- desc: |
+   upgrade the ceph cluster,
+   upgrade in two steps
+   step one ordering: mon.a, osd.0, osd.1, mds.a
+   step two ordering: mon.b, mon.c, osd.2, osd.3
+   ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+   sequential:
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** running mixed versions of osds and mons"
+   #do we need to use "ceph osd crush tunables hammer" ?
+   - exec:
+      mon.b:
+        - sudo ceph osd crush tunables hammer
+   - print: "**** done ceph osd crush tunables hammer"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - sleep:
+       duration: 60
diff --git a/qa/suites/upgrade/infernalis-x/parallel/4-jewel.yaml b/qa/suites/upgrade/infernalis-x/parallel/4-jewel.yaml
new file mode 100644
index 0000000..f21e7fe
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/4-jewel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    osd.0:
+      - ceph osd set sortbitwise
+      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/+ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+  - ceph-fuse:
+  - print: "**** done ceph-fuse 5-final-workload"
+  - workunit:
+      clients:
+         client.3:
+          - suites/blogbench.sh
+  - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+  - rados:
+      clients: [client.1]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rados/load-gen-mix.sh
+  - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..f2249c9
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+   librados C and C++ api tests
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+tasks:
+  - mon_thrash:
+      revive_delay: 20
+      thrash_delay: 1
+  - print: "**** done mon_thrash 4-final-workload"
+  - workunit:
+      clients:
+        client.1:
+          - rados/test.sh
+  - print: "**** done rados/test.sh 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   rbd object class functional tests
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..46e1355
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   run basic import/export cli tests for rbd
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/5-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+   swift api tests for rgw
+overrides:
+  rgw:
+    frontend: civetweb
+tasks:
+  - rgw: [client.1]
+  - print: "**** done rgw 4-final-workload"
+  - swift:
+      client.1:
+        rgw_server: client.1
+  - print: "**** done swift 4-final-workload"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/distros/centos_7.3.yaml b/qa/suites/upgrade/infernalis-x/parallel/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/infernalis-x/parallel/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/infernalis-x/parallel/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/parallel/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/infernalis-x/point-to-point-x/% b/qa/suites/upgrade/infernalis-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/centos_7.3.yaml b/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/point-to-point-x/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml b/qa/suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml
new file mode 100644
index 0000000..c553c02
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml
@@ -0,0 +1,204 @@
+meta:
+- desc: |
+   Run ceph on two nodes, using one of them as a client,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+   install ceph/infernalis v9.2.0 point version
+   run workload and upgrade-sequence in parallel
+   install ceph/infernalis latest version
+   run workload and upgrade-sequence in parallel
+   install ceph/-x version (jewel)
+   run workload and upgrade-sequence in parallel
+overrides:
+  ceph:
+    log-whitelist:
+    - reached quota
+    - scrub
+    - osd_map_max_advance
+    - failed to encode
+    - wrongly marked
+    fs: xfs
+    conf:
+      mon:
+        mon debug unsafe allow tier with nonempty snaps: true
+      osd:
+        osd map max advance: 1000
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - mon.b
+  - mon.c
+  - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 30 # GB
+tasks:
+- print: "****  v9.2.0 about to install"
+- install:
+    tag: v9.2.0
+- print: "**** done v9.2.0 install"
+- ceph:
+   fs: xfs
+- print: "**** done ceph xfs"
+- sequential:
+   - workload
+- print: "**** done workload v9.2.0"
+- install.upgrade:
+    mon.a:
+      branch: infernalis
+    mon.b:
+      branch: infernalis
+    # Note that client.a IS NOT upgraded at this point
+    #client.1:
+      #branch: hammer
+- parallel:
+   - workload_infernalis
+   - upgrade-sequence_infernalis
+- print: "**** done parallel infernalis branch"
+- install.upgrade:
+   client.1:
+     branch: infernalis
+- print: "**** done branch: -infernalis install.upgrade on client.1"
+- install.upgrade:
+   mon.a:
+     #branch: infernalis
+   mon.b:
+     #branch: infernalis
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+   - workload_x
+   - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+# Run test.sh on the -x upgraded cluster
+- install.upgrade:
+    client.1:
+- workunit:
+    clients:
+      client.1:
+      - rados/test.sh
+      - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+   sequential:
+   - workunit:
+       clients:
+         client.0:
+           - suites/blogbench.sh
+workload_infernalis:
+   full_sequential:
+   - workunit:
+       branch: infernalis
+       clients:
+         client.1:
+         - rados/test.sh
+         - cls
+   - print: "**** done rados/test.sh &  cls workload_infernalis"
+   - sequential:
+     - rgw: [client.0]
+     - print: "**** done rgw workload_infernalis"
+     - s3tests:
+         client.0:
+           force-branch: ceph-infernalis
+           rgw_server: client.0
+     - print: "**** done s3tests workload_infernalis"
+upgrade-sequence_infernalis:
+   sequential:
+   - print: "**** done branch: infernalis install.upgrade"
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.5]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all hammer branch mds/osd/mon"
+workload_x:
+   sequential:
+   - workunit:
+       branch: infernalis
+       clients:
+         client.1:
+         - rados/test-upgrade-from-9.2.sh
+         - cls
+   - print: "**** done rados/test.sh &  cls workload_x NOT upgraded  client"
+   - workunit:
+       clients:
+         client.0:
+         - rados/test.sh
+         - cls
+   - print: "**** done rados/test.sh &  cls workload_x upgraded client"
+   - rgw: [client.1]
+   - print: "**** done rgw workload_x"
+   - s3tests:
+       client.1:
+         force-branch: ceph-infernalis
+         rgw_server: client.1
+   - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+   sequential:
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+
+   - ceph.restart:
+       daemons: [osd.0, osd.1, osd.2]
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.3, osd.4, osd.5]
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/% b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/+ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-x86_64.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/1-infernalis-install/infernalis.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/1-infernalis-install/infernalis.yaml
new file mode 100644
index 0000000..ee92f7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/1-infernalis-install/infernalis.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/infernalis latest
+tasks:
+- install:
+    branch: infernalis
+- print: "**** done install infernalis"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install upgrade ceph/-x on one node only
+   1st half
+   restart : osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
new file mode 100644
index 0000000..ec04790
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+   randomly kill and revive osds
+   small chance of increasing the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+    ceph_objectstore_tool: false
+    test_rm_past_intervals: false
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..eeffd92
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons are now upgraded, the ceph cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..13fe70c
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool 
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
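
The arithmetic behind the comment at the top of this file is easy to verify: three 1376-byte data chunks give a stripe_width of 4128, which is neither the 4096-byte default nor a multiple of 1 MiB, so recovery has to deal with rounding. A quick check (the 1376-byte chunk size is taken from the comment above, not derived here):

    # Values copied from the comment in the YAML above.
    k = 3
    chunk = 1376
    stripe_width = k * chunk

    print(stripe_width)                   # 4128
    print(stripe_width == 4096)           # False -- differs from the default
    print(stripe_width % (1024 * 1024))   # 4128 -- not a multiple of 1 MiB
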
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/% b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/+ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/openstack.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/start.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/1-infernalis-install/infernalis.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/1-infernalis-install/infernalis.yaml
new file mode 100644
index 0000000..ee92f7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/1-infernalis-install/infernalis.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/infernalis latest
+tasks:
+- install:
+    branch: infernalis
+- print: "**** done install infernalis"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install/upgrade ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..de7fd76
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+    ceph_objectstore_tool: false
+    test_rm_past_intervals: false
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/4-mon/mona.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..eeffd92
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/6-next-mon/monb.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/8-next-mon/monc.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons were upgraded, the cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..ab439d5
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/centos_7.3.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/% b/qa/suites/upgrade/infernalis-x/stress-split/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/+ b/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/1-infernalis-install/infernalis.yaml b/qa/suites/upgrade/infernalis-x/stress-split/1-infernalis-install/infernalis.yaml
new file mode 100644
index 0000000..ee92f7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/1-infernalis-install/infernalis.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/infernalis latest
+tasks:
+- install:
+    branch: infernalis
+- print: "**** done install infernalis"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/infernalis-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install/upgrade ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/infernalis-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..04bf073
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    ceph_objectstore_tool: false
+    test_rm_past_intervals: false
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/4-mon/mona.yaml b/qa/suites/upgrade/infernalis-x/stress-split/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/5-workload/+ b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-cls.yaml b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-cls.yaml
new file mode 100644
index 0000000..2a8dc6c
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   run basic cls tests for rbd
+tasks:
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-import-export.yaml b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..b9342ad
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   run basic import/export cli tests for rbd
+tasks:
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/5-workload/readwrite.yaml b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/readwrite.yaml
new file mode 100644
index 0000000..1b161b4
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool, 
+   using only reads, writes, and deletes
+tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 500
+      write_append_excl: false
+      op_weights:
+        read: 45
+        write: 45
+        delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/5-workload/snaps-few-objects.yaml b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..71445bd
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/5-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/6-next-mon/monb.yaml b/qa/suites/upgrade/infernalis-x/stress-split/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/7-workload/+ b/qa/suites/upgrade/infernalis-x/stress-split/7-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/7-workload/radosbench.yaml b/qa/suites/upgrade/infernalis-x/stress-split/7-workload/radosbench.yaml
new file mode 100644
index 0000000..7d2be5f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/7-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+   run randomized correctness test for rados operations
+   generate write load with rados bench
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/7-workload/rbd_api.yaml b/qa/suites/upgrade/infernalis-x/stress-split/7-workload/rbd_api.yaml
new file mode 100644
index 0000000..585daae
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/7-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   librbd C and C++ api tests
+tasks:
+- workunit:
+     branch: infernalis
+     clients:
+        client.0:
+           - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/8-next-mon/monc.yaml b/qa/suites/upgrade/infernalis-x/stress-split/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons were upgraded, the cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/9-workload/+ b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rbd-python.yaml b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rbd-python.yaml
new file mode 100644
index 0000000..5822ee0
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   librbd python api tests
+tasks:
+- workunit:
+    branch: infernalis
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rgw-swift.yaml b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rgw-swift.yaml
new file mode 100644
index 0000000..ba004ce
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/rgw-swift.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   swift api tests for rgw
+tasks:
+- rgw: 
+    client.0:
+    default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/9-workload/snaps-many-objects.yaml b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/9-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/distros/centos_7.3.yaml b/qa/suites/upgrade/infernalis-x/stress-split/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/infernalis-x/stress-split/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/infernalis-x/stress-split/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/infernalis-x/stress-split/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/jewel-x/parallel/% b/qa/suites/upgrade/jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/parallel/0-cluster/+ b/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml b/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..1925f6b
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,28 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only third node (clients 0-3).
+   Use xfs beneath the osds.
+   CephFS tests run on clients 2 and 3
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
+  - client.2
+  - client.3
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode map
+    - wrongly marked
+    conf:
+    fs: xfs
diff --git a/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..17e3c0e
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   install ceph/jewel latest
+   run workload and upgrade-sequence in parallel
+   upgrade the client node
+tasks:
+- install:
+    branch: jewel
+- print: "**** done installing jewel"
+- ceph:
+- print: "**** done ceph"
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+    - workload
+    - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/+ b/qa/suites/upgrade/jewel-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.2 before running workunit
+workload:
+  full_sequential:
+  - sequential:
+    - ceph-fuse:
+    - print: "**** done ceph-fuse 2-workload"
+    - workunit:
+        clients:
+           client.2:
+            - suites/blogbench.sh
+    - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+   run randomized correctness test for rados operations
+   on an erasure-coded pool
+workload:
+  full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..348f1ae
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   object class functional tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: jewel
+        clients:
+          client.0:
+            - cls
+    - print: "**** done cls 2-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..a91ed2c
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+  full_sequential:
+    - workunit:
+        branch: jewel
+        clients:
+          client.0:
+            - rados/load-gen-big.sh
+    - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..15d892e
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd C and C++ api tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: jewel
+        clients:
+          client.0:
+            - rbd/test_librbd.sh
+    - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..bb2d3ea
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd python api tests
+workload:
+  full_sequential:
+    - workunit:
+        branch: jewel
+        clients:
+          client.0:
+            - rbd/test_librbd_python.sh
+    - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..2e41f73
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: |
+   upgrade the ceph cluster
+upgrade-sequence:
+   sequential:
+   - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+   - print: "**** done ceph.restart all"
diff --git a/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..43f3937
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,37 @@
+meta:
+- desc: |
+   upgrade the ceph cluster,
+   upgrade in two steps
+   step one ordering: mon.a, osd.0, osd.1, mds.a
+   step two ordering: mon.b, mon.c, osd.2, osd.3
+   ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+   sequential:
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** running mixed versions of osds and mons"
+   - exec:
+      mon.b:
+        - sudo ceph osd crush tunables jewel
+   - print: "**** done ceph osd crush tunables jewel"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/+ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+   run a cephfs stress test
+   mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+  - ceph-fuse:
+  - print: "**** done ceph-fuse 5-final-workload"
+  - workunit:
+      clients:
+         client.3:
+          - suites/blogbench.sh
+  - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+  - rados:
+      clients: [client.1]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rados/load-gen-mix.sh
+  - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..f2249c9
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+   librados C and C++ api tests
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+tasks:
+  - mon_thrash:
+      revive_delay: 20
+      thrash_delay: 1
+  - print: "**** done mon_thrash 4-final-workload"
+  - workunit:
+      clients:
+        client.1:
+          - rados/test.sh
+  - print: "**** done rados/test.sh 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   rbd object class functional tests
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..46e1355
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   run basic import/export cli tests for rbd
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/5-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+   swift api tests for rgw
+overrides:
+  rgw:
+    frontend: civetweb
+tasks:
+  - rgw: [client.1]
+  - print: "**** done rgw 4-final-workload"
+  - swift:
+      client.1:
+        rgw_server: client.1
+  - print: "**** done swift 4-final-workload"
diff --git a/qa/suites/upgrade/jewel-x/parallel/distros/centos_7.3.yaml b/qa/suites/upgrade/jewel-x/parallel/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/jewel-x/parallel/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/jewel-x/parallel/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/jewel-x/parallel/kraken.yaml b/qa/suites/upgrade/jewel-x/parallel/kraken.yaml
new file mode 100644
index 0000000..4ffb722
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/parallel/kraken.yaml
@@ -0,0 +1 @@
+#empty placeholder  for now
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/% b/qa/suites/upgrade/jewel-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos.yaml b/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..affdabd
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,214 @@
+meta:
+- desc: |
+   Run ceph on two nodes, using one of them as a client,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+   install ceph/jewel v10.2.0 point version
+   run workload and upgrade-sequence in parallel
+   install ceph/jewel latest version
+   run workload and upgrade-sequence in parallel
+   install ceph/-x version (jewel)
+   run workload and upgrade-sequence in parallel
+overrides:
+  ceph:
+    log-whitelist:
+    - reached quota
+    - scrub
+    - osd_map_max_advance
+    - wrongly marked
+    fs: xfs
+    conf:
+      mon:
+        mon debug unsafe allow tier with nonempty snaps: true
+      osd:
+        osd map max advance: 1000
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - mon.b
+  - mon.c
+  - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 30 # GB
+tasks:
+- print: "****  v10.2.0 about to install"
+- install:
+    tag: v10.2.0
+    exclude_packages: ['ceph-mgr']
+- print: "**** done v10.2.0 install"
+- ceph:
+   fs: xfs
+- print: "**** done ceph xfs"
+- sequential:
+   - workload
+- print: "**** done workload v10.2.0"
+- install.upgrade:
+    mon.a:
+      branch: jewel
+      exclude_packages: ['ceph-mgr']
+    mon.b:
+      branch: jewel
+      exclude_packages: ['ceph-mgr']
+    # Note that client.1 IS NOT upgraded at this point
+    #client.1:
+      #branch: jewel
+- parallel:
+   - workload_jewel
+   - upgrade-sequence_jewel
+- print: "**** done parallel jewel branch"
+- install.upgrade:
+    client.1:
+      branch: jewel
+      exclude_packages: ['ceph-mgr']
+- print: "**** done branch: jewel install.upgrade on client.1"
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+   - workload_x
+   - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+    client.1:
+- workunit:
+    branch: jewel
+    clients:
+      client.1:
+      - rados/test-upgrade-v11.0.0.sh
+      - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+   sequential:
+   - workunit:
+       clients:
+         client.0:
+           - suites/blogbench.sh
+workload_jewel:
+   full_sequential:
+   - workunit:
+       branch: jewel
+       clients:
+         client.1:
+         - rados/test.sh
+         - cls
+       env:
+         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+   - print: "**** done rados/test.sh &  cls workload_jewel"
+   - sequential:
+     - rgw: [client.0]
+     - print: "**** done rgw workload_jewel"
+     - s3tests:
+         client.0:
+           force-branch: ceph-jewel
+           rgw_server: client.0
+     - print: "**** done s3tests workload_jewel"
+upgrade-sequence_jewel:
+   sequential:
+   - print: "**** done branch: jewel install.upgrade"
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.5]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all jewel branch mds/osd/mon"
+workload_x:
+   sequential:
+   - workunit:
+       branch: jewel
+       clients:
+         client.1:
+         - rados/test-upgrade-v11.0.0.sh
+         - cls
+       env:
+         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+   - print: "**** done rados/test-upgrade-v11.0.0.sh &  cls workload_x NOT upgraded  client"
+   - workunit:
+       branch: jewel
+       clients:
+         client.0:
+         - rados/test-upgrade-v11.0.0.sh
+         - cls
+   - print: "**** done rados/test-upgrade-v11.0.0.sh &  cls workload_x upgraded client"
+   - rgw: [client.1]
+   - print: "**** done rgw workload_x"
+   - s3tests:
+       client.1:
+         force-branch: ceph-jewel
+         rgw_server: client.1
+   - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+   sequential:
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart:
+       daemons: [osd.5]
+       wait-for-healthy: false
+       wait-for-osds-up: true
+   - exec:
+       mon.a:
+         - ceph osd set require_jewel_osds
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/% b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/+ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-x86_64.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/0-x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/1-jewel-install/jewel.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..590e1e3
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/1-jewel-install/jewel.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+    branch: jewel
+- print: "**** done install jewel"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install/upgrade ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
new file mode 100644
index 0000000..389a190
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/3-thrash/default.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance of increasing the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..eeffd92
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/5-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons were upgraded, the cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..13fe70c
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code-x86_64/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool 
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/% b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/+ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/openstack.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/start.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install/jewel.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..590e1e3
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install/jewel.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+    branch: jewel
+- print: "**** done install jewel"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install/upgrade ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0: 
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..8a87f75
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-mon/mona.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..eeffd92
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-next-mon/monb.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/8-next-mon/monc.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons were upgraded, the cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..ab439d5
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/centos_7.3.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/% b/qa/suites/upgrade/jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+ b/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..fd200d7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    fs: xfs
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
diff --git a/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml b/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..590e1e3
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+    branch: jewel
+- print: "**** done install jewel"
+- ceph:
+- print: "**** done ceph"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..ab5dcac
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install/upgrade ceph/-x on one node only
+   (1st half)
+   then restart osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..f22d24b
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+   randomly kill and revive osds,
+   with a small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+    - failed to encode map e
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/4-mon/mona.yaml b/qa/suites/upgrade/jewel-x/stress-split/4-mon/mona.yaml
new file mode 100644
index 0000000..e3c369d
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/4-mon/mona.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.a so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/5-workload/+ b/qa/suites/upgrade/jewel-x/stress-split/5-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-cls.yaml b/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-cls.yaml
new file mode 100644
index 0000000..84827e4
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   run basic cls tests for rbd
+tasks:
+- workunit:
+    branch: jewel
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-import-export.yaml b/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..dd3d25f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/5-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   run basic import/export cli tests for rbd
+tasks:
+- workunit:
+    branch: jewel
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/5-workload/readwrite.yaml b/qa/suites/upgrade/jewel-x/stress-split/5-workload/readwrite.yaml
new file mode 100644
index 0000000..1b161b4
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/5-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool, 
+   using only reads, writes, and deletes
+tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 500
+      write_append_excl: false
+      op_weights:
+        read: 45
+        write: 45
+        delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/5-workload/snaps-few-objects.yaml b/qa/suites/upgrade/jewel-x/stress-split/5-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..71445bd
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/5-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/6-next-mon/monb.yaml b/qa/suites/upgrade/jewel-x/stress-split/6-next-mon/monb.yaml
new file mode 100644
index 0000000..ba71c73
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/6-next-mon/monb.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+   restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/7-workload/+ b/qa/suites/upgrade/jewel-x/stress-split/7-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split/7-workload/radosbench.yaml b/qa/suites/upgrade/jewel-x/stress-split/7-workload/radosbench.yaml
new file mode 100644
index 0000000..7d2be5f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/7-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+   run randomized correctness test for rados operations
+   generate write load with rados bench
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+  - radosbench:
+      clients: [client.0]
+      time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/7-workload/rbd_api.yaml b/qa/suites/upgrade/jewel-x/stress-split/7-workload/rbd_api.yaml
new file mode 100644
index 0000000..81067e6
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/7-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   librbd C and C++ api tests
+tasks:
+- workunit:
+     branch: jewel
+     clients:
+        client.0:
+           - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/8-next-mon/monc.yaml b/qa/suites/upgrade/jewel-x/stress-split/8-next-mon/monc.yaml
new file mode 100644
index 0000000..12788b7
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/8-next-mon/monc.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons are upgraded, the ceph cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/9-workload/+ b/qa/suites/upgrade/jewel-x/stress-split/9-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/qa/suites/upgrade/jewel-x/stress-split/9-workload/rbd-python.yaml b/qa/suites/upgrade/jewel-x/stress-split/9-workload/rbd-python.yaml
new file mode 100644
index 0000000..8da3457
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/9-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   librbd python api tests
+tasks:
+- workunit:
+    branch: jewel
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/9-workload/rgw-swift.yaml b/qa/suites/upgrade/jewel-x/stress-split/9-workload/rgw-swift.yaml
new file mode 100644
index 0000000..ba004ce
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/9-workload/rgw-swift.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   swift api tests for rgw
+tasks:
+- rgw: 
+    client.0:
+    default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/9-workload/snaps-many-objects.yaml b/qa/suites/upgrade/jewel-x/stress-split/9-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/9-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/upgrade/jewel-x/stress-split/distros/centos_7.3.yaml b/qa/suites/upgrade/jewel-x/stress-split/distros/centos_7.3.yaml
new file mode 100644
index 0000000..9dfcc7f
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/distros/centos_7.3.yaml
@@ -0,0 +1,2 @@
+os_type: centos
+os_version: "7.3"
diff --git a/qa/suites/upgrade/jewel-x/stress-split/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/jewel-x/stress-split/distros/ubuntu_14.04.yaml
new file mode 100644
index 0000000..309e989
--- /dev/null
+++ b/qa/suites/upgrade/jewel-x/stress-split/distros/ubuntu_14.04.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "14.04"
diff --git a/qa/tasks/__init__.py b/qa/tasks/__init__.py
new file mode 100644
index 0000000..9a7949a
--- /dev/null
+++ b/qa/tasks/__init__.py
@@ -0,0 +1,6 @@
+import logging
+
+# Inherit teuthology's log level
+teuthology_log = logging.getLogger('teuthology')
+log = logging.getLogger(__name__)
+log.setLevel(teuthology_log.level)
diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py
new file mode 100644
index 0000000..3301372
--- /dev/null
+++ b/qa/tasks/admin_socket.py
@@ -0,0 +1,199 @@
+"""
+Admin Socket task -- used in rados, powercycle, and smoke testing
+"""
+from cStringIO import StringIO
+
+import json
+import logging
+import os
+import time
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+from teuthology.config import config as teuth_config
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+    """
+    Run an admin socket command, make sure the output is json, and run
+    a test program on it. The test program should read json from
+    stdin. This task succeeds if the test program exits with status 0.
+
+    To run the same test on all clients::
+
+        tasks:
+        - ceph:
+        - rados:
+        - admin_socket:
+            all:
+              dump_requests:
+                test: http://example.com/script
+
+    To restrict it to certain clients::
+
+        tasks:
+        - ceph:
+        - rados: [client.1]
+        - admin_socket:
+            client.1:
+              dump_requests:
+                test: http://example.com/script
+
+    If an admin socket command has arguments, they can be specified as
+    a list::
+
+        tasks:
+        - ceph:
+        - rados: [client.0]
+        - admin_socket:
+            client.0:
+              dump_requests:
+                test: http://example.com/script
+              help:
+                test: http://example.com/test_help_version
+                args: [version]
+
+    Note that there must be a ceph client with an admin socket running
+    before this task is run. The tests are parallelized at the client
+    level. Tests for a single client are run serially.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    assert isinstance(config, dict), \
+        'admin_socket task requires a dict for configuration'
+    teuthology.replace_all_with_clients(ctx.cluster, config)
+
+    with parallel() as ptask:
+        for client, tests in config.iteritems():
+            ptask.spawn(_run_tests, ctx, client, tests)
+
+
+def _socket_command(ctx, remote, socket_path, command, args):
+    """
+    Run an admin socket command and return the parsed JSON result.
+
+    :param ctx: Context
+    :param remote: Remote site
+    :param socket_path: path to socket
+    :param command: command to be run remotely
+    :param args: command arguments
+
+    :returns: parsed JSON output of the command
+    """
+    json_fp = StringIO()
+    testdir = teuthology.get_testdir(ctx)
+    max_tries = 120
+    while True:
+        proc = remote.run(
+            args=[
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'ceph',
+                '--admin-daemon', socket_path,
+                ] + command.split(' ') + args,
+            stdout=json_fp,
+            check_status=False,
+            )
+        if proc.exitstatus == 0:
+            break
+        assert max_tries > 0
+        max_tries -= 1
+        log.info('ceph cli returned an error, command not registered yet?')
+        log.info('sleeping and retrying ...')
+        time.sleep(1)
+    out = json_fp.getvalue()
+    json_fp.close()
+    log.debug('admin socket command %s returned %s', command, out)
+    return json.loads(out)
+
+def _run_tests(ctx, client, tests):
+    """
+    Create a temp directory and wait for a client socket to be created.
+    For each test, copy the executable locally and run the test.
+    Remove temp directory when finished.
+
+    :param ctx: Context
+    :param client: client machine to run the test
+    :param tests: list of tests to run
+    """
+    testdir = teuthology.get_testdir(ctx)
+    log.debug('Running admin socket tests on %s', client)
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
+    overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
+
+    try:
+        tmp_dir = os.path.join(
+            testdir,
+            'admin_socket_{client}'.format(client=client),
+            )
+        remote.run(
+            args=[
+                'mkdir',
+                '--',
+                tmp_dir,
+                run.Raw('&&'),
+                # wait for client process to create the socket
+                'while', 'test', '!', '-e', socket_path, run.Raw(';'),
+                'do', 'sleep', '1', run.Raw(';'), 'done',
+                ],
+            )
+
+        for command, config in tests.iteritems():
+            if config is None:
+                config = {}
+            teuthology.deep_merge(config, overrides)
+            log.debug('Testing %s with config %s', command, str(config))
+
+            test_path = None
+            if 'test' in config:
+                # hack: the git_url is always ceph-ci or ceph
+                git_url = teuth_config.get_ceph_git_url()
+                repo_name = 'ceph.git'
+                if git_url.count('ceph-ci'):
+                    repo_name = 'ceph-ci.git'
+                url = config['test'].format(
+                    branch=config.get('branch', 'master'),
+                    repo=repo_name,
+                    )
+                test_path = os.path.join(tmp_dir, command)
+                remote.run(
+                    args=[
+                        'wget',
+                        '-q',
+                        '-O',
+                        test_path,
+                        '--',
+                        url,
+                        run.Raw('&&'),
+                        'chmod',
+                        'u=rx',
+                        '--',
+                        test_path,
+                        ],
+                    )
+
+            args = config.get('args', [])
+            assert isinstance(args, list), \
+                'admin socket command args must be a list'
+            sock_out = _socket_command(ctx, remote, socket_path, command, args)
+            if test_path is not None:
+                remote.run(
+                    args=[
+                        test_path,
+                        ],
+                    stdin=json.dumps(sock_out),
+                    )
+
+    finally:
+        remote.run(
+            args=[
+                'rm', '-rf', '--', tmp_dir,
+                ],
+            )
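
Because _run_tests formats each configured test URL with {branch} and {repo}
placeholders and deep-merges an admin_socket overrides stanza, a suite can pin
the helper scripts to a branch. A hypothetical fragment (dump_requests reuses
the docstring example; the URL is illustrative only):

    overrides:
      admin_socket:
        branch: jewel
    tasks:
    - ceph:
    - admin_socket:
        client.0:
          dump_requests:
            test: "http://example.com/{repo}/raw/{branch}/qa/check_requests.py"
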
diff --git a/qa/tasks/apache.conf.template b/qa/tasks/apache.conf.template
new file mode 100644
index 0000000..d164571
--- /dev/null
+++ b/qa/tasks/apache.conf.template
@@ -0,0 +1,48 @@
+<IfModule !version_module>
+  LoadModule version_module {mod_path}/mod_version.so
+</IfModule>
+<IfModule !env_module>
+  LoadModule env_module {mod_path}/mod_env.so
+</IfModule>
+<IfModule !rewrite_module>
+  LoadModule rewrite_module {mod_path}/mod_rewrite.so
+</IfModule>
+<IfModule !log_config_module>
+  LoadModule log_config_module {mod_path}/mod_log_config.so
+</IfModule>
+
+Listen {port}
+ServerName {host}
+
+<IfVersion >= 2.4>
+  <IfModule !unixd_module>
+    LoadModule unixd_module {mod_path}/mod_unixd.so
+  </IfModule>
+  <IfModule !authz_core_module>
+    LoadModule authz_core_module {mod_path}/mod_authz_core.so
+  </IfModule>
+  <IfModule !mpm_worker_module>
+    LoadModule mpm_worker_module {mod_path}/mod_mpm_worker.so
+  </IfModule>
+  User {user}
+  Group {group}
+</IfVersion>
+
+ServerRoot {testdir}/apache
+ErrorLog {testdir}/archive/apache.{client}/error.log
+LogFormat "%h l %u %t \"%r\" %>s %b \"{{Referer}}i\" \"%{{User-agent}}i\"" combined
+CustomLog {testdir}/archive/apache.{client}/access.log combined
+PidFile {testdir}/apache/tmp.{client}/apache.pid
+DocumentRoot {testdir}/apache/htdocs.{client}
+
+
+<Directory {testdir}/apache/htdocs.{client}>
+  Options +ExecCGI
+  AllowOverride All
+  SetHandler fastcgi-script
+</Directory>
+
+AllowEncodedSlashes On
+ServerSignature Off
+MaxRequestsPerChild 0
+
diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py
new file mode 100644
index 0000000..efa9721
--- /dev/null
+++ b/qa/tasks/autotest.py
@@ -0,0 +1,166 @@
+""" 
+Run an autotest test on the ceph cluster.
+"""
+import json
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Run an autotest test on the ceph cluster.
+
+    Only autotest client tests are supported.
+
+    The config is a mapping from role name to list of tests to run on
+    that client.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0, client.1]
+        - autotest:
+            client.0: [dbench]
+            client.1: [bonnie]
+
+    You can also specify a list of tests to run on all clients::
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+        - autotest:
+            all: [dbench]
+    """
+    assert isinstance(config, dict)
+    config = teuthology.replace_all_with_clients(ctx.cluster, config)
+    log.info('Setting up autotest...')
+    testdir = teuthology.get_testdir(ctx)
+    with parallel() as p:
+        for role in config.iterkeys():
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            p.spawn(_download, testdir, remote)
+
+    log.info('Making a separate scratch dir for every client...')
+    for role in config.iterkeys():
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+        remote.run(
+            args=[
+                'sudo',
+                'install',
+                '-d',
+                '-m', '0755',
+                '--owner={user}'.format(user='ubuntu'), #TODO
+                '--',
+                scratch,
+                ],
+            )
+
+    with parallel() as p:
+        for role, tests in config.iteritems():
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            p.spawn(_run_tests, testdir, remote, role, tests)
+
+def _download(testdir, remote):
+    """
+    Download.  Does not explicitly support multiple tasks in a single run.
+    """
+    remote.run(
+        args=[
+            # explicitly does not support multiple autotest tasks
+            # in a single run; the result archival would conflict
+            'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir),
+            run.Raw('&&'),
+            'mkdir', '{tdir}/autotest'.format(tdir=testdir),
+            run.Raw('&&'),
+            'wget',
+            '-nv',
+            '--no-check-certificate',
+            'https://github.com/ceph/autotest/tarball/ceph',
+            '-O-',
+            run.Raw('|'),
+            'tar',
+            '-C', '{tdir}/autotest'.format(tdir=testdir),
+            '-x',
+            '-z',
+            '-f-',
+            '--strip-components=1',
+            ],
+        )
+
+def _run_tests(testdir, remote, role, tests):
+    """
+    Spawned to run test on remote site
+    """
+    assert isinstance(role, basestring)
+    PREFIX = 'client.'
+    assert role.startswith(PREFIX)
+    id_ = role[len(PREFIX):]
+    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+    scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+
+    assert isinstance(tests, list)
+    for idx, testname in enumerate(tests):
+        log.info('Running autotest client test #%d: %s...', idx, testname)
+
+        tag = 'client.{id}.num{idx}.{testname}'.format(
+            idx=idx,
+            testname=testname,
+            id=id_,
+            )
+        control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag)
+        teuthology.write_file(
+            remote=remote,
+            path=control,
+            data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
+                data=json.dumps(dict(
+                        url=testname,
+                        dir=scratch,
+                        # TODO perhaps tag
+                        # results will be in {testdir}/autotest/client/results/dbench
+                        # or {testdir}/autotest/client/results/dbench.{tag}
+                        )),
+                ),
+            )
+        remote.run(
+            args=[
+                '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir),
+                '--verbose',
+                '--harness=simple',
+                '--tag={tag}'.format(tag=tag),
+                control,
+                run.Raw('3>&1'),
+                ],
+            )
+
+        remote.run(
+            args=[
+                'rm', '-rf', '--', control,
+                ],
+            )
+
+        remote.run(
+            args=[
+                'mv',
+                '--',
+                '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag),
+                '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag),
+                ],
+            )
+
+    remote.run(
+        args=[
+            'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir),
+            ],
+        )
diff --git a/qa/tasks/blktrace.py b/qa/tasks/blktrace.py
new file mode 100644
index 0000000..96aaf50
--- /dev/null
+++ b/qa/tasks/blktrace.py
@@ -0,0 +1,96 @@
+"""
+Run blktrace program through teuthology
+"""
+import contextlib
+import logging
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+blktrace = '/usr/sbin/blktrace'
+daemon_signal = 'term'
+
+@contextlib.contextmanager
+def setup(ctx, config):
+    """
+    Setup all the remotes
+    """
+    osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster']))
+    log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
+
+    for remote, roles_for_host in osds.remotes.iteritems():
+        log.info('Creating %s on %s' % (log_dir, remote.name))
+        remote.run(
+            args=['mkdir', '-p', '-m0755', '--', log_dir],
+            wait=False,
+            )
+    yield
+
+@contextlib.contextmanager
+def execute(ctx, config):
+    """
+    Run the blktrace program on remote machines.
+    """
+    procs = []
+    testdir = teuthology.get_testdir(ctx)
+    log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
+
+    osds = ctx.cluster.only(teuthology.is_type('osd'))
+    for remote, roles_for_host in osds.remotes.iteritems():
+        roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd',
+                                                     config['cluster']):
+            if roles_to_devs.get(role):
+                dev = roles_to_devs[role]
+                log.info("running blktrace on %s: %s" % (remote.name, dev))
+
+                proc = remote.run(
+                    args=[
+                        'cd',
+                        log_dir,
+                        run.Raw(';'),
+                        'daemon-helper',
+                        daemon_signal,
+                        'sudo',
+                        blktrace,
+                        '-o',
+                        dev.rsplit("/", 1)[1],
+                        '-d',
+                        dev,
+                        ],
+                    wait=False,
+                    stdin=run.PIPE,
+                    )
+                procs.append(proc)
+    try:
+        yield
+    finally:
+        osds = ctx.cluster.only(teuthology.is_type('osd'))
+        log.info('stopping blktrace processes')
+        for proc in procs:
+            proc.stdin.close()
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Usage:
+        blktrace:
+
+    or:
+        blktrace:
+          cluster: backup
+
+    Runs blktrace on all osds in the specified cluster (the 'ceph' cluster by
+    default).
+    """
+    if config is None:
+        config = {}
+    config['cluster'] = config.get('cluster', 'ceph')
+
+    with contextutil.nested(
+        lambda: setup(ctx=ctx, config=config),
+        lambda: execute(ctx=ctx, config=config),
+        ):
+        yield
diff --git a/qa/tasks/boto.cfg.template b/qa/tasks/boto.cfg.template
new file mode 100644
index 0000000..cdfe887
--- /dev/null
+++ b/qa/tasks/boto.cfg.template
@@ -0,0 +1,2 @@
+[Boto]
+http_socket_timeout = {idle_timeout}
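
Whichever task consumes this template presumably renders it with the idle
timeout of the gateway under test; with the 300-second default_idle_timeout
used by the rgw-swift workload above, the generated boto.cfg would look like:

    [Boto]
    http_socket_timeout = 300
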
diff --git a/qa/tasks/calamari_nosetests.py b/qa/tasks/calamari_nosetests.py
new file mode 100644
index 0000000..c6bbaf3
--- /dev/null
+++ b/qa/tasks/calamari_nosetests.py
@@ -0,0 +1,289 @@
+import contextlib
+import logging
+import os
+import textwrap
+import yaml
+
+from cStringIO import StringIO
+from teuthology import contextutil
+from teuthology import misc
+from teuthology import packaging
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+# extra stuff we need to do our job here
+EXTRA_PKGS = [
+    'git',
+]
+
+# stuff that would be in a devmode install, but should be
+# installed in the system for running nosetests against
+# a production install.
+EXTRA_NOSETEST_PKGS = [
+    'python-psutil',
+    'python-mock',
+]
+
+
+def find_client0(cluster):
+    ''' Find remote that has client.0 role, or None '''
+    for rem, roles in cluster.remotes.iteritems():
+        if 'client.0' in roles:
+            return rem
+    return None
+
+
+def pip(remote, package, venv=None, uninstall=False, force=False):
+    ''' {un}install a package with pip, possibly in a virtualenv '''
+    if venv:
+        pip = os.path.join(venv, 'bin', 'pip')
+        args = ['sudo', pip]
+    else:
+        args = ['sudo', 'pip']
+
+    if uninstall:
+        args.extend(['uninstall', '-y'])
+    else:
+        args.append('install')
+        if force:
+            args.append('-I')
+
+    args.append(package)
+    remote.run(args=args)
+
+
+@contextlib.contextmanager
+def install_epel(remote):
+    ''' install a disabled-by-default epel repo config file '''
+    remove = False
+    try:
+        if remote.os.package_type == 'deb':
+            yield
+        else:
+            remove = True
+            distromajor = remote.os.version.split('.')[0]
+
+            repofiledata = textwrap.dedent('''
+                [epel]
+                name=epel{version}
+                metalink=http://mirrors.fedoraproject.org/metalink?repo=epel-{version}&arch=$basearch
+                enabled=0
+                gpgcheck=0
+            ''').format(version=distromajor)
+
+            misc.create_file(remote, '/etc/yum.repos.d/epel.repo',
+                             data=repofiledata, sudo=True)
+            remote.run(args='sudo yum clean all')
+            yield
+
+    finally:
+        if remove:
+            misc.delete_file(remote, '/etc/yum.repos.d/epel.repo', sudo=True)
+
+
+def enable_epel(remote, enable=True):
+    ''' enable/disable the epel repo '''
+    args = 'sudo sed -i'.split()
+    if enable:
+        args.extend(['s/enabled=0/enabled=1/'])
+    else:
+        args.extend(['s/enabled=1/enabled=0/'])
+    args.extend(['/etc/yum.repos.d/epel.repo'])
+
+    remote.run(args=args)
+    remote.run(args='sudo yum clean all')
+
+
+@contextlib.contextmanager
+def install_extra_pkgs(client):
+    ''' Install EXTRA_PKGS '''
+    try:
+        for pkg in EXTRA_PKGS:
+            packaging.install_package(pkg, client)
+        yield
+
+    finally:
+        for pkg in EXTRA_PKGS:
+            packaging.remove_package(pkg, client)
+
+
+@contextlib.contextmanager
+def clone_calamari(config, client):
+    ''' clone calamari source into current directory on remote '''
+    branch = config.get('calamari_branch', 'master')
+    url = config.get('calamari_giturl', 'git://github.com/ceph/calamari')
+    try:
+        out = StringIO()
+        # ensure branch is present (clone -b will succeed even if
+        # the branch doesn't exist, falling back to master)
+        client.run(
+            args='git ls-remote %s %s' % (url, branch),
+            stdout=out,
+            label='check for calamari branch %s existence' % branch
+        )
+        if len(out.getvalue()) == 0:
+            raise RuntimeError("Calamari branch %s doesn't exist" % branch)
+        client.run(args='git clone -b %s %s' % (branch, url))
+        yield
+    finally:
+        # sudo python setup.py develop may have left some root files around
+        client.run(args='sudo rm -rf calamari')
+
+
+@contextlib.contextmanager
+def write_info_yaml(cluster, client):
+    ''' write info.yaml to client for nosetests '''
+    try:
+        info = {
+            'cluster': {
+                rem.name: {'roles': roles}
+                for rem, roles in cluster.remotes.iteritems()
+            }
+        }
+        misc.create_file(client, 'calamari/info.yaml',
+                         data=yaml.safe_dump(info, default_flow_style=False))
+        yield
+    finally:
+        misc.delete_file(client, 'calamari/info.yaml')
+
+
+@contextlib.contextmanager
+def write_test_conf(client):
+    ''' write calamari/tests/test.conf to client for nosetests '''
+    try:
+        testconf = textwrap.dedent('''
+            [testing]
+
+            calamari_control = external
+            ceph_control = external
+            bootstrap = False
+            api_username = admin
+            api_password = admin
+            embedded_timeout_factor = 1
+            external_timeout_factor = 3
+            external_cluster_path = info.yaml
+        ''')
+        misc.create_file(client, 'calamari/tests/test.conf', data=testconf)
+        yield
+
+    finally:
+        misc.delete_file(client, 'calamari/tests/test.conf')
+
+
+@contextlib.contextmanager
+def prepare_nosetest_env(client):
+    try:
+        # extra dependencies that would be in the devmode venv
+        if client.os.package_type == 'rpm':
+            enable_epel(client, enable=True)
+        for package in EXTRA_NOSETEST_PKGS:
+            packaging.install_package(package, client)
+        if client.os.package_type == 'rpm':
+            enable_epel(client, enable=False)
+
+        # install nose itself into the calamari venv, force it in case it's
+        # already installed in the system, so we can invoke it by path without
+        # fear that it's not present
+        pip(client, 'nose', venv='/opt/calamari/venv', force=True)
+
+        # install a later version of requests into the venv as well
+        # (for precise)
+        pip(client, 'requests', venv='/opt/calamari/venv', force=True)
+
+        # link (setup.py develop) calamari/rest-api into the production venv
+        # because production does not include calamari_rest.management, needed
+        # for test_rest_api.py's ApiIntrospection
+        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
+               'sudo /opt/calamari/venv/bin/python setup.py develop'.split()
+        client.run(args=args)
+
+        # because, at least in Python 2.6/Centos, site.py uses
+        # 'os.path.exists()' to process .pth file entries, and exists() uses
+        # access(2) to check for existence, all the paths leading up to
+        # $HOME/calamari/rest-api need to be searchable by all users of
+        # the package, which will include the WSGI/Django app, running
+        # as the Apache user.  So make them all world-read-and-execute.
+        args = 'sudo chmod a+x'.split() + \
+            ['.', './calamari', './calamari/rest-api']
+        client.run(args=args)
+
+        # make one dummy request just to get the WSGI app to do
+        # all its log creation here, before the chmod below (I'm
+        # looking at you, graphite -- /var/log/calamari/info.log and
+        # /var/log/calamari/exception.log)
+        client.run(args='wget -q -O /dev/null http://localhost')
+
+        # /var/log/calamari/* is root-or-apache write-only
+        client.run(args='sudo chmod a+w /var/log/calamari/*')
+
+        yield
+
+    finally:
+        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
+               'sudo /opt/calamari/venv/bin/python setup.py develop -u'.split()
+        client.run(args=args)
+        for pkg in ('nose', 'requests'):
+            pip(client, pkg, venv='/opt/calamari/venv', uninstall=True)
+        for package in EXTRA_NOSETEST_PKGS:
+            packaging.remove_package(package, client)
+
+
+@contextlib.contextmanager
+def run_nosetests(client):
+    ''' Actually run the tests '''
+    args = [
+        'cd',
+        'calamari',
+        run.Raw(';'),
+        'CALAMARI_CONFIG=/etc/calamari/calamari.conf',
+        '/opt/calamari/venv/bin/nosetests',
+        '-v',
+        'tests/',
+    ]
+    client.run(args=args)
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run Calamari tests against an instance set up by 'calamari_server'.
+
+    -- clone the Calamari source into $HOME (see options)
+    -- write calamari/info.yaml describing the cluster
+    -- write calamari/tests/test.conf containing
+        'external' for calamari_control and ceph_control
+        'bootstrap = False' to disable test bootstrapping (installing minions)
+        no api_url necessary (inferred from client.0)
+        'external_cluster_path = info.yaml'
+    -- modify the production Calamari install to allow test runs:
+        install nose in the venv
+        install EXTRA_NOSETEST_PKGS
+        link in, with setup.py develop, calamari_rest (for ApiIntrospection)
+    -- set CALAMARI_CONFIG to point to /etc/calamari/calamari.conf
+    -- nosetests -v tests/
+
+    Options are:
+        calamari_giturl: url from which to git clone calamari
+                         (default: git://github.com/ceph/calamari)
+        calamari_branch: git branch of calamari to check out
+                         (default: master)
+
+    Note: the tests must find a clean cluster, so don't forget to
+    set the crush default type appropriately, or install min_size OSD hosts
+    """
+    client0 = find_client0(ctx.cluster)
+    if client0 is None:
+        raise RuntimeError("must have client.0 role")
+
+    with contextutil.nested(
+        lambda: install_epel(client0),
+        lambda: install_extra_pkgs(client0),
+        lambda: clone_calamari(config, client0),
+        lambda: write_info_yaml(ctx.cluster, client0),
+        lambda: write_test_conf(client0),
+        lambda: prepare_nosetest_env(client0),
+        lambda: run_nosetests(client0),
+    ):
+        yield
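
A minimal, hypothetical suite fragment for this task, using only the two
options documented in the docstring and assuming a calamari_server instance has
already been set up earlier in the job:

    tasks:
    - calamari_nosetests:
        calamari_giturl: git://github.com/ceph/calamari
        calamari_branch: master
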
diff --git a/qa/tasks/calamari_setup.py b/qa/tasks/calamari_setup.py
new file mode 100644
index 0000000..8ef404f
--- /dev/null
+++ b/qa/tasks/calamari_setup.py
@@ -0,0 +1,467 @@
+"""
+Calamari setup task
+"""
+import contextlib
+import logging
+import os
+import requests
+import shutil
+import webbrowser
+
+from cStringIO import StringIO
+from teuthology.orchestra import run
+from teuthology import contextutil
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+
+DEFAULTS = {
+    'version': 'v0.80.9',
+    'test_image': None,
+    'start_browser': False,
+    'email': 'x@y.com',
+    'no_epel': True,
+    'calamari_user': 'admin',
+    'calamari_password': 'admin',
+}
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Do the setup of a calamari server.
+
+    - calamari_setup:
+        version: 'v80.1'
+        test_image: <path to tarball or iso>
+
+    Options are (see DEFAULTS above):
+
+    version -- ceph version we are testing against
+    test_image -- Can be an HTTP URL, in which case fetch from this
+                  http path; can also be local path
+    start_browser -- If True, start a browser.  To be used by runs that will
+                     bring up a browser quickly for human use.  Set to False
+                     for overnight suites that are testing for problems in
+                     the installation itself
+    email -- email address for the user
+    no_epel -- indicates if we should remove epel files prior to yum
+               installations.
+    calamari_user -- user name to log into gui
+    calamari_password -- calamari user password
+    """
+    local_config = DEFAULTS
+    local_config.update(config)
+    config = local_config
+    cal_svr = None
+    for remote_, roles in ctx.cluster.remotes.items():
+        if 'client.0' in roles:
+            cal_svr = remote_
+            break
+    if not cal_svr:
+        raise RuntimeError('client.0 not found in roles')
+    with contextutil.nested(
+        lambda: adjust_yum_repos(ctx, cal_svr, config['no_epel']),
+        lambda: calamari_install(config, cal_svr),
+        lambda: ceph_install(ctx, cal_svr),
+        # do it again because ceph-deploy installed epel for centos
+        lambda: remove_epel(ctx, config['no_epel']),
+        lambda: calamari_connect(ctx, cal_svr),
+        lambda: browser(config['start_browser'], cal_svr.hostname),
+    ):
+        yield
+
+
+@contextlib.contextmanager
+def adjust_yum_repos(ctx, cal_svr, no_epel):
+    """
+    For each remote machine, fix the repos if yum is used.
+    """
+    ice_distro = str(cal_svr.os)
+    if ice_distro.startswith('rhel') or ice_distro.startswith('centos'):
+        if no_epel:
+            for remote in ctx.cluster.remotes:
+                fix_yum_repos(remote, ice_distro)
+    try:
+        yield
+    finally:
+        if ice_distro.startswith('rhel') or ice_distro.startswith('centos'):
+            if no_epel:
+                for remote in ctx.cluster.remotes:
+                    restore_yum_repos(remote)
+
+
+def restore_yum_repos(remote):
+    """
+    Copy the old saved repo back in.
+    """
+    if remote.run(args=['sudo', 'rm', '-rf', '/etc/yum.repos.d']).exitstatus:
+        return False
+    if remote.run(args=['sudo', 'mv', '/etc/yum.repos.d.old',
+                        '/etc/yum.repos.d']).exitstatus:
+        return False
+
+
+def fix_yum_repos(remote, distro):
+    """
+    For yum calamari installations, the repos.d directory should only
+    contain a repo file named rhel<version-number>.repo
+    """
+    if distro.startswith('centos'):
+        # hack alert: detour: install lttng for ceph
+        # this works because epel is preinstalled on the vpms
+        # this is not a generic solution
+        # this is here solely to test the one-off 1.3.0 release for centos6
+        remote.run(args="sudo yum -y install lttng-tools")
+        cmds = [
+            'sudo mkdir /etc/yum.repos.d.old'.split(),
+            ['sudo', 'cp', run.Raw('/etc/yum.repos.d/*'),
+             '/etc/yum.repos.d.old'],
+            ['sudo', 'rm', run.Raw('/etc/yum.repos.d/epel*')],
+        ]
+        for cmd in cmds:
+            if remote.run(args=cmd).exitstatus:
+                return False
+    else:
+        cmds = [
+            'sudo mv /etc/yum.repos.d /etc/yum.repos.d.old'.split(),
+            'sudo mkdir /etc/yum.repos.d'.split(),
+        ]
+        for cmd in cmds:
+            if remote.run(args=cmd).exitstatus:
+                return False
+
+        # map "distroversion" from Remote.os to a tuple of
+        # (repo title, repo name descriptor, apt-mirror repo path chunk)
+        yum_repo_params = {
+            'rhel 6.4': ('rhel6-server', 'RHEL', 'rhel6repo-server'),
+            'rhel 6.5': ('rhel6-server', 'RHEL', 'rhel6repo-server'),
+            'rhel 7.0': ('rhel7-server', 'RHEL', 'rhel7repo/server'),
+        }
+        repotitle, reponame, path = yum_repo_params[distro]
+        repopath = '/etc/yum.repos.d/%s.repo' % repotitle
+        # TO DO:  Make this data configurable too
+        repo_contents = '\n'.join(
+            ('[%s]' % repotitle,
+             'name=%s $releasever - $basearch' % reponame,
+             'baseurl=http://apt-mirror.front.sepia.ceph.com/' + path,
+             'gpgcheck=0',
+             'enabled=1')
+        )
+        misc.sudo_write_file(remote, repopath, repo_contents)
+    cmds = [
+        'sudo yum clean all'.split(),
+        'sudo yum makecache'.split(),
+    ]
+    for cmd in cmds:
+        if remote.run(args=cmd).exitstatus:
+            return False
+    return True
+
+
+@contextlib.contextmanager
+def remove_epel(ctx, no_epel):
+    """
+    just remove epel.  No undo; assumed that it's used after
+    adjust_yum_repos, and relies on its state-save/restore.
+    """
+    if no_epel:
+        for remote in ctx.cluster.remotes:
+            if remote.os.name.startswith('centos'):
+                remote.run(args=[
+                    'sudo', 'rm', '-f', run.Raw('/etc/yum.repos.d/epel*')
+                ])
+    try:
+        yield
+    finally:
+        pass
+
+
+def get_iceball_with_http(url, destdir):
+    '''
+    Copy iceball with http to destdir.  Try both .tar.gz and .iso.
+    '''
+    # stream=True means we don't download until copyfileobj below,
+    # and don't need a temp file
+    r = requests.get(url, stream=True)
+    if not r.ok:
+        raise RuntimeError("Failed to download %s", str(url))
+    filename = os.path.join(destdir, url.split('/')[-1])
+    with open(filename, 'w') as f:
+        shutil.copyfileobj(r.raw, f)
+    log.info('saved %s as %s' % (url, filename))
+    return filename
+
+
+@contextlib.contextmanager
+def calamari_install(config, cal_svr):
+    """
+    Install calamari
+
+    The steps here are:
+        -- Get the iceball, locally or from http
+        -- Copy the iceball to the calamari server, and untar/mount it.
+        -- Run ice-setup on the calamari server.
+        -- Run calamari-ctl initialize.
+    """
+    client_id = str(cal_svr)
+    at_loc = client_id.find('@')
+    if at_loc > 0:
+        client_id = client_id[at_loc + 1:]
+
+    test_image = config['test_image']
+
+    if not test_image:
+        raise RuntimeError('Must supply test image')
+    log.info('calamari test image: %s' % test_image)
+    delete_iceball = False
+
+    if test_image.startswith('http'):
+        iceball_file = get_iceball_with_http(test_image, '/tmp')
+        delete_iceball = True
+    else:
+        iceball_file = test_image
+
+    remote_iceball_file = os.path.join('/tmp', os.path.split(iceball_file)[1])
+    cal_svr.put_file(iceball_file, remote_iceball_file)
+    if iceball_file.endswith('.tar.gz'):   # XXX specify tar/iso in config?
+        icetype = 'tarball'
+    elif iceball_file.endswith('.iso'):
+        icetype = 'iso'
+    else:
+        raise RuntimeError("Can't handle iceball {0}".format(iceball_file))
+
+    if icetype == 'tarball':
+        ret = cal_svr.run(args=['gunzip', run.Raw('<'), remote_iceball_file,
+                          run.Raw('|'), 'tar', 'xvf', run.Raw('-')])
+        if ret.exitstatus:
+            raise RuntimeError('remote iceball untar failed')
+    elif icetype == 'iso':
+        mountpoint = '/mnt/'   # XXX create?
+        ret = cal_svr.run(
+            args=['sudo', 'mount', '-o', 'loop', '-r',
+                  remote_iceball_file, mountpoint]
+        )
+
+    # install ice_setup package
+    args = {
+        'deb': 'sudo dpkg -i /mnt/ice-setup*deb',
+        'rpm': 'sudo yum -y localinstall /mnt/ice_setup*rpm'
+    }.get(cal_svr.system_type, None)
+    if not args:
+        raise RuntimeError('{0}: unknown system type'.format(cal_svr))
+    ret = cal_svr.run(args=args)
+    if ret.exitstatus:
+        raise RuntimeError('ice_setup package install failed')
+
+    # Run ice_setup
+    icesetdata = 'yes\n\n%s\nhttp\n' % client_id
+    ice_in = StringIO(icesetdata)
+    ice_out = StringIO()
+    if icetype == 'tarball':
+        args = 'sudo python ice_setup.py'
+    else:
+        args = 'sudo ice_setup -d /mnt'
+    ret = cal_svr.run(args=args, stdin=ice_in, stdout=ice_out)
+    log.debug(ice_out.getvalue())
+    if ret.exitstatus:
+        raise RuntimeError('ice_setup failed')
+
+    # Run calamari-ctl initialize.
+    icesetdata = '%s\n%s\n%s\n%s\n' % (
+        config['calamari_user'],
+        config['email'],
+        config['calamari_password'],
+        config['calamari_password'],
+    )
+    ice_in = StringIO(icesetdata)
+    ret = cal_svr.run(args=['sudo', 'calamari-ctl', 'initialize'],
+                      stdin=ice_in, stdout=ice_out)
+    log.debug(ice_out.getvalue())
+    if ret.exitstatus:
+        raise RuntimeError('calamari-ctl initialize failed')
+    try:
+        yield
+    finally:
+        log.info('Cleaning up after Calamari installation')
+        if icetype == 'iso':
+            cal_svr.run(args=['sudo', 'umount', mountpoint])
+        if delete_iceball:
+            os.unlink(iceball_file)
+
+
+@contextlib.contextmanager
+def ceph_install(ctx, cal_svr):
+    """
+    Install ceph if ceph was not previously installed by teuthology.  This
+    code tests the case where calamari is installed on a brand new system.
+    """
+    loc_inst = False
+    if 'install' not in [x.keys()[0] for x in ctx.config['tasks']]:
+        loc_inst = True
+        ret = deploy_ceph(ctx, cal_svr)
+        if ret:
+            raise RuntimeError('ceph installs failed')
+    try:
+        yield
+    finally:
+        if loc_inst:
+            if not undeploy_ceph(ctx, cal_svr):
+                log.error('Cleanup of Ceph installed by Calamari-setup failed')
+
+
+def deploy_ceph(ctx, cal_svr):
+    """
+    Perform the ceph-deploy actions needed to bring up a Ceph cluster.  This
+    test is needed to check the ceph-deploy that comes with the calamari
+    package.
+    """
+    osd_to_name = {}
+    all_machines = set()
+    all_mons = set()
+    all_osds = set()
+
+    # collect which remotes are osds and which are mons
+    for remote in ctx.cluster.remotes:
+        all_machines.add(remote.shortname)
+        roles = ctx.cluster.remotes[remote]
+        for role in roles:
+            daemon_type, number = role.split('.')
+            if daemon_type == 'osd':
+                all_osds.add(remote.shortname)
+                osd_to_name[number] = remote.shortname
+            if daemon_type == 'mon':
+                all_mons.add(remote.shortname)
+
+    # figure out whether we're in "1.3+" mode: prior to 1.3, there was
+    # only one Ceph repo, and it was all installed on every Ceph host.
+    # with 1.3, we've split that into MON and OSD repos (in order to
+    # be able to separately track subscriptions per-node).  This
+    # requires new switches to ceph-deploy to select which locally-served
+    # repo is connected to which cluster host.
+    #
+    # (TODO: A further issue is that the installation/setup may not have
+    # created local repos at all, but that is the subject of a future
+    # change.)
+
+    r = cal_svr.run(args='/usr/bin/test -d /mnt/MON', check_status=False)
+    use_install_repo = (r.returncode == 0)
+
+    # pre-1.3:
+    # ceph-deploy new <all_mons>
+    # ceph-deploy install <all_machines>
+    # ceph-deploy mon create-initial
+    #
+    # 1.3 and later:
+    # ceph-deploy new <all_mons>
+    # ceph-deploy install --repo --release=ceph-mon <all_mons>
+    # ceph-deploy install <all_mons>
+    # ceph-deploy install --repo --release=ceph-osd <all_osds>
+    # ceph-deploy install <all_osds>
+    # ceph-deploy mon create-initial
+    #
+    # one might think the install <all_mons> and install <all_osds>
+    # commands would need --mon and --osd, but #12147 has not yet
+    # made it into RHCS 1.3.0; since the package split also hasn't
+    # landed, we can avoid using the flag and avoid the bug.
+
+    cmds = ['ceph-deploy new ' + ' '.join(all_mons)]
+
+    if use_install_repo:
+        cmds.append('ceph-deploy repo ceph-mon ' +
+                    ' '.join(all_mons))
+        cmds.append('ceph-deploy install --no-adjust-repos --mon ' +
+                    ' '.join(all_mons))
+        cmds.append('ceph-deploy repo ceph-osd ' +
+                    ' '.join(all_osds))
+        cmds.append('ceph-deploy install --no-adjust-repos --osd ' +
+                    ' '.join(all_osds))
+        # We tell users to use `hostname` in our docs. Do the same here.
+        cmds.append('ceph-deploy install --no-adjust-repos --cli `hostname`')
+    else:
+        cmds.append('ceph-deploy install ' + ' '.join(all_machines))
+
+    cmds.append('ceph-deploy mon create-initial')
+
+    for cmd in cmds:
+        cal_svr.run(args=cmd).exitstatus
+
+    disk_labels = '_dcba'
+    # NEEDS WORK assumes disks start with vd (need to check this somewhere)
+    for cmd_pts in [['disk', 'zap'], ['osd', 'prepare'], ['osd', 'activate']]:
+        mach_osd_cnt = {}
+        for osdn in osd_to_name:
+            osd_mac = osd_to_name[osdn]
+            mach_osd_cnt[osd_mac] = mach_osd_cnt.get(osd_mac, 0) + 1
+            arg_list = ['ceph-deploy']
+            arg_list.extend(cmd_pts)
+            disk_id = '%s:vd%s' % (osd_to_name[osdn],
+                                   disk_labels[mach_osd_cnt[osd_mac]])
+            if 'activate' in cmd_pts:
+                disk_id += '1'
+            arg_list.append(disk_id)
+            cal_svr.run(args=arg_list).exitstatus
+
+
+def undeploy_ceph(ctx, cal_svr):
+    """
+    Cleanup deployment of ceph.
+    """
+    all_machines = []
+    ret = True
+    for remote in ctx.cluster.remotes:
+        roles = ctx.cluster.remotes[remote]
+        if (
+            not any('osd' in role for role in roles) and
+            not any('mon' in role for role in roles)
+        ):
+            continue
+        ret &= remote.run(
+            args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+                  'sudo', 'service', 'ceph', 'stop']
+        ).exitstatus
+        all_machines.append(remote.shortname)
+    all_machines = set(all_machines)
+    cmd1 = ['ceph-deploy', 'uninstall']
+    cmd1.extend(all_machines)
+    ret &= cal_svr.run(args=cmd1).exitstatus
+    cmd2 = ['ceph-deploy', 'purge']
+    cmd2.extend(all_machines)
+    ret &= cal_svr.run(args=cmd2).exitstatus
+    for remote in ctx.cluster.remotes:
+        ret &= remote.run(args=['sudo', 'rm', '-rf',
+                                '.ssh/known_hosts']).exitstatus
+    return ret
+
+
+@contextlib.contextmanager
+def calamari_connect(ctx, cal_svr):
+    """
+    Connect calamari to the ceph nodes.
+    """
+    connects = ['ceph-deploy', 'calamari', 'connect']
+    for machine_info in ctx.cluster.remotes:
+        if 'client.0' not in ctx.cluster.remotes[machine_info]:
+            connects.append(machine_info.shortname)
+    ret = cal_svr.run(args=connects)
+    if ret.exitstatus:
+        raise RuntimeError('calamari connect failed')
+    try:
+        yield
+    finally:
+        log.info('Calamari test terminating')
+
+
+@contextlib.contextmanager
+def browser(start_browser, web_page):
+    """
+    Bring up a browser, if wanted.
+    """
+    if start_browser:
+        webbrowser.open('http://%s' % web_page)
+    try:
+        yield
+    finally:
+        if start_browser:
+            log.info('Web browser support terminating')
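
A hypothetical invocation overriding a few of the DEFAULTS above (the test
image URL is a placeholder, not a real artifact):

    tasks:
    - calamari_setup:
        version: v0.80.9
        test_image: http://example.com/iceballs/ICE-1.3.0.tar.gz
        start_browser: false
        calamari_user: admin
        calamari_password: admin
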
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
new file mode 100644
index 0000000..ec08457
--- /dev/null
+++ b/qa/tasks/ceph.py
@@ -0,0 +1,1531 @@
+"""
+Ceph cluster task.
+
+Handle the setup, starting, and clean-up of a Ceph cluster.
+"""
+from cStringIO import StringIO
+
+import argparse
+import contextlib
+import errno
+import logging
+import os
+import json
+import time
+import gevent
+
+from ceph_manager import CephManager, write_conf
+from tasks.cephfs.filesystem import Filesystem
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology import exceptions
+from teuthology.orchestra import run
+import ceph_client as cclient
+from teuthology.orchestra.daemon import DaemonGroup
+
+CEPH_ROLE_TYPES = ['mon', 'osd', 'mds', 'rgw']
+
+log = logging.getLogger(__name__)
+
+
+def generate_caps(type_):
+    """
+    Each call will return the next capability for each system type
+    (essentially a subset of possible role values).  Valid types are osd,
+    mds and client.
+    """
+    defaults = dict(
+        osd=dict(
+            mon='allow *',
+            osd='allow *',
+        ),
+        mds=dict(
+            mon='allow *',
+            osd='allow *',
+            mds='allow',
+        ),
+        client=dict(
+            mon='allow rw',
+            osd='allow rwx',
+            mds='allow',
+        ),
+    )
+    for subsystem, capability in defaults[type_].items():
+        yield '--cap'
+        yield subsystem
+        yield capability
+
+
+@contextlib.contextmanager
+def ceph_log(ctx, config):
+    """
+    Create /var/log/ceph log directory that is open to everyone.
+    Add valgrind and profiling-logger directories.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Making ceph log dir writeable by non-root...')
+    run.wait(
+        ctx.cluster.run(
+            args=[
+                'sudo',
+                'chmod',
+                '777',
+                '/var/log/ceph',
+            ],
+            wait=False,
+        )
+    )
+    log.info('Disabling ceph logrotate...')
+    run.wait(
+        ctx.cluster.run(
+            args=[
+                'sudo',
+                'rm', '-f', '--',
+                '/etc/logrotate.d/ceph',
+            ],
+            wait=False,
+        )
+    )
+    log.info('Creating extra log directories...')
+    run.wait(
+        ctx.cluster.run(
+            args=[
+                'sudo',
+                'install', '-d', '-m0777', '--',
+                '/var/log/ceph/valgrind',
+                '/var/log/ceph/profiling-logger',
+            ],
+            wait=False,
+        )
+    )
+
+    class Rotater(object):
+        stop_event = gevent.event.Event()
+
+        def invoke_logrotate(self):
+            # 1) install ceph-test.conf in /etc/logrotate.d
+            # 2) continuously loop over logrotate invocation with ceph-test.conf
+            while not self.stop_event.is_set():
+                self.stop_event.wait(timeout=30)
+                run.wait(
+                    ctx.cluster.run(
+                        args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'
+                              ],
+                        wait=False,
+                    )
+                )
+
+        def begin(self):
+            self.thread = gevent.spawn(self.invoke_logrotate)
+
+        def end(self):
+            self.stop_event.set()
+            self.thread.get()
+
+    def write_rotate_conf(ctx, daemons):
+        testdir = teuthology.get_testdir(ctx)
+        rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
+        with file(rotate_conf_path, 'rb') as f:
+            conf = ""
+            for daemon, size in daemons.iteritems():
+                log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
+                conf += f.read().format(daemon_type=daemon, max_size=size)
+                f.seek(0, 0)
+
+            for remote in ctx.cluster.remotes.iterkeys():
+                teuthology.write_file(remote=remote,
+                                      path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
+                                      data=StringIO(conf)
+                                      )
+                remote.run(
+                    args=[
+                        'sudo',
+                        'mv',
+                        '{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
+                        '/etc/logrotate.d/ceph-test.conf',
+                        run.Raw('&&'),
+                        'sudo',
+                        'chmod',
+                        '0644',
+                        '/etc/logrotate.d/ceph-test.conf',
+                        run.Raw('&&'),
+                        'sudo',
+                        'chown',
+                        'root.root',
+                        '/etc/logrotate.d/ceph-test.conf'
+                    ]
+                )
+                remote.chcon('/etc/logrotate.d/ceph-test.conf',
+                             'system_u:object_r:etc_t:s0')
+
+    if ctx.config.get('log-rotate'):
+        daemons = ctx.config.get('log-rotate')
+        log.info('Setting up log rotation with ' + str(daemons))
+        write_rotate_conf(ctx, daemons)
+        logrotater = Rotater()
+        logrotater.begin()
+    try:
+        yield
+
+    finally:
+        if ctx.config.get('log-rotate'):
+            log.info('Shutting down logrotate')
+            logrotater.end()
+            ctx.cluster.run(
+                args=['sudo', 'rm', '/etc/logrotate.d/ceph-test.conf'
+                      ]
+            )
+        if ctx.archive is not None and \
+                not (ctx.config.get('archive-on-error') and ctx.summary['success']):
+            # and logs
+            log.info('Compressing logs...')
+            run.wait(
+                ctx.cluster.run(
+                    args=[
+                        'sudo',
+                        'find',
+                        '/var/log/ceph',
+                        '-name',
+                        '*.log',
+                        '-print0',
+                        run.Raw('|'),
+                        'sudo',
+                        'xargs',
+                        '-0',
+                        '--no-run-if-empty',
+                        '--',
+                        'gzip',
+                        '--',
+                    ],
+                    wait=False,
+                ),
+            )
+
+            log.info('Archiving logs...')
+            path = os.path.join(ctx.archive, 'remote')
+            os.makedirs(path)
+            for remote in ctx.cluster.remotes.iterkeys():
+                sub = os.path.join(path, remote.shortname)
+                os.makedirs(sub)
+                teuthology.pull_directory(remote, '/var/log/ceph',
+                                          os.path.join(sub, 'log'))
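
A minimal sketch of the config shape that drives the rotation logic above, assuming the job YAML carries a top-level log-rotate mapping; the daemon names and sizes shown are illustrative, not defaults.

    # Illustrative only: ctx.config['log-rotate'] maps a daemon name to the
    # max log size that write_rotate_conf() substitutes into the
    # logrotate.conf template as {daemon_type} / {max_size}.
    log_rotate_cfg = {
        'ceph-osd': '10G',   # hypothetical daemon name and size
        'ceph-mds': '10G',
    }
    for daemon, size in sorted(log_rotate_cfg.items()):
        print('would rotate %s logs at %s' % (daemon, size))
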
+
+
+def assign_devs(roles, devs):
+    """
+    Create a dictionary of devs indexed by roles
+
+    :param roles: List of roles
+    :param devs: Corresponding list of devices.
+    :returns: Dictionary of devs indexed by roles.
+    """
+    return dict(zip(roles, devs))
+
+
+@contextlib.contextmanager
+def valgrind_post(ctx, config):
+    """
+    After the tests run, look through all the valgrind logs.  Exceptions are raised
+    if textual errors occurred in the logs, or if valgrind exceptions were detected in
+    the logs.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    try:
+        yield
+    finally:
+        lookup_procs = list()
+        log.info('Checking for errors in any valgrind logs...')
+        for remote in ctx.cluster.remotes.iterkeys():
+            # look at valgrind logs for each node
+            proc = remote.run(
+                args=[
+                    'sudo',
+                    'zgrep',
+                    '<kind>',
+                    run.Raw('/var/log/ceph/valgrind/*'),
+                    '/dev/null',  # include a second file so that we always get a filename prefix on the output
+                    run.Raw('|'),
+                    'sort',
+                    run.Raw('|'),
+                    'uniq',
+                ],
+                wait=False,
+                check_status=False,
+                stdout=StringIO(),
+            )
+            lookup_procs.append((proc, remote))
+
+        valgrind_exception = None
+        for (proc, remote) in lookup_procs:
+            proc.wait()
+            out = proc.stdout.getvalue()
+            for line in out.split('\n'):
+                if line == '':
+                    continue
+                try:
+                    (file, kind) = line.split(':')
+                except Exception:
+                    log.error('failed to split line %s', line)
+                    raise
+                log.debug('file %s kind %s', file, kind)
+                if (file.find('mds') >= 0) and kind.find('Lost') > 0:
+                    continue
+                log.error('saw valgrind issue %s in %s', kind, file)
+                valgrind_exception = Exception('saw valgrind issues')
+
+        if config.get('expect_valgrind_errors'):
+            if not valgrind_exception:
+                raise Exception('expected valgrind issues and found none')
+        else:
+            if valgrind_exception:
+                raise valgrind_exception
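
A minimal sketch of the "file:kind" lines the loop above expects from zgrep; the log path and valgrind error kind shown are illustrative examples, not output from a real run.

    # Hypothetical zgrep output line, parsed the same way as above.
    line = '/var/log/ceph/valgrind/osd.0.log:  <kind>Leak_DefinitelyLost</kind>'
    (file_, kind) = line.split(':')
    # MDS 'Lost' kinds are tolerated above; anything else becomes a failure.
    print('file %s kind %s' % (file_, kind))
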
+
+
+@contextlib.contextmanager
+def crush_setup(ctx, config):
+    cluster_name = config['cluster']
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    profile = config.get('crush_tunables', 'default')
+    log.info('Setting crush tunables to %s', profile)
+    mon_remote.run(
+        args=['sudo', 'ceph', '--cluster', cluster_name,
+              'osd', 'crush', 'tunables', profile])
+    yield
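
A minimal sketch of the command crush_setup() issues when the ceph task config supplies a crush_tunables value; 'optimal' is just an example profile name.

    # With config = {'cluster': 'ceph', 'crush_tunables': 'optimal'} the
    # remote invocation above becomes:
    cmd = ['sudo', 'ceph', '--cluster', 'ceph',
           'osd', 'crush', 'tunables', 'optimal']
    print(' '.join(cmd))
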
+
+
+@contextlib.contextmanager
+def cephfs_setup(ctx, config):
+    cluster_name = config['cluster']
+    testdir = teuthology.get_testdir(ctx)
+    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
+    # If there are any MDSs, then create a filesystem for them to use
+    # Do this last because it requires the mon cluster to be up and running
+    if mdss.remotes:
+        log.info('Setting up CephFS filesystem...')
+
+        ceph_fs = Filesystem(ctx) # TODO: make Filesystem cluster-aware
+        if not ceph_fs.legacy_configured():
+            ceph_fs.create()
+
+        is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
+        all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
+        num_active = len([r for r in all_roles if is_active_mds(r)])
+        mon_remote.run(
+            args=[
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                coverage_dir,
+                'ceph', 'mds', 'set', 'allow_multimds', 'true',
+                '--yes-i-really-mean-it'],
+            check_status=False,  # probably old version, upgrade test
+        )
+        mon_remote.run(args=[
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            coverage_dir,
+            'ceph',
+            '--cluster', cluster_name,
+            'mds', 'set_max_mds', str(num_active)])
+
+    yield
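
A minimal sketch of how the standby filter above counts active MDS roles for set_max_mds; the role names used here are hypothetical.

    # Roles ending in '-s' or containing '-s-' are treated as standbys and
    # excluded from the active count.
    def is_active_mds(role):
        return 'mds.' in role and not role.endswith('-s') and '-s-' not in role

    roles = ['mds.a', 'mds.b', 'mds.c-s', 'mds.d-s-0']   # hypothetical
    print([r for r in roles if is_active_mds(r)])        # ['mds.a', 'mds.b']
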
+
+
+@contextlib.contextmanager
+def cluster(ctx, config):
+    """
+    Handle the creation and removal of a ceph cluster.
+
+    On startup:
+        Create directories needed for the cluster.
+        Create remote journals for all osds.
+        Create and set keyring.
+        Copy the monmap to the test systems.
+        Setup mon nodes.
+        Setup mds nodes.
+        Mkfs osd nodes.
+        Add keyring information to monmaps
+        Mkfs mon nodes.
+
+    On exit:
+        If errors occurred, extract a failure message and store in ctx.summary.
+        Unmount all test files and temporary journaling files.
+        Save the monitor information and archive all ceph logs.
+        Cleanup the keyring setup, and remove all monitor map and data files left over.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if ctx.config.get('use_existing_cluster', False) is True:
+        log.info("'use_existing_cluster' is true; skipping cluster creation")
+        yield
+        return
+
+    testdir = teuthology.get_testdir(ctx)
+    cluster_name = config['cluster']
+    data_dir = '{tdir}/{cluster}.data'.format(tdir=testdir, cluster=cluster_name)
+    log.info('Creating ceph cluster %s...', cluster_name)
+    run.wait(
+        ctx.cluster.run(
+            args=[
+                'install', '-d', '-m0755', '--',
+                data_dir,
+            ],
+            wait=False,
+        )
+    )
+
+    run.wait(
+        ctx.cluster.run(
+            args=[
+                'sudo',
+                'install', '-d', '-m0777', '--', '/var/run/ceph',
+            ],
+            wait=False,
+        )
+    )
+
+    devs_to_clean = {}
+    remote_to_roles_to_devs = {}
+    remote_to_roles_to_journals = {}
+    osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name))
+    for remote, roles_for_host in osds.remotes.iteritems():
+        devs = teuthology.get_scratch_devices(remote)
+        roles_to_devs = {}
+        roles_to_journals = {}
+        if config.get('fs'):
+            log.info('fs option selected, checking for scratch devs')
+            log.info('found devs: %s' % (str(devs),))
+            devs_id_map = teuthology.get_wwn_id_map(remote, devs)
+            iddevs = devs_id_map.values()
+            roles_to_devs = assign_devs(
+                teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), iddevs
+            )
+            if len(roles_to_devs) < len(iddevs):
+                iddevs = iddevs[len(roles_to_devs):]
+            devs_to_clean[remote] = []
+
+        if config.get('block_journal'):
+            log.info('block journal enabled')
+            roles_to_journals = assign_devs(
+                teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), iddevs
+            )
+            log.info('journal map: %s', roles_to_journals)
+
+        if config.get('tmpfs_journal'):
+            log.info('tmpfs journal enabled')
+            roles_to_journals = {}
+            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
+            for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
+                tmpfs = '/mnt/' + role
+                roles_to_journals[role] = tmpfs
+                remote.run(args=['truncate', '-s', '1500M', tmpfs])
+            log.info('journal map: %s', roles_to_journals)
+
+        log.info('dev map: %s' % (str(roles_to_devs),))
+        remote_to_roles_to_devs[remote] = roles_to_devs
+        remote_to_roles_to_journals[remote] = roles_to_journals
+
+    log.info('Generating config...')
+    remotes_and_roles = ctx.cluster.remotes.items()
+    roles = [role_list for (remote, role_list) in remotes_and_roles]
+    ips = [host for (host, port) in
+           (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
+    conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips, cluster=cluster_name)
+    for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
+        for role, journal in roles_to_journals.iteritems():
+            name = teuthology.ceph_role(role)
+            if name not in conf:
+                conf[name] = {}
+            conf[name]['osd journal'] = journal
+    for section, keys in config['conf'].iteritems():
+        for key, value in keys.iteritems():
+            log.info("[%s] %s = %s" % (section, key, value))
+            if section not in conf:
+                conf[section] = {}
+            conf[section][key] = value
+
+    if config.get('tmpfs_journal'):
+        conf['journal dio'] = False
+
+    if not hasattr(ctx, 'ceph'):
+        ctx.ceph = {}
+    ctx.ceph[cluster_name] = argparse.Namespace()
+    ctx.ceph[cluster_name].conf = conf
+
+    default_keyring = '/etc/ceph/{cluster}.keyring'.format(cluster=cluster_name)
+    keyring_path = config.get('keyring_path', default_keyring)
+
+    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+
+    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
+
+    log.info('Setting up %s...' % firstmon)
+    ctx.cluster.only(firstmon).run(
+        args=[
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            coverage_dir,
+            'ceph-authtool',
+            '--create-keyring',
+            keyring_path,
+        ],
+    )
+    ctx.cluster.only(firstmon).run(
+        args=[
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            coverage_dir,
+            'ceph-authtool',
+            '--gen-key',
+            '--name=mon.',
+            keyring_path,
+        ],
+    )
+    ctx.cluster.only(firstmon).run(
+        args=[
+            'sudo',
+            'chmod',
+            '0644',
+            keyring_path,
+        ],
+    )
+    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+    monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
+                                                   cluster=cluster_name)
+    fsid = teuthology.create_simple_monmap(
+        ctx,
+        remote=mon0_remote,
+        conf=conf,
+        path=monmap_path,
+    )
+    if 'global' not in conf:
+        conf['global'] = {}
+    conf['global']['fsid'] = fsid
+
+    default_conf_path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster_name)
+    conf_path = config.get('conf_path', default_conf_path)
+    log.info('Writing %s for FSID %s...' % (conf_path, fsid))
+    write_conf(ctx, conf_path, cluster_name)
+
+    log.info('Creating admin key on %s...' % firstmon)
+    ctx.cluster.only(firstmon).run(
+        args=[
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            coverage_dir,
+            'ceph-authtool',
+            '--gen-key',
+            '--name=client.admin',
+            '--set-uid=0',
+            '--cap', 'mon', 'allow *',
+            '--cap', 'osd', 'allow *',
+            '--cap', 'mds', 'allow *',
+            keyring_path,
+        ],
+    )
+
+    log.info('Copying monmap to all nodes...')
+    keyring = teuthology.get_file(
+        remote=mon0_remote,
+        path=keyring_path,
+    )
+    monmap = teuthology.get_file(
+        remote=mon0_remote,
+        path=monmap_path,
+    )
+
+    for rem in ctx.cluster.remotes.iterkeys():
+        # copy mon key and initial monmap
+        log.info('Sending monmap to node {remote}'.format(remote=rem))
+        teuthology.sudo_write_file(
+            remote=rem,
+            path=keyring_path,
+            data=keyring,
+            perms='0644'
+        )
+        teuthology.write_file(
+            remote=rem,
+            path=monmap_path,
+            data=monmap,
+        )
+
+    log.info('Setting up mon nodes...')
+    mons = ctx.cluster.only(teuthology.is_type('mon', cluster_name))
+    osdmap_path = '{tdir}/{cluster}.osdmap'.format(tdir=testdir,
+                                                   cluster=cluster_name)
+    run.wait(
+        mons.run(
+            args=[
+                'adjust-ulimits',
+                'ceph-coverage',
+                coverage_dir,
+                'osdmaptool',
+                '-c', conf_path,
+                '--clobber',
+                '--createsimple', '{num:d}'.format(
+                    num=teuthology.num_instances_of_type(ctx.cluster, 'osd',
+                                                         cluster_name),
+                ),
+                osdmap_path,
+                '--pg_bits', '2',
+                '--pgp_bits', '4',
+            ],
+            wait=False,
+        ),
+    )
+
+    log.info('Setting up mds nodes...')
+    mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
+    for remote, roles_for_host in mdss.remotes.iteritems():
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mds',
+                                                     cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            mds_dir = '/var/lib/ceph/mds/{cluster}-{id}'.format(
+                cluster=cluster_name,
+                id=id_,
+            )
+            remote.run(
+                args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    mds_dir,
+                    run.Raw('&&'),
+                    'sudo',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-authtool',
+                    '--create-keyring',
+                    '--gen-key',
+                    '--name=mds.{id}'.format(id=id_),
+                    mds_dir + '/keyring',
+                ],
+            )
+
+    cclient.create_keyring(ctx, cluster_name)
+    log.info('Running mkfs on osd nodes...')
+
+    if not hasattr(ctx, 'disk_config'):
+        ctx.disk_config = argparse.Namespace()
+    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev'):
+        ctx.disk_config.remote_to_roles_to_dev = {}
+    if not hasattr(ctx.disk_config, 'remote_to_roles_to_journals'):
+        ctx.disk_config.remote_to_roles_to_journals = {}
+    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_mount_options'):
+        ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
+    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_fstype'):
+        ctx.disk_config.remote_to_roles_to_dev_fstype = {}
+
+    teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs)
+    teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_journals, remote_to_roles_to_journals)
+
+    log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
+    for remote, roles_for_host in osds.remotes.iteritems():
+        roles_to_devs = remote_to_roles_to_devs[remote]
+        roles_to_journals = remote_to_roles_to_journals[remote]
+
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            mnt_point = '/var/lib/ceph/osd/{cluster}-{id}'.format(cluster=cluster_name, id=id_)
+            remote.run(
+                args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    mnt_point,
+                ])
+            log.info(str(roles_to_journals))
+            log.info(role)
+            if roles_to_devs.get(role):
+                dev = roles_to_devs[role]
+                fs = config.get('fs')
+                package = None
+                mkfs_options = config.get('mkfs_options')
+                mount_options = config.get('mount_options')
+                if fs == 'btrfs':
+                    # package = 'btrfs-tools'
+                    if mount_options is None:
+                        mount_options = ['noatime', 'user_subvol_rm_allowed']
+                    if mkfs_options is None:
+                        mkfs_options = ['-m', 'single',
+                                        '-l', '32768',
+                                        '-n', '32768']
+                if fs == 'xfs':
+                    # package = 'xfsprogs'
+                    if mount_options is None:
+                        mount_options = ['noatime']
+                    if mkfs_options is None:
+                        mkfs_options = ['-f', '-i', 'size=2048']
+                if fs == 'ext4' or fs == 'ext3':
+                    if mount_options is None:
+                        mount_options = ['noatime', 'user_xattr']
+
+                if mount_options is None:
+                    mount_options = []
+                if mkfs_options is None:
+                    mkfs_options = []
+                mkfs = ['mkfs.%s' % fs] + mkfs_options
+                log.info('%s on %s on %s' % (mkfs, dev, remote))
+                if package is not None:
+                    remote.run(
+                        args=[
+                            'sudo',
+                            'apt-get', 'install', '-y', package
+                        ],
+                        stdout=StringIO(),
+                    )
+
+                try:
+                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+                except run.CommandFailedError:
+                    # Newer btrfs-tools doesn't prompt for overwrite, use -f
+                    if '-f' not in mount_options:
+                        mkfs_options.append('-f')
+                        mkfs = ['mkfs.%s' % fs] + mkfs_options
+                        log.info('%s on %s on %s' % (mkfs, dev, remote))
+                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+
+                log.info('mount %s on %s -o %s' % (dev, remote,
+                                                   ','.join(mount_options)))
+                remote.run(
+                    args=[
+                        'sudo',
+                        'mount',
+                        '-t', fs,
+                        '-o', ','.join(mount_options),
+                        dev,
+                        mnt_point,
+                    ]
+                )
+                remote.run(
+                    args=[
+                        'sudo', '/sbin/restorecon', mnt_point,
+                    ],
+                    check_status=False,
+                )
+                if remote not in ctx.disk_config.remote_to_roles_to_dev_mount_options:
+                    ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
+                ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][role] = mount_options
+                if remote not in ctx.disk_config.remote_to_roles_to_dev_fstype:
+                    ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
+                ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs
+                devs_to_clean[remote].append(mnt_point)
+
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            remote.run(
+                args=[
+                    'sudo',
+                    'MALLOC_CHECK_=3',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-osd',
+                    '--cluster',
+                    cluster_name,
+                    '--mkfs',
+                    '--mkkey',
+                    '-i', id_,
+                    '--monmap', monmap_path,
+                ],
+            )
+
+    log.info('Reading keys from all nodes...')
+    keys_fp = StringIO()
+    keys = []
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['mds', 'osd']:
+            for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
+                _, _, id_ = teuthology.split_role(role)
+                data = teuthology.get_file(
+                    remote=remote,
+                    path='/var/lib/ceph/{type}/{cluster}-{id}/keyring'.format(
+                        type=type_,
+                        id=id_,
+                        cluster=cluster_name,
+                    ),
+                    sudo=True,
+                )
+                keys.append((type_, id_, data))
+                keys_fp.write(data)
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            data = teuthology.get_file(
+                remote=remote,
+                path='/etc/ceph/{cluster}.client.{id}.keyring'.format(id=id_, cluster=cluster_name)
+            )
+            keys.append(('client', id_, data))
+            keys_fp.write(data)
+
+    log.info('Adding keys to all mons...')
+    writes = mons.run(
+        args=[
+            'sudo', 'tee', '-a',
+            keyring_path,
+        ],
+        stdin=run.PIPE,
+        wait=False,
+        stdout=StringIO(),
+    )
+    keys_fp.seek(0)
+    teuthology.feed_many_stdins_and_close(keys_fp, writes)
+    run.wait(writes)
+    for type_, id_, data in keys:
+        run.wait(
+            mons.run(
+                args=[
+                         'sudo',
+                         'adjust-ulimits',
+                         'ceph-coverage',
+                         coverage_dir,
+                         'ceph-authtool',
+                         keyring_path,
+                         '--name={type}.{id}'.format(
+                             type=type_,
+                             id=id_,
+                         ),
+                     ] + list(generate_caps(type_)),
+                wait=False,
+            ),
+        )
+
+    log.info('Running mkfs on mon nodes...')
+    for remote, roles_for_host in mons.remotes.iteritems():
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            remote.run(
+                args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    '/var/lib/ceph/mon/{cluster}-{id}'.format(id=id_, cluster=cluster_name),
+                ],
+            )
+            remote.run(
+                args=[
+                    'sudo',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-mon',
+                    '--cluster', cluster_name,
+                    '--mkfs',
+                    '-i', id_,
+                    '--monmap', monmap_path,
+                    '--osdmap', osdmap_path,
+                    '--keyring', keyring_path,
+                ],
+            )
+
+    run.wait(
+        mons.run(
+            args=[
+                'rm',
+                '--',
+                monmap_path,
+                osdmap_path,
+            ],
+            wait=False,
+        ),
+    )
+
+    try:
+        yield
+    except Exception:
+        # we need to know this below
+        ctx.summary['success'] = False
+        raise
+    finally:
+        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+
+        log.info('Checking cluster log for badness...')
+
+        def first_in_ceph_log(pattern, excludes):
+            """
+            Find the first occurrence of the pattern specified in the Ceph log.
+            Returns None if none found.
+
+            :param pattern: Pattern scanned for.
+            :param excludes: Patterns to ignore.
+            :return: First line of text (or None if not found)
+            """
+            args = [
+                'sudo',
+                'egrep', pattern,
+                '/var/log/ceph/{cluster}.log'.format(cluster=cluster_name),
+            ]
+            for exclude in excludes:
+                args.extend([run.Raw('|'), 'egrep', '-v', exclude])
+            args.extend([
+                run.Raw('|'), 'head', '-n', '1',
+            ])
+            r = mon0_remote.run(
+                stdout=StringIO(),
+                args=args,
+            )
+            stdout = r.stdout.getvalue()
+            if stdout != '':
+                return stdout
+            return None
+
+        if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
+                             config['log_whitelist']) is not None:
+            log.warning('Found errors (ERR|WRN|SEC) in cluster log')
+            ctx.summary['success'] = False
+            # use the most severe problem as the failure reason
+            if 'failure_reason' not in ctx.summary:
+                for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
+                    match = first_in_ceph_log(pattern, config['log_whitelist'])
+                    if match is not None:
+                        ctx.summary['failure_reason'] = \
+                            '"{match}" in cluster log'.format(
+                                match=match.rstrip('\n'),
+                            )
+                        break
+
+        for remote, dirs in devs_to_clean.iteritems():
+            for dir_ in dirs:
+                log.info('Unmounting %s on %s' % (dir_, remote))
+                try:
+                    remote.run(
+                        args=[
+                            'sync',
+                            run.Raw('&&'),
+                            'sudo',
+                            'umount',
+                            '-f',
+                            dir_
+                        ]
+                    )
+                except Exception as e:
+                    remote.run(args=[
+                        'sudo',
+                        run.Raw('PATH=/usr/sbin:$PATH'),
+                        'lsof',
+                        run.Raw(';'),
+                        'ps', 'auxf',
+                    ])
+                    raise e
+
+        if config.get('tmpfs_journal'):
+            log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
+            for remote, roles_for_host in osds.remotes.iteritems():
+                remote.run(
+                    args=['sudo', 'umount', '-f', '/mnt'],
+                    check_status=False,
+                )
+
+        if ctx.archive is not None and \
+                not (ctx.config.get('archive-on-error') and ctx.summary['success']):
+
+            # archive mon data, too
+            log.info('Archiving mon data...')
+            path = os.path.join(ctx.archive, 'data')
+            try:
+                os.makedirs(path)
+            except OSError as e:
+                if e.errno == errno.EEXIST:
+                    pass
+                else:
+                    raise
+            for remote, roles in mons.remotes.iteritems():
+                for role in roles:
+                    is_mon = teuthology.is_type('mon', cluster_name)
+                    if is_mon(role):
+                        _, _, id_ = teuthology.split_role(role)
+                        mon_dir = '/var/lib/ceph/mon/' + \
+                                  '{0}-{1}'.format(cluster_name, id_)
+                        teuthology.pull_directory_tarball(
+                            remote,
+                            mon_dir,
+                            path + '/' + role + '.tgz')
+
+        log.info('Cleaning ceph cluster...')
+        run.wait(
+            ctx.cluster.run(
+                args=[
+                    'sudo',
+                    'rm',
+                    '-rf',
+                    '--',
+                    conf_path,
+                    keyring_path,
+                    data_dir,
+                    monmap_path,
+                    osdmap_path,
+                    run.Raw('{tdir}/../*.pid'.format(tdir=testdir)),
+                ],
+                wait=False,
+            ),
+        )
+
+
+def osd_scrub_pgs(ctx, config):
+    """
+    Scrub pgs when we exit.
+
+    First make sure all pgs are active and clean.
+    Next scrub all osds.
+    Then periodically check until all pgs have scrub time stamps that
+    indicate the last scrub completed.  Time out if no progress is made
+    here after two minutes.
+    """
+    retries = 12
+    delays = 10
+    cluster_name = config['cluster']
+    manager = ctx.managers[cluster_name]
+    all_clean = False
+    for _ in range(0, retries):
+        stats = manager.get_pg_stats()
+        states = [stat['state'] for stat in stats]
+        if len(set(states)) == 1 and states[0] == 'active+clean':
+            all_clean = True
+            break
+        log.info("Waiting for all osds to be active and clean.")
+        time.sleep(delays)
+    if not all_clean:
+        log.info("Scrubbing terminated -- not all pgs were active and clean.")
+        return
+    check_time_now = time.localtime()
+    time.sleep(1)
+    all_roles = teuthology.all_roles(ctx.cluster)
+    for role in teuthology.cluster_roles_of_type(all_roles, 'osd', cluster_name):
+        log.info("Scrubbing {osd}".format(osd=role))
+        _, _, id_ = teuthology.split_role(role)
+        manager.raw_cluster_cmd('osd', 'deep-scrub', id_)
+    prev_good = 0
+    gap_cnt = 0
+    loop = True
+    while loop:
+        stats = manager.get_pg_stats()
+        timez = [stat['last_scrub_stamp'] for stat in stats]
+        loop = False
+        thiscnt = 0
+        for tmval in timez:
+            pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S')
+            if pgtm > check_time_now:
+                thiscnt += 1
+            else:
+                loop = True
+        if thiscnt > prev_good:
+            prev_good = thiscnt
+            gap_cnt = 0
+        else:
+            gap_cnt += 1
+            if gap_cnt > retries:
+                log.info('Exiting scrub checking -- not all pgs scrubbed.')
+                return
+        if loop:
+            log.info('Still waiting for all pgs to be scrubbed.')
+            time.sleep(delays)
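
A minimal sketch of the timestamp comparison used above; the stamp string is an illustrative example of the 'last_scrub_stamp' format, not real cluster output.

    import time

    check_time_now = time.localtime()
    tmval = '2017-03-08 14:31:29.123456'   # hypothetical pg stat stamp
    pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S')
    # struct_time values compare like tuples, so this prints False for the
    # old example stamp and becomes True once a pg is rescrubbed after the
    # checkpoint taken before the deep-scrub commands were issued.
    print(pgtm > check_time_now)
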
+
+
+@contextlib.contextmanager
+def run_daemon(ctx, config, type_):
+    """
+    Run daemons for a role type.  Handle the startup and termination of a daemon.
+    On startup -- set coverage, cpu_profile, and valgrind values for all remotes,
+    and a max_mds value for one mds.
+    On cleanup -- Stop all existing daemons of this type.
+
+    :param ctx: Context
+    :param config: Configuration
+    :param type_: Role type
+    """
+    cluster_name = config['cluster']
+    log.info('Starting %s daemons in cluster %s...', type_, cluster_name)
+    testdir = teuthology.get_testdir(ctx)
+    daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))
+
+    # check whether any daemons of this type are configured
+    if daemons is None:
+        return
+    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+
+    daemon_signal = 'kill'
+    if config.get('coverage') or config.get('valgrind') is not None:
+        daemon_signal = 'term'
+
+    for remote, roles_for_host in daemons.remotes.iteritems():
+        is_type_ = teuthology.is_type(type_, cluster_name)
+        for role in roles_for_host:
+            if not is_type_(role):
+                continue
+            _, _, id_ = teuthology.split_role(role)
+
+            run_cmd = [
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                coverage_dir,
+                'daemon-helper',
+                daemon_signal,
+            ]
+            run_cmd_tail = [
+                'ceph-%s' % (type_),
+                '-f',
+                '--cluster', cluster_name,
+                '-i', id_]
+
+            if type_ in config.get('cpu_profile', []):
+                profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (role)
+                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])
+
+            if config.get('valgrind') is not None:
+                valgrind_args = None
+                if type_ in config['valgrind']:
+                    valgrind_args = config['valgrind'][type_]
+                if role in config['valgrind']:
+                    valgrind_args = config['valgrind'][role]
+                run_cmd = teuthology.get_valgrind_args(testdir, role,
+                                                       run_cmd,
+                                                       valgrind_args)
+
+            run_cmd.extend(run_cmd_tail)
+
+            ctx.daemons.add_daemon(remote, type_, id_,
+                                   cluster=cluster_name,
+                                   args=run_cmd,
+                                   logger=log.getChild(role),
+                                   stdin=run.PIPE,
+                                   wait=False,
+                                   )
+
+    try:
+        yield
+    finally:
+        teuthology.stop_daemons_of_type(ctx, type_, cluster_name)
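
A minimal sketch of how the valgrind lookup above resolves; the flags mirror the example in the task() docstring further down, and the type/role names are illustrative.

    # Role-specific settings win because the role lookup happens second.
    valgrind_cfg = {'osd': '--tool=memcheck',
                    'osd.1': ['--tool=memcheck', '--leak-check=no']}
    type_, role = 'osd', 'osd.1'
    valgrind_args = None
    if type_ in valgrind_cfg:
        valgrind_args = valgrind_cfg[type_]
    if role in valgrind_cfg:
        valgrind_args = valgrind_cfg[role]
    print(valgrind_args)   # ['--tool=memcheck', '--leak-check=no']
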
+
+
+def healthy(ctx, config):
+    """
+    Wait for all osds to be up, and for ceph health to report HEALTH_OK.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    config = config if isinstance(config, dict) else dict()
+    cluster_name = config.get('cluster', 'ceph')
+    log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
+    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+    teuthology.wait_until_osds_up(
+        ctx,
+        cluster=ctx.cluster,
+        remote=mon0_remote,
+        ceph_cluster=cluster_name,
+    )
+    teuthology.wait_until_healthy(
+        ctx,
+        remote=mon0_remote,
+        ceph_cluster=cluster_name,
+    )
+
+    if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
+        # Some MDSs exist, wait for them to be healthy
+        ceph_fs = Filesystem(ctx) # TODO: make Filesystem cluster-aware
+        ceph_fs.wait_for_daemons(timeout=300)
+
+
+def wait_for_osds_up(ctx, config):
+    """
+    Wait for all osds to come up.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Waiting until ceph osds are all up...')
+    cluster_name = config.get('cluster', 'ceph')
+    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+    teuthology.wait_until_osds_up(
+        ctx,
+        cluster=ctx.cluster,
+        remote=mon0_remote
+    )
+
+
+def wait_for_mon_quorum(ctx, config):
+    """
+    Check remote ceph status until all monitors are up.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if isinstance(config, dict):
+        mons = config['daemons']
+        cluster_name = config.get('cluster', 'ceph')
+    else:
+        assert isinstance(config, list)
+        mons = config
+        cluster_name = 'ceph'
+    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (remote,) = ctx.cluster.only(firstmon).remotes.keys()
+    while True:
+        r = remote.run(
+            args=[
+                'sudo',
+                'ceph',
+                'quorum_status',
+            ],
+            stdout=StringIO(),
+            logger=log.getChild('quorum_status'),
+        )
+        j = json.loads(r.stdout.getvalue())
+        q = j.get('quorum_names', [])
+        log.debug('Quorum: %s', q)
+        if sorted(q) == sorted(mons):
+            break
+        time.sleep(1)
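
A minimal sketch of the two config shapes accepted above and the quorum comparison; the JSON payload is an abbreviated, hypothetical quorum_status reply.

    import json

    config = {'daemons': ['a', 'b', 'c']}        # or simply ['a', 'b', 'c']
    mons = config['daemons'] if isinstance(config, dict) else config
    reply = '{"quorum_names": ["b", "a", "c"]}'  # hypothetical, abbreviated
    q = json.loads(reply).get('quorum_names', [])
    print(sorted(q) == sorted(mons))             # True -> quorum reached
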
+
+
+def created_pool(ctx, config):
+    """
+    Add new pools to the dictionary of pools that the ceph-manager
+    knows about.
+    """
+    for new_pool in config:
+        if new_pool not in ctx.managers['ceph'].pools:
+            ctx.managers['ceph'].pools[new_pool] = ctx.managers['ceph'].get_pool_property(
+                new_pool, 'pg_num')
+
+
+@contextlib.contextmanager
+def restart(ctx, config):
+    """
+    Restart ceph daemons
+
+    For example::
+      tasks:
+      - ceph.restart: [all]
+
+    For example::
+      tasks:
+      - ceph.restart: [osd.0, mon.1, mds.*]
+
+    or::
+
+      tasks:
+      - ceph.restart:
+          daemons: [osd.0, mon.1]
+          wait-for-healthy: false
+          wait-for-osds-up: true
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if config is None:
+        config = {}
+    elif isinstance(config, list):
+        config = {'daemons': config}
+
+    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
+    clusters = set()
+    for role in daemons:
+        cluster, type_, id_ = teuthology.split_role(role)
+        ctx.daemons.get_daemon(type_, id_, cluster).restart()
+        clusters.add(cluster)
+
+    if config.get('wait-for-healthy', True):
+        for cluster in clusters:
+            healthy(ctx=ctx, config=dict(cluster=cluster))
+    if config.get('wait-for-osds-up', False):
+        for cluster in clusters:
+            wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+    yield
+
+
+@contextlib.contextmanager
+def stop(ctx, config):
+    """
+    Stop ceph daemons
+
+    For example::
+      tasks:
+      - ceph.stop: [mds.*]
+
+      tasks:
+      - ceph.stop: [osd.0, osd.2]
+
+      tasks:
+      - ceph.stop:
+          daemons: [osd.0, osd.2]
+
+    """
+    if config is None:
+        config = {}
+    elif isinstance(config, list):
+        config = {'daemons': config}
+
+    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
+    for role in daemons:
+        cluster, type_, id_ = teuthology.split_role(role)
+        ctx.daemons.get_daemon(type_, id_, cluster).stop()
+
+    yield
+
+
+@contextlib.contextmanager
+def wait_for_failure(ctx, config):
+    """
+    Wait for a failure of a ceph daemon
+
+    For example::
+      tasks:
+      - ceph.wait_for_failure: [mds.*]
+
+      tasks:
+      - ceph.wait_for_failure: [osd.0, osd.2]
+
+      tasks:
+      - ceph.wait_for_failure:
+          daemons: [osd.0, osd.2]
+
+    """
+    if config is None:
+        config = {}
+    elif isinstance(config, list):
+        config = {'daemons': config}
+
+    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
+    for role in daemons:
+        cluster, type_, id_ = teuthology.split_role(role)
+        try:
+            ctx.daemons.get_daemon(type_, id_, cluster).wait()
+        except:
+            log.info('Saw expected daemon failure.  Continuing.')
+            pass
+        else:
+            raise RuntimeError('daemon %s did not fail' % role)
+
+    yield
+
+
+def validate_config(ctx, config):
+    """
+    Perform some simple validation on task configuration.
+    Raises exceptions.ConfigError if an error is found.
+    """
+    # check for osds from multiple clusters on the same host
+    for remote, roles_for_host in ctx.cluster.remotes.items():
+        last_cluster = None
+        last_role = None
+        for role in roles_for_host:
+            role_cluster, role_type, _ = teuthology.split_role(role)
+            if role_type != 'osd':
+                continue
+            if last_cluster and last_cluster != role_cluster:
+                msg = "Host should not have osds (%s and %s) from multiple clusters" % (
+                    last_role, role)
+                raise exceptions.ConfigError(msg)
+            last_cluster = role_cluster
+            last_role = role
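
A minimal sketch of a role layout the check above rejects versus one it accepts; the host and cluster names are hypothetical.

    # 'osd.0' belongs to the default 'ceph' cluster and 'backup.osd.0' to a
    # 'backup' cluster, so putting both on one host raises ConfigError.
    bad_layout = {'host1': ['osd.0', 'backup.osd.0']}
    # Clients and non-osd daemons from different clusters may be colocated.
    ok_layout = {'host1': ['osd.0', 'backup.client.0'],
                 'host2': ['backup.osd.0']}
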
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Set up and tear down a Ceph cluster.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - interactive:
+
+    You can also specify what branch to run::
+
+        tasks:
+        - ceph:
+            branch: foo
+
+    Or a tag::
+
+        tasks:
+        - ceph:
+            tag: v0.42.13
+
+    Or a sha1::
+
+        tasks:
+        - ceph:
+            sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed
+
+    Or a local source dir::
+
+        tasks:
+        - ceph:
+            path: /home/sage/ceph
+
+    To capture code coverage data, use::
+
+        tasks:
+        - ceph:
+            coverage: true
+
+    To use btrfs, ext4, or xfs on the target's scratch disks, use::
+
+        tasks:
+        - ceph:
+            fs: xfs
+            mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
+            mount_options: [nobarrier, inode64]
+
+    Note, this will cause the task to check the /scratch_devs file on each node
+    for available devices.  If no such file is found, /dev/sdb will be used.
+
+    To run some daemons under valgrind, include their names
+    and the tool/args to use in a valgrind section::
+
+        tasks:
+        - ceph:
+          valgrind:
+            mds.1: --tool=memcheck
+            osd.1: [--tool=memcheck, --leak-check=no]
+
+    Those nodes which are using memcheck or valgrind will get
+    checked for bad results.
+
+    To adjust or modify config options, use::
+
+        tasks:
+        - ceph:
+            conf:
+              section:
+                key: value
+
+    For example::
+
+        tasks:
+        - ceph:
+            conf:
+              mds.0:
+                some option: value
+                other key: other value
+              client.0:
+                debug client: 10
+                debug ms: 1
+
+    By default, the cluster log is checked for errors and warnings,
+    and the run marked failed if any appear. You can ignore log
+    entries by giving a list of egrep-compatible regexes, e.g.::
+
+        tasks:
+        - ceph:
+            log-whitelist: ['foo.*bar', 'bad message']
+
+    To run multiple ceph clusters, use multiple ceph tasks, and roles
+    with a cluster name prefix, e.g. cluster1.client.0. Roles with no
+    cluster use the default cluster name, 'ceph'. OSDs from separate
+    clusters must be on separate hosts. Clients and non-osd daemons
+    from multiple clusters may be colocated. For each cluster, add an
+    instance of the ceph task with the cluster name specified, e.g.::
+
+        roles:
+        - [mon.a, osd.0, osd.1]
+        - [backup.mon.a, backup.osd.0, backup.osd.1]
+        - [client.0, backup.client.0]
+        tasks:
+        - ceph:
+            cluster: ceph
+        - ceph:
+            cluster: backup
+
+    :param ctx: Context
+    :param config: Configuration
+
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        "task ceph only supports a dictionary for configuration"
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph', {}))
+
+    first_ceph_cluster = False
+    if not hasattr(ctx, 'daemons'):
+        first_ceph_cluster = True
+        ctx.daemons = DaemonGroup()
+
+    testdir = teuthology.get_testdir(ctx)
+    if config.get('coverage'):
+        coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+        log.info('Creating coverage directory...')
+        run.wait(
+            ctx.cluster.run(
+                args=[
+                    'install', '-d', '-m0755', '--',
+                    coverage_dir,
+                ],
+                wait=False,
+            )
+        )
+
+    if 'cluster' not in config:
+        config['cluster'] = 'ceph'
+
+    validate_config(ctx, config)
+
+    subtasks = []
+    if first_ceph_cluster:
+        # these tasks handle general log setup and parsing on all hosts,
+        # so they should only be run once
+        subtasks = [
+            lambda: ceph_log(ctx=ctx, config=None),
+            lambda: valgrind_post(ctx=ctx, config=config),
+        ]
+
+    subtasks += [
+        lambda: cluster(ctx=ctx, config=dict(
+            conf=config.get('conf', {}),
+            fs=config.get('fs', None),
+            mkfs_options=config.get('mkfs_options', None),
+            mount_options=config.get('mount_options', None),
+            block_journal=config.get('block_journal', None),
+            tmpfs_journal=config.get('tmpfs_journal', None),
+            log_whitelist=config.get('log-whitelist', []),
+            cpu_profile=set(config.get('cpu_profile', []),),
+            cluster=config['cluster'],
+        )),
+        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
+        lambda: crush_setup(ctx=ctx, config=config),
+        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
+        lambda: cephfs_setup(ctx=ctx, config=config),
+        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
+    ]
+
+    with contextutil.nested(*subtasks):
+        try:
+            if config.get('wait-for-healthy', True):
+                healthy(ctx=ctx, config=dict(cluster=config['cluster']))
+            first_mon = teuthology.get_first_mon(ctx, config, config['cluster'])
+            (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+            if not hasattr(ctx, 'managers'):
+                ctx.managers = {}
+            ctx.managers[config['cluster']] = CephManager(
+                mon,
+                ctx=ctx,
+                logger=log.getChild('ceph_manager.' + config['cluster']),
+                cluster=config['cluster'],
+            )
+            yield
+        finally:
+            if config.get('wait-for-scrub', True):
+                osd_scrub_pgs(ctx, config)
diff --git a/qa/tasks/ceph_client.py b/qa/tasks/ceph_client.py
new file mode 100644
index 0000000..3ca90b7
--- /dev/null
+++ b/qa/tasks/ceph_client.py
@@ -0,0 +1,42 @@
+"""
+Set up client keyring
+"""
+import logging
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def create_keyring(ctx, cluster_name):
+    """
+    Set up key ring on remote sites
+    """
+    log.info('Setting up client nodes...')
+    clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
+    testdir = teuthology.get_testdir(ctx)
+    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+    for remote, roles_for_host in clients.remotes.iteritems():
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
+                                                     cluster_name):
+            name = teuthology.ceph_role(role)
+            client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(cluster_name, name)
+            remote.run(
+                args=[
+                    'sudo',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-authtool',
+                    '--create-keyring',
+                    '--gen-key',
+                    # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
+                    '--name={name}'.format(name=name),
+                    client_keyring,
+                    run.Raw('&&'),
+                    'sudo',
+                    'chmod',
+                    '0644',
+                    client_keyring,
+                    ],
+                )
diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py
new file mode 100644
index 0000000..315de12
--- /dev/null
+++ b/qa/tasks/ceph_deploy.py
@@ -0,0 +1,694 @@
+"""
+Execute ceph-deploy as a task
+"""
+from cStringIO import StringIO
+
+import contextlib
+import os
+import time
+import logging
+import traceback
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.task import install as install_fn
+from teuthology.orchestra import run
+from tasks.cephfs.filesystem import Filesystem
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download_ceph_deploy(ctx, config):
+    """
+    Downloads ceph-deploy from the ceph.com git mirror and (by default)
+    switches to the master branch. If the `ceph-deploy-branch` is specified, it
+    will use that instead.
+    """
+    log.info('Downloading ceph-deploy...')
+    testdir = teuthology.get_testdir(ctx)
+    ceph_admin = ctx.cluster.only(teuthology.get_first_mon(ctx, config))
+    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')
+
+    ceph_admin.run(
+        args=[
+            'git', 'clone', '-b', ceph_deploy_branch,
+            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
+            '{tdir}/ceph-deploy'.format(tdir=testdir),
+        ],
+    )
+    ceph_admin.run(
+        args=[
+            'cd',
+            '{tdir}/ceph-deploy'.format(tdir=testdir),
+            run.Raw('&&'),
+            './bootstrap',
+        ],
+    )
+
+    try:
+        yield
+    finally:
+        log.info('Removing ceph-deploy ...')
+        ceph_admin.run(
+            args=[
+                'rm',
+                '-rf',
+                '{tdir}/ceph-deploy'.format(tdir=testdir),
+            ],
+        )
+
+
+def is_healthy(ctx, config):
+    """Wait until a Ceph cluster is healthy."""
+    testdir = teuthology.get_testdir(ctx)
+    ceph_admin = teuthology.get_first_mon(ctx, config)
+    (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
+    max_tries = 90  # 90 tries * 10 secs --> 15 minutes
+    tries = 0
+    while True:
+        tries += 1
+        if tries >= max_tries:
+            msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
+            remote.run(
+                args=[
+                    'cd',
+                    '{tdir}'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'sudo', 'ceph',
+                    'report',
+                ],
+            )
+            raise RuntimeError(msg)
+
+        r = remote.run(
+            args=[
+                'cd',
+                '{tdir}'.format(tdir=testdir),
+                run.Raw('&&'),
+                'sudo', 'ceph',
+                'health',
+            ],
+            stdout=StringIO(),
+            logger=log.getChild('health'),
+        )
+        out = r.stdout.getvalue()
+        log.info('Ceph health: %s', out.rstrip('\n'))
+        if out.split(None, 1)[0] == 'HEALTH_OK':
+            break
+        time.sleep(10)
+
+
+def get_nodes_using_role(ctx, target_role):
+    """
+    Extract the names of nodes that match a given role from a cluster, and modify the
+    cluster's service IDs to match the resulting node-based naming scheme that ceph-deploy
+    uses, such that if "mon.a" is on host "foo23", it'll be renamed to "mon.foo23".
+    """
+
+    # Nodes containing a service of the specified role
+    nodes_of_interest = []
+
+    # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
+    modified_remotes = {}
+
+    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        modified_remotes[_remote] = []
+        for svc_id in roles_for_host:
+            if svc_id.startswith("{0}.".format(target_role)):
+                fqdn = str(_remote).split('@')[-1]
+                nodename = str(str(_remote).split('.')[0]).split('@')[1]
+                if target_role == 'mon':
+                    nodes_of_interest.append(fqdn)
+                else:
+                    nodes_of_interest.append(nodename)
+
+                modified_remotes[_remote].append(
+                    "{0}.{1}".format(target_role, nodename))
+            else:
+                modified_remotes[_remote].append(svc_id)
+
+    ctx.cluster.remotes = modified_remotes
+
+    return nodes_of_interest
+
+
+def get_dev_for_osd(ctx, config):
+    """Get a list of all osd device names."""
+    osd_devs = []
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        host = remote.name.split('@')[-1]
+        shortname = host.split('.')[0]
+        devs = teuthology.get_scratch_devices(remote)
+        num_osd_per_host = list(
+            teuthology.roles_of_type(
+                roles_for_host, 'osd'))
+        num_osds = len(num_osd_per_host)
+        if config.get('separate_journal_disk') is not None:
+            num_devs_reqd = 2 * num_osds
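+            # devices are consumed in (data, journal) pairs: devs[0]/devs[1]
+            # for the first osd on this host, devs[2]/devs[3] for the next, ...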
+            assert num_devs_reqd <= len(
+                devs), 'fewer data and journal disks than required ' + shortname
+            for dindex in range(0, num_devs_reqd, 2):
+                jd_index = dindex + 1
+                dev_short = devs[dindex].split('/')[-1]
+                jdev_short = devs[jd_index].split('/')[-1]
+                osd_devs.append((shortname, dev_short, jdev_short))
+        else:
+            assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
+            for dev in devs[:num_osds]:
+                dev_short = dev.split('/')[-1]
+                osd_devs.append((shortname, dev_short))
+    return osd_devs
+
+
+def get_all_nodes(ctx, config):
+    """Return a string of node names separated by blanks"""
+    nodelist = []
+    for t, k in ctx.config['targets'].iteritems():
+        host = t.split('@')[-1]
+        simple_host = host.split('.')[0]
+        nodelist.append(simple_host)
+    nodelist = " ".join(nodelist)
+    return nodelist
+
+
+@contextlib.contextmanager
+def build_ceph_cluster(ctx, config):
+    """Build a ceph cluster"""
+
+    # Expect to find ceph_admin on the first mon by ID, same place that the download task
+    # puts it.  Remember this here, because subsequently IDs will change from those in
+    # the test config to those that ceph-deploy invents.
+    (ceph_admin,) = ctx.cluster.only(
+        teuthology.get_first_mon(ctx, config)).remotes.iterkeys()
+
+    def execute_ceph_deploy(cmd):
+        """Remotely execute a ceph_deploy command"""
+        return ceph_admin.run(
+            args=[
+                'cd',
+                '{tdir}/ceph-deploy'.format(tdir=testdir),
+                run.Raw('&&'),
+                run.Raw(cmd),
+            ],
+            check_status=False,
+        ).exitstatus
+
+    try:
+        log.info('Building ceph cluster using ceph-deploy...')
+        testdir = teuthology.get_testdir(ctx)
+        ceph_branch = None
+        if config.get('branch') is not None:
+            cbranch = config.get('branch')
+            for var, val in cbranch.iteritems():
+                ceph_branch = '--{var}={val}'.format(var=var, val=val)
+        all_nodes = get_all_nodes(ctx, config)
+        mds_nodes = get_nodes_using_role(ctx, 'mds')
+        mds_nodes = " ".join(mds_nodes)
+        mon_node = get_nodes_using_role(ctx, 'mon')
+        mon_nodes = " ".join(mon_node)
+        new_mon = './ceph-deploy new' + " " + mon_nodes
+        mon_hostname = mon_nodes.split(' ')[0]
+        mon_hostname = str(mon_hostname)
+        gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname
+        deploy_mds = './ceph-deploy mds create' + " " + mds_nodes
+        no_of_osds = 0
+
+        if not mon_nodes:
+            raise RuntimeError("no monitor nodes in the config file")
+
+        estatus_new = execute_ceph_deploy(new_mon)
+        if estatus_new != 0:
+            raise RuntimeError("ceph-deploy: new command failed")
+
+        log.info('adding config inputs...')
+        testdir = teuthology.get_testdir(ctx)
+        conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
+
+        if config.get('conf') is not None:
+            confp = config.get('conf')
+            for section, keys in confp.iteritems():
+                lines = '[{section}]\n'.format(section=section)
+                teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
+                                                sudo=True)
+                for key, value in keys.iteritems():
+                    log.info("[%s] %s = %s" % (section, key, value))
+                    lines = '{key} = {value}\n'.format(key=key, value=value)
+                    teuthology.append_lines_to_file(
+                        ceph_admin, conf_path, lines, sudo=True)
+
+        # install ceph
+        dev_branch = ctx.config['branch']
+        branch = '--dev={branch}'.format(branch=dev_branch)
+        if ceph_branch:
+            option = ceph_branch
+        else:
+            option = branch
+        install_nodes = './ceph-deploy install ' + option + " " + all_nodes
+        estatus_install = execute_ceph_deploy(install_nodes)
+        if estatus_install != 0:
+            raise RuntimeError("ceph-deploy: Failed to install ceph")
+        # install ceph-test package too
+        install_nodes2 = './ceph-deploy install --tests ' + option + \
+                         " " + all_nodes
+        estatus_install = execute_ceph_deploy(install_nodes2)
+        if estatus_install != 0:
+            raise RuntimeError("ceph-deploy: Failed to install ceph-test")
+
+        mon_create_nodes = './ceph-deploy mon create-initial'
+        # If the following fails, it is OK; it might just be that the monitors
+        # are taking way more than a minute per monitor to form quorum, so let's
+        # try the next block, which will wait up to 15 minutes for gatherkeys.
+        execute_ceph_deploy(mon_create_nodes)
+
+        # create-keys is explicit now
+        # http://tracker.ceph.com/issues/16036
+        mons = ctx.cluster.only(teuthology.is_type('mon'))
+        for remote in mons.remotes.iterkeys():
+            remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph',
+                             '--id', remote.shortname])
+
+        estatus_gather = execute_ceph_deploy(gather_keys)
+        if mds_nodes:
+            estatus_mds = execute_ceph_deploy(deploy_mds)
+            if estatus_mds != 0:
+                raise RuntimeError("ceph-deploy: Failed to deploy mds")
+
+        if config.get('test_mon_destroy') is not None:
+            for d in range(1, len(mon_node)):
+                mon_destroy_nodes = './ceph-deploy mon destroy' + \
+                    " " + mon_node[d]
+                estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes)
+                if estatus_mon_d != 0:
+                    raise RuntimeError("ceph-deploy: Failed to delete monitor")
+
+        node_dev_list = get_dev_for_osd(ctx, config)
+        for d in node_dev_list:
+            node = d[0]
+            for disk in d[1:]:
+                zap = './ceph-deploy disk zap ' + node + ':' + disk
+                estatus = execute_ceph_deploy(zap)
+                if estatus != 0:
+                    raise RuntimeError("ceph-deploy: Failed to zap osds")
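+            # each node_dev_list entry is (host, data_disk[, journal_disk]);
+            # joining it with ':' gives the host:disk[:journal] argument form
+            # that ceph-deploy's "osd create" expects here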
+            osd_create_cmd = './ceph-deploy osd create '
+            if config.get('dmcrypt') is not None:
+                osd_create_cmd += '--dmcrypt '
+            osd_create_cmd += ":".join(d)
+            estatus_osd = execute_ceph_deploy(osd_create_cmd)
+            if estatus_osd == 0:
+                log.info('successfully created osd')
+                no_of_osds += 1
+            else:
+                raise RuntimeError("ceph-deploy: Failed to create osds")
+
+        if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+            is_healthy(ctx=ctx, config=None)
+
+            log.info('Setting up client nodes...')
+            conf_path = '/etc/ceph/ceph.conf'
+            admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+            first_mon = teuthology.get_first_mon(ctx, config)
+            (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+            conf_data = teuthology.get_file(
+                remote=mon0_remote,
+                path=conf_path,
+                sudo=True,
+            )
+            admin_keyring = teuthology.get_file(
+                remote=mon0_remote,
+                path=admin_keyring_path,
+                sudo=True,
+            )
+
+            clients = ctx.cluster.only(teuthology.is_type('client'))
+            for remot, roles_for_host in clients.remotes.iteritems():
+                for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+                    client_keyring = \
+                        '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+                    mon0_remote.run(
+                        args=[
+                            'cd',
+                            '{tdir}'.format(tdir=testdir),
+                            run.Raw('&&'),
+                            'sudo', 'bash', '-c',
+                            run.Raw('"'), 'ceph',
+                            'auth',
+                            'get-or-create',
+                            'client.{id}'.format(id=id_),
+                            'mds', 'allow',
+                            'mon', 'allow *',
+                            'osd', 'allow *',
+                            run.Raw('>'),
+                            client_keyring,
+                            run.Raw('"'),
+                        ],
+                    )
+                    key_data = teuthology.get_file(
+                        remote=mon0_remote,
+                        path=client_keyring,
+                        sudo=True,
+                    )
+                    teuthology.sudo_write_file(
+                        remote=remot,
+                        path=client_keyring,
+                        data=key_data,
+                        perms='0644'
+                    )
+                    teuthology.sudo_write_file(
+                        remote=remot,
+                        path=admin_keyring_path,
+                        data=admin_keyring,
+                        perms='0644'
+                    )
+                    teuthology.sudo_write_file(
+                        remote=remot,
+                        path=conf_path,
+                        data=conf_data,
+                        perms='0644'
+                    )
+
+            if mds_nodes:
+                log.info('Configuring CephFS...')
+                ceph_fs = Filesystem(ctx)
+                if not ceph_fs.legacy_configured():
+                    ceph_fs.create()
+        elif not config.get('only_mon'):
+            raise RuntimeError(
+                "The cluster is NOT operational due to insufficient OSDs")
+        yield
+
+    except Exception:
+        log.info(
+            "Error encountered, logging exception before tearing down ceph-deploy")
+        log.info(traceback.format_exc())
+        raise
+    finally:
+        if config.get('keep_running'):
+            return
+        log.info('Stopping ceph...')
+        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
+                              'sudo', 'systemctl', 'stop', 'ceph.target'])
+
+        # Are you really not running anymore?
+        # try first with the init tooling
+        # ignoring the status so this becomes informational only
+        ctx.cluster.run(
+            args=[
+                'sudo', 'status', 'ceph-all', run.Raw('||'),
+                'sudo', 'service', 'ceph', 'status', run.Raw('||'),
+                'sudo', 'systemctl', 'status', 'ceph.target'],
+            check_status=False)
+
+        # and now just check for the processes themselves, as if upstart/sysvinit
+        # is lying to us. Ignore errors if the grep fails
+        ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
+                              'grep', '-v', 'grep', run.Raw('|'),
+                              'grep', 'ceph'], check_status=False)
+
+        if ctx.archive is not None:
+            # archive mon data, too
+            log.info('Archiving mon data...')
+            path = os.path.join(ctx.archive, 'data')
+            os.makedirs(path)
+            mons = ctx.cluster.only(teuthology.is_type('mon'))
+            for remote, roles in mons.remotes.iteritems():
+                for role in roles:
+                    if role.startswith('mon.'):
+                        teuthology.pull_directory_tarball(
+                            remote,
+                            '/var/lib/ceph/mon',
+                            path + '/' + role + '.tgz')
+
+            log.info('Compressing logs...')
+            run.wait(
+                ctx.cluster.run(
+                    args=[
+                        'sudo',
+                        'find',
+                        '/var/log/ceph',
+                        '-name',
+                        '*.log',
+                        '-print0',
+                        run.Raw('|'),
+                        'sudo',
+                        'xargs',
+                        '-0',
+                        '--no-run-if-empty',
+                        '--',
+                        'gzip',
+                        '--',
+                    ],
+                    wait=False,
+                ),
+            )
+
+            log.info('Archiving logs...')
+            path = os.path.join(ctx.archive, 'remote')
+            os.makedirs(path)
+            for remote in ctx.cluster.remotes.iterkeys():
+                sub = os.path.join(path, remote.shortname)
+                os.makedirs(sub)
+                teuthology.pull_directory(remote, '/var/log/ceph',
+                                          os.path.join(sub, 'log'))
+
+        # Prevent these from being undefined if the try block fails
+        all_nodes = get_all_nodes(ctx, config)
+        purge_nodes = './ceph-deploy purge' + " " + all_nodes
+        purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes
+
+        log.info('Purging package...')
+        execute_ceph_deploy(purge_nodes)
+        log.info('Purging data...')
+        execute_ceph_deploy(purgedata_nodes)
+
+
+@contextlib.contextmanager
+def cli_test(ctx, config):
+    """
+    Exercise the most commonly used ceph-deploy CLI commands, ensure they
+    all work, and start up the init system.
+
+    """
+    log.info('Ceph-deploy Test')
+    if config is None:
+        config = {}
+    test_branch = ''
+    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"
+
+    def execute_cdeploy(admin, cmd, path):
+        """Execute ceph-deploy commands """
+        """Either use git path or repo path """
+        args = ['cd', conf_dir, run.Raw(';')]
+        if path:
+            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
+        else:
+            args.append('ceph-deploy')
+        args.append(run.Raw(cmd))
+        ec = admin.run(args=args, check_status=False).exitstatus
+        if ec != 0:
+            raise RuntimeError(
+                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))
+
+    if config.get('rhbuild'):
+        path = None
+    else:
+        path = teuthology.get_testdir(ctx)
+        # test on the branch from the config, e.g. wip-*, master or next;
+        # packages for all distros should exist for wip* branches
+        if ctx.config.get('branch'):
+            branch = ctx.config.get('branch')
+            test_branch = ' --dev={branch} '.format(branch=branch)
+    mons = ctx.cluster.only(teuthology.is_type('mon'))
+    for node, role in mons.remotes.iteritems():
+        admin = node
+        admin.run(args=['mkdir', conf_dir], check_status=False)
+        nodename = admin.shortname
+    system_type = teuthology.get_system_type(admin)
+    if config.get('rhbuild'):
+        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
+    log.info('system type is %s', system_type)
+    osds = ctx.cluster.only(teuthology.is_type('osd'))
+
+    for remote, roles in osds.remotes.iteritems():
+        devs = teuthology.get_scratch_devices(remote)
+        log.info("roles %s", roles)
+        if len(devs) < 3:
+            log.error(
+                'Test needs a minimum of 3 devices, only found %s',
+                str(devs))
+            raise RuntimeError("Needs a minimum of 3 devices")
+
+    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
+    new_cmd = 'new ' + nodename
+    execute_cdeploy(admin, new_cmd, path)
+    if config.get('conf') is not None:
+        confp = config.get('conf')
+        for section, keys in confp.iteritems():
+            lines = '[{section}]\n'.format(section=section)
+            teuthology.append_lines_to_file(admin, conf_path, lines,
+                                            sudo=True)
+            for key, value in keys.iteritems():
+                log.info("[%s] %s = %s" % (section, key, value))
+                lines = '{key} = {value}\n'.format(key=key, value=value)
+                teuthology.append_lines_to_file(admin, conf_path, lines,
+                                                sudo=True)
+    new_mon_install = 'install {branch} --mon '.format(
+        branch=test_branch) + nodename
+    new_osd_install = 'install {branch} --osd '.format(
+        branch=test_branch) + nodename
+    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
+    create_initial = 'mon create-initial '
+    execute_cdeploy(admin, new_mon_install, path)
+    execute_cdeploy(admin, new_osd_install, path)
+    execute_cdeploy(admin, new_admin, path)
+    execute_cdeploy(admin, create_initial, path)
+
+    for i in range(3):
+        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
+        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
+        execute_cdeploy(admin, zap_disk, path)
+        execute_cdeploy(admin, prepare, path)
+
+    log.info("list files for debugging purpose to check file permissions")
+    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
+    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
+    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
+    out = r.stdout.getvalue()
+    log.info('Ceph health: %s', out.rstrip('\n'))
+    log.info("Waiting for cluster to become healthy")
+    with contextutil.safe_while(sleep=10, tries=6,
+                                action='check health') as proceed:
+        while proceed():
+            r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
+            out = r.stdout.getvalue()
+            if out.split(None, 1)[0] == 'HEALTH_OK':
+                break
+    rgw_install = 'install {branch} --rgw {node}'.format(
+        branch=test_branch,
+        node=nodename,
+    )
+    rgw_create = 'rgw create ' + nodename
+    execute_cdeploy(admin, rgw_install, path)
+    execute_cdeploy(admin, rgw_create, path)
+    log.info('All ceph-deploy cli tests passed')
+    try:
+        yield
+    finally:
+        log.info("cleaning up")
+        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
+                              'sudo', 'systemctl', 'stop', 'ceph.target'],
+                        check_status=False)
+        time.sleep(4)
+        for i in range(3):
+            umount_dev = "{d}1".format(d=devs[i])
+            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
+        cmd = 'purge ' + nodename
+        execute_cdeploy(admin, cmd, path)
+        cmd = 'purgedata ' + nodename
+        execute_cdeploy(admin, cmd, path)
+        log.info("Removing temporary dir")
+        admin.run(
+            args=[
+                'rm',
+                run.Raw('-rf'),
+                run.Raw(conf_dir)],
+            check_status=False)
+        if config.get('rhbuild'):
+            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
+
+
+@contextlib.contextmanager
+def single_node_test(ctx, config):
+    """
+    - ceph-deploy.single_node_test: null
+
+    #rhbuild testing
+    - ceph-deploy.single_node_test:
+        rhbuild: 1.2.3
+
+    """
+    log.info("Testing ceph-deploy on single node")
+    if config is None:
+        config = {}
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
+    if config.get('rhbuild'):
+        log.info("RH Build, Skip Download")
+        with contextutil.nested(
+            lambda: cli_test(ctx=ctx, config=config),
+        ):
+            yield
+    else:
+        with contextutil.nested(
+            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
+            lambda: download_ceph_deploy(ctx=ctx, config=config),
+            lambda: cli_test(ctx=ctx, config=config),
+        ):
+            yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Set up and tear down a Ceph cluster.
+
+    For example::
+
+        tasks:
+        - install:
+             extras: yes
+        - ssh_keys:
+        - ceph-deploy:
+             branch:
+                stable: bobtail
+             mon_initial_members: 1
+             only_mon: true
+             keep_running: true
+
+        tasks:
+        - install:
+             extras: yes
+        - ssh_keys:
+        - ceph-deploy:
+             branch:
+                dev: master
+             conf:
+                mon:
+                   debug mon = 20
+
+        tasks:
+        - install:
+             extras: yes
+        - ssh_keys:
+        - ceph-deploy:
+             branch:
+                testing:
+             dmcrypt: yes
+             separate_journal_disk: yes
+
+    """
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task ceph-deploy only supports a dictionary for configuration"
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
+    if config.get('branch') is not None:
+        assert isinstance(
+            config['branch'], dict), 'branch must be a dictionary'
+
+    log.info('task ceph-deploy with config ' + str(config))
+
+    with contextutil.nested(
+        lambda: install_fn.ship_utilities(ctx=ctx, config=None),
+        lambda: download_ceph_deploy(ctx=ctx, config=config),
+        lambda: build_ceph_cluster(ctx=ctx, config=config),
+    ):
+        yield
diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py
new file mode 100644
index 0000000..9c308ae
--- /dev/null
+++ b/qa/tasks/ceph_fuse.py
@@ -0,0 +1,145 @@
+"""
+Ceph FUSE client task
+"""
+
+import contextlib
+import logging
+
+from teuthology import misc as teuthology
+from cephfs.fuse_mount import FuseMount
+
+log = logging.getLogger(__name__)
+
+
+def get_client_configs(ctx, config):
+    """
+    Get a map of the configuration for each FUSE client in the configuration by
+    combining the configuration of the current task with any global overrides.
+
+    :param ctx: Context instance
+    :param config: configuration for this task
+    :return: dict of client name to config or to None
+    """
+    if config is None:
+        config = dict(('client.{id}'.format(id=id_), None)
+                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+    elif isinstance(config, list):
+        config = dict((name, None) for name in config)
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))
+
+    return config
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Mount/unmount a ``ceph-fuse`` client.
+
+    The config is optional and defaults to mounting on all clients. If
+    a config is given, it is expected to be a list of clients to do
+    this operation on. This lets you e.g. set up one client with
+    ``ceph-fuse`` and another with ``kclient``.
+
+    Example that mounts all clients::
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+        - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0]
+        - kclient: [client.1]
+        - interactive:
+
+    Example that enables valgrind::
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+            client.0:
+              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+        - interactive:
+
+    Example that stops an already-mounted client:
+
+    ::
+
+        tasks:
+            - ceph:
+            - ceph-fuse: [client.0]
+            - ... do something that requires the FS mounted ...
+            - ceph-fuse:
+                client.0:
+                    mounted: false
+            - ... do something that requires the FS unmounted ...
+
+    Example that adds more generous wait time for mount (for virtual machines)::
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+            client.0:
+              mount_wait: 60 # default is 0, do not wait before checking /sys/
+              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
+        - interactive:
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Mounting ceph-fuse clients...')
+
+    testdir = teuthology.get_testdir(ctx)
+    config = get_client_configs(ctx, config)
+
+    # List clients we will configure mounts for, default is all clients
+    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+    all_mounts = getattr(ctx, 'mounts', {})
+    mounted_by_me = {}
+
+    # Construct any new FuseMount instances
+    for id_, remote in clients:
+        client_config = config.get("client.%s" % id_)
+        if client_config is None:
+            client_config = {}
+
+        if id_ not in all_mounts:
+            fuse_mount = FuseMount(client_config, testdir, id_, remote)
+            all_mounts[id_] = fuse_mount
+        else:
+            # Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
+            assert isinstance(all_mounts[id_], FuseMount)
+
+        if client_config.get('mounted', True):
+            mounted_by_me[id_] = all_mounts[id_]
+
+    ctx.mounts = all_mounts
+
+    # Mount any clients we have been asked to (default to mount all)
+    for mount in mounted_by_me.values():
+        mount.mount()
+
+    for mount in mounted_by_me.values():
+        mount.wait_until_mounted()
+
+    # Umount any pre-existing clients that we have not been asked to mount
+    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()):
+        mount = all_mounts[client_id]
+        if mount.is_mounted():
+            mount.umount_wait()
+
+    try:
+        yield all_mounts
+    finally:
+        log.info('Unmounting ceph-fuse clients...')
+
+        for mount in mounted_by_me.values():
+            # Conditional because an inner context might have umounted it
+            if mount.is_mounted():
+                mount.umount_wait()
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
new file mode 100644
index 0000000..1d06d56
--- /dev/null
+++ b/qa/tasks/ceph_manager.py
@@ -0,0 +1,2014 @@
+"""
+ceph manager -- Thrasher and CephManager objects
+"""
+from cStringIO import StringIO
+from functools import wraps
+import contextlib
+import random
+import signal
+import time
+import gevent
+import base64
+import json
+import logging
+import threading
+import traceback
+import os
+from teuthology import misc as teuthology
+from tasks.scrub import Scrubber
+from util.rados import cmd_erasure_code_profile
+from util import get_remote
+from teuthology.contextutil import safe_while
+from teuthology.orchestra.remote import Remote
+from teuthology.orchestra import run
+from teuthology.exceptions import CommandFailedError
+
+
+DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'
+
+log = logging.getLogger(__name__)
+
+
+def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
+    conf_fp = StringIO()
+    ctx.ceph[cluster].conf.write(conf_fp)
+    conf_fp.seek(0)
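+    # Render the in-memory cluster config once, then pipe the same bytes to a
+    # sudo'd python one-liner on every remote so the file is written as root.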
+    writes = ctx.cluster.run(
+        args=[
+            'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
+            'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
+            'sudo', 'python',
+            '-c',
+            ('import shutil, sys; '
+             'shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))'),
+            conf_path,
+            run.Raw('&&'),
+            'sudo', 'chmod', '0644', conf_path,
+        ],
+        stdin=run.PIPE,
+        wait=False)
+    teuthology.feed_many_stdins_and_close(conf_fp, writes)
+    run.wait(writes)
+
+
+def mount_osd_data(ctx, remote, cluster, osd):
+    """
+    Mount a remote OSD
+
+    :param ctx: Context
+    :param remote: Remote site
+    :param cluster: name of ceph cluster
+    :param osd: Osd name
+    """
+    log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
+    role = "{0}.osd.{1}".format(cluster, osd)
+    alt_role = role if cluster != 'ceph' else "osd.{0}".format(osd)
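+    # Disk roles may be recorded either as "<cluster>.osd.N" or, for the
+    # default 'ceph' cluster, as plain "osd.N"; use whichever form
+    # ctx.disk_config actually knows about.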
+    if remote in ctx.disk_config.remote_to_roles_to_dev:
+        if alt_role in ctx.disk_config.remote_to_roles_to_dev[remote]:
+            role = alt_role
+        if role not in ctx.disk_config.remote_to_roles_to_dev[remote]:
+            return
+        dev = ctx.disk_config.remote_to_roles_to_dev[remote][role]
+        mount_options = ctx.disk_config.\
+            remote_to_roles_to_dev_mount_options[remote][role]
+        fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role]
+        mnt = os.path.join('/var/lib/ceph/osd', '{0}-{1}'.format(cluster, osd))
+
+        log.info('Mounting osd.{o}: dev: {n}, cluster: {c}, '
+                 'mountpoint: {p}, type: {t}, options: {v}'.format(
+                     o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options,
+                     c=cluster))
+
+        remote.run(
+            args=[
+                'sudo',
+                'mount',
+                '-t', fstype,
+                '-o', ','.join(mount_options),
+                dev,
+                mnt,
+            ]
+            )
+
+
+class Thrasher:
+    """
+    Object used to thrash Ceph
+    """
+    def __init__(self, manager, config, logger=None):
+        self.ceph_manager = manager
+        self.cluster = manager.cluster
+        self.ceph_manager.wait_for_clean()
+        osd_status = self.ceph_manager.get_osd_status()
+        self.in_osds = osd_status['in']
+        self.live_osds = osd_status['live']
+        self.out_osds = osd_status['out']
+        self.dead_osds = osd_status['dead']
+        self.stopping = False
+        self.logger = logger
+        self.config = config
+        self.revive_timeout = self.config.get("revive_timeout", 150)
+        if self.config.get('powercycle'):
+            self.revive_timeout += 120
+        self.clean_wait = self.config.get('clean_wait', 0)
+        self.minin = self.config.get("min_in", 3)
+        self.chance_move_pg = self.config.get('chance_move_pg', 1.0)
+        self.sighup_delay = self.config.get('sighup_delay')
+
+        num_osds = self.in_osds + self.out_osds
+        self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds
+        if self.logger is not None:
+            self.log = lambda x: self.logger.info(x)
+        else:
+            def tmp(x):
+                """
+                Implement log behavior
+                """
+                print x
+            self.log = tmp
+        if self.config is None:
+            self.config = dict()
+        # prevent monitor from auto-marking things out while thrasher runs
+        # try both old and new tell syntax, in case we are testing old code
+        try:
+            manager.raw_cluster_cmd('--', 'tell', 'mon.*', 'injectargs',
+                                    '--mon-osd-down-out-interval 0')
+        except Exception:
+            manager.raw_cluster_cmd('--', 'mon', 'tell', '*', 'injectargs',
+                                    '--mon-osd-down-out-interval 0')
+        self.thread = gevent.spawn(self.do_thrash)
+        if self.sighup_delay:
+            self.sighup_thread = gevent.spawn(self.do_sighup)
+        if self.config.get('powercycle') or not self.cmd_exists_on_osds("ceph-objectstore-tool"):
+            self.ceph_objectstore_tool = False
+            self.test_rm_past_intervals = False
+            if self.config.get('powercycle'):
+                self.log("Unable to test ceph-objectstore-tool, "
+                         "powercycle testing")
+            else:
+                self.log("Unable to test ceph-objectstore-tool, "
+                         "not available on all OSD nodes")
+        else:
+            self.ceph_objectstore_tool = \
+                self.config.get('ceph_objectstore_tool', True)
+            self.test_rm_past_intervals = \
+                self.config.get('test_rm_past_intervals', True)
+
+    def cmd_exists_on_osds(self, cmd):
+        allremotes = self.ceph_manager.ctx.cluster.only(
+            teuthology.is_type('osd', self.cluster)).remotes.keys()
+        allremotes = list(set(allremotes))
+        for remote in allremotes:
+            proc = remote.run(args=['type', cmd], wait=True,
+                              check_status=False, stdout=StringIO(),
+                              stderr=StringIO())
+            if proc.exitstatus != 0:
+                return False
+        return True
+
+    def kill_osd(self, osd=None, mark_down=False, mark_out=False):
+        """
+        :param osd: Osd to be killed.
+        :mark_down: Mark down if true.
+        :mark_out: Mark out if true.
+        """
+        if osd is None:
+            osd = random.choice(self.live_osds)
+        self.log("Killing osd %s, live_osds are %s" % (str(osd),
+                                                       str(self.live_osds)))
+        self.live_osds.remove(osd)
+        self.dead_osds.append(osd)
+        self.ceph_manager.kill_osd(osd)
+        if mark_down:
+            self.ceph_manager.mark_down_osd(osd)
+        if mark_out and osd in self.in_osds:
+            self.out_osd(osd)
+        if self.ceph_objectstore_tool:
+            self.log("Testing ceph-objectstore-tool on down osd")
+            remote = self.ceph_manager.find_remote('osd', osd)
+            FSPATH = self.ceph_manager.get_filepath()
+            JPATH = os.path.join(FSPATH, "journal")
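+            # FSPATH/JPATH still contain a literal "{id}" placeholder; it is
+            # filled in when each full command string is .format()ed below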
+            exp_osd = imp_osd = osd
+            exp_remote = imp_remote = remote
+            # If an older osd is available we'll move a pg from there
+            if (len(self.dead_osds) > 1 and
+                    random.random() < self.chance_move_pg):
+                exp_osd = random.choice(self.dead_osds[:-1])
+                exp_remote = self.ceph_manager.find_remote('osd', exp_osd)
+            if ('keyvaluestore_backend' in
+                    self.ceph_manager.ctx.ceph[self.cluster].conf['osd']):
+                prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+                          "--data-path {fpath} --journal-path {jpath} "
+                          "--type keyvaluestore "
+                          "--log-file="
+                          "/var/log/ceph/objectstore_tool.\\$pid.log ".
+                          format(fpath=FSPATH, jpath=JPATH))
+            else:
+                prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+                          "--data-path {fpath} --journal-path {jpath} "
+                          "--log-file="
+                          "/var/log/ceph/objectstore_tool.\\$pid.log ".
+                          format(fpath=FSPATH, jpath=JPATH))
+            cmd = (prefix + "--op list-pgs").format(id=exp_osd)
+
+            # ceph-objectstore-tool might be temporarily absent during an 
+            # upgrade - see http://tracker.ceph.com/issues/18014
+            with safe_while(sleep=15, tries=40, action="type ceph-objectstore-tool") as proceed:
+                while proceed():
+                    proc = exp_remote.run(args=['type', 'ceph-objectstore-tool'], 
+                               wait=True, check_status=False, stdout=StringIO(),
+                               stderr=StringIO())
+                    if proc.exitstatus == 0:
+                        break
+                    log.debug("ceph-objectstore-tool binary not present, trying again")
+
+            proc = exp_remote.run(args=cmd, wait=True,
+                                  check_status=False, stdout=StringIO())
+            if proc.exitstatus:
+                raise Exception("ceph-objectstore-tool: "
+                                "exp list-pgs failure with status {ret}".
+                                format(ret=proc.exitstatus))
+            pgs = proc.stdout.getvalue().split('\n')[:-1]
+            if len(pgs) == 0:
+                self.log("No PGs found for osd.{osd}".format(osd=exp_osd))
+                return
+            pg = random.choice(pgs)
+            exp_path = teuthology.get_testdir(self.ceph_manager.ctx)
+            exp_path = os.path.join(exp_path, '{0}.data'.format(self.cluster))
+            exp_path = os.path.join(exp_path,
+                                    "exp.{pg}.{id}".format(
+                                        pg=pg,
+                                        id=exp_osd))
+            # export
+            cmd = prefix + "--op export --pgid {pg} --file {file}"
+            cmd = cmd.format(id=exp_osd, pg=pg, file=exp_path)
+            proc = exp_remote.run(args=cmd)
+            if proc.exitstatus:
+                raise Exception("ceph-objectstore-tool: "
+                                "export failure with status {ret}".
+                                format(ret=proc.exitstatus))
+            # remove
+            cmd = prefix + "--op remove --pgid {pg}"
+            cmd = cmd.format(id=exp_osd, pg=pg)
+            proc = exp_remote.run(args=cmd)
+            if proc.exitstatus:
+                raise Exception("ceph-objectstore-tool: "
+                                "remove failure with status {ret}".
+                                format(ret=proc.exitstatus))
+            # If there are at least 2 dead osds we might move the pg
+            if exp_osd != imp_osd:
+                # If pg isn't already on this osd, then we will move it there
+                cmd = (prefix + "--op list-pgs").format(id=imp_osd)
+                proc = imp_remote.run(args=cmd, wait=True,
+                                      check_status=False, stdout=StringIO())
+                if proc.exitstatus:
+                    raise Exception("ceph-objectstore-tool: "
+                                    "imp list-pgs failure with status {ret}".
+                                    format(ret=proc.exitstatus))
+                pgs = proc.stdout.getvalue().split('\n')[:-1]
+                if pg not in pgs:
+                    self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".
+                             format(pg=pg, fosd=exp_osd, tosd=imp_osd))
+                    if imp_remote != exp_remote:
+                        # Copy export file to the other machine
+                        self.log("Transfer export file from {srem} to {trem}".
+                                 format(srem=exp_remote, trem=imp_remote))
+                        tmpexport = Remote.get_file(exp_remote, exp_path)
+                        Remote.put_file(imp_remote, tmpexport, exp_path)
+                        os.remove(tmpexport)
+                else:
+                    # Can't move the pg after all
+                    imp_osd = exp_osd
+                    imp_remote = exp_remote
+            # import
+            cmd = (prefix + "--op import --file {file}")
+            cmd = cmd.format(id=imp_osd, file=exp_path)
+            proc = imp_remote.run(args=cmd, wait=True, check_status=False,
+                                  stderr=StringIO())
+            if proc.exitstatus == 1:
+                bogosity = "The OSD you are using is older than the exported PG"
+                if bogosity in proc.stderr.getvalue():
+                    self.log("OSD older than exported PG"
+                             "...ignored")
+            elif proc.exitstatus == 10:
+                self.log("Pool went away before processing an import"
+                         "...ignored")
+            elif proc.exitstatus == 11:
+                self.log("Attempt to import an incompatible export"
+                         "...ignored")
+            elif proc.exitstatus:
+                raise Exception("ceph-objectstore-tool: "
+                                "import failure with status {ret}".
+                                format(ret=proc.exitstatus))
+            cmd = "rm -f {file}".format(file=exp_path)
+            exp_remote.run(args=cmd)
+            if imp_remote != exp_remote:
+                imp_remote.run(args=cmd)
+
+            # apply low split settings to each pool
+            for pool in self.ceph_manager.list_pools():
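+                # prefix begins with "sudo "; strip it so CEPH_ARGS can be set
+                # in the environment before re-applying sudo -E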
+                no_sudo_prefix = prefix[5:]
+                cmd = ("CEPH_ARGS='--filestore-merge-threshold 1 "
+                       "--filestore-split-multiple 1' sudo -E "
+                       + no_sudo_prefix + "--op apply-layout-settings --pool " + pool).format(id=osd)
+                proc = remote.run(args=cmd, wait=True, check_status=False, stderr=StringIO())
+                output = proc.stderr.getvalue()
+                if 'Couldn\'t find pool' in output:
+                    continue
+                if proc.exitstatus:
+                    raise Exception("ceph-objectstore-tool apply-layout-settings"
+                                    " failed with {status}".format(status=proc.exitstatus))
+
+    def rm_past_intervals(self, osd=None):
+        """
+        :param osd: Osd to find pg to remove past intervals
+        """
+        if self.test_rm_past_intervals:
+            if osd is None:
+                osd = random.choice(self.dead_osds)
+            self.log("Use ceph_objectstore_tool to remove past intervals")
+            remote = self.ceph_manager.find_remote('osd', osd)
+            FSPATH = self.ceph_manager.get_filepath()
+            JPATH = os.path.join(FSPATH, "journal")
+            if ('keyvaluestore_backend' in
+                    self.ceph_manager.ctx.ceph[self.cluster].conf['osd']):
+                prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+                          "--data-path {fpath} --journal-path {jpath} "
+                          "--type keyvaluestore "
+                          "--log-file="
+                          "/var/log/ceph/objectstore_tool.\\$pid.log ".
+                          format(fpath=FSPATH, jpath=JPATH))
+            else:
+                prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+                          "--data-path {fpath} --journal-path {jpath} "
+                          "--log-file="
+                          "/var/log/ceph/objectstore_tool.\\$pid.log ".
+                          format(fpath=FSPATH, jpath=JPATH))
+            cmd = (prefix + "--op list-pgs").format(id=osd)
+            proc = remote.run(args=cmd, wait=True,
+                              check_status=False, stdout=StringIO())
+            if proc.exitstatus:
+                raise Exception("ceph_objectstore_tool: "
+                                "exp list-pgs failure with status {ret}".
+                                format(ret=proc.exitstatus))
+            pgs = proc.stdout.getvalue().split('\n')[:-1]
+            if len(pgs) == 0:
+                self.log("No PGs found for osd.{osd}".format(osd=osd))
+                return
+            pg = random.choice(pgs)
+            cmd = (prefix + "--op rm-past-intervals --pgid {pg}").\
+                format(id=osd, pg=pg)
+            proc = remote.run(args=cmd)
+            if proc.exitstatus:
+                raise Exception("ceph_objectstore_tool: "
+                                "rm-past-intervals failure with status {ret}".
+                                format(ret=proc.exitstatus))
+
+    def blackhole_kill_osd(self, osd=None):
+        """
+        If all else fails, kill the osd.
+        :param osd: Osd to be killed.
+        """
+        if osd is None:
+            osd = random.choice(self.live_osds)
+        self.log("Blackholing and then killing osd %s, live_osds are %s" %
+                 (str(osd), str(self.live_osds)))
+        self.live_osds.remove(osd)
+        self.dead_osds.append(osd)
+        self.ceph_manager.blackhole_kill_osd(osd)
+
+    def revive_osd(self, osd=None, skip_admin_check=False):
+        """
+        Revive the osd.
+        :param osd: Osd to be revived.
+        """
+        if osd is None:
+            osd = random.choice(self.dead_osds)
+        self.log("Reviving osd %s" % (str(osd),))
+        self.ceph_manager.revive_osd(
+            osd,
+            self.revive_timeout,
+            skip_admin_check=skip_admin_check)
+        self.dead_osds.remove(osd)
+        self.live_osds.append(osd)
+
+    def out_osd(self, osd=None):
+        """
+        Mark the osd out
+        :param osd: Osd to be marked.
+        """
+        if osd is None:
+            osd = random.choice(self.in_osds)
+        self.log("Removing osd %s, in_osds are: %s" %
+                 (str(osd), str(self.in_osds)))
+        self.ceph_manager.mark_out_osd(osd)
+        self.in_osds.remove(osd)
+        self.out_osds.append(osd)
+
+    def in_osd(self, osd=None):
+        """
+        Mark the osd in
+        :param osd: Osd to be marked.
+        """
+        if osd is None:
+            osd = random.choice(self.out_osds)
+        if osd in self.dead_osds:
+            return self.revive_osd(osd)
+        self.log("Adding osd %s" % (str(osd),))
+        self.out_osds.remove(osd)
+        self.in_osds.append(osd)
+        self.ceph_manager.mark_in_osd(osd)
+        self.log("Added osd %s" % (str(osd),))
+
+    def reweight_osd(self, osd=None):
+        """
+        Reweight an osd that is in
+        :param osd: Osd to be marked.
+        """
+        if osd is None:
+            osd = random.choice(self.in_osds)
+        val = random.uniform(.1, 1.0)
+        self.log("Reweighting osd %s to %s" % (str(osd), str(val)))
+        self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
+                                          str(osd), str(val))
+
+    def primary_affinity(self, osd=None):
+        if osd is None:
+            osd = random.choice(self.in_osds)
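+        # roughly half the time pick a random affinity in [0, 1); otherwise
+        # pin the osd to affinity 1 or 0 with equal probability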
+        if random.random() >= .5:
+            pa = random.random()
+        elif random.random() >= .5:
+            pa = 1
+        else:
+            pa = 0
+        self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa))
+        self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity',
+                                          str(osd), str(pa))
+
+    def all_up(self):
+        """
+        Make sure all osds are up and not out.
+        """
+        while len(self.dead_osds) > 0:
+            self.log("reviving osd")
+            self.revive_osd()
+        while len(self.out_osds) > 0:
+            self.log("inning osd")
+            self.in_osd()
+
+    def do_join(self):
+        """
+        Stop the thrasher and wait for its greenlets to finish
+        """
+        self.stopping = True
+        self.thread.get()
+        if self.sighup_delay:
+            self.log("joining the do_sighup greenlet")
+            self.sighup_thread.get()
+
+    def grow_pool(self):
+        """
+        Increase the number of placement groups in a pool
+        """
+        pool = self.ceph_manager.get_pool()
+        self.log("Growing pool %s" % (pool,))
+        self.ceph_manager.expand_pool(pool,
+                                      self.config.get('pool_grow_by', 10),
+                                      self.max_pgs)
+
+    def fix_pgp_num(self):
+        """
+        Set pgp_num to match pg_num for a pool.
+        """
+        pool = self.ceph_manager.get_pool()
+        self.log("fixing pg num pool %s" % (pool,))
+        self.ceph_manager.set_pool_pgpnum(pool)
+
+    def test_pool_min_size(self):
+        """
+        Kill and revive all osds except one.
+        """
+        self.log("test_pool_min_size")
+        self.all_up()
+        self.ceph_manager.wait_for_recovery(
+            timeout=self.config.get('timeout')
+            )
+        the_one = random.choice(self.in_osds)
+        self.log("Killing everyone but %s", the_one)
+        to_kill = filter(lambda x: x != the_one, self.in_osds)
+        [self.kill_osd(i) for i in to_kill]
+        [self.out_osd(i) for i in to_kill]
+        time.sleep(self.config.get("test_pool_min_size_time", 10))
+        self.log("Killing %s" % (the_one,))
+        self.kill_osd(the_one)
+        self.out_osd(the_one)
+        self.log("Reviving everyone but %s" % (the_one,))
+        [self.revive_osd(i) for i in to_kill]
+        [self.in_osd(i) for i in to_kill]
+        self.log("Revived everyone but %s" % (the_one,))
+        self.log("Waiting for clean")
+        self.ceph_manager.wait_for_recovery(
+            timeout=self.config.get('timeout')
+            )
+
+    def inject_pause(self, conf_key, duration, check_after, should_be_down):
+        """
+        Pause injection testing. Check for osd being down when finished.
+        """
+        the_one = random.choice(self.live_osds)
+        self.log("inject_pause on {osd}".format(osd=the_one))
+        self.log(
+            "Testing {key} pause injection for duration {duration}".format(
+                key=conf_key,
+                duration=duration
+                ))
+        self.log(
+            "Checking after {after}, should_be_down={shouldbedown}".format(
+                after=check_after,
+                shouldbedown=should_be_down
+                ))
+        self.ceph_manager.set_config(the_one, **{conf_key: duration})
+        if not should_be_down:
+            return
+        time.sleep(check_after)
+        status = self.ceph_manager.get_osd_status()
+        assert the_one in status['down']
+        time.sleep(duration - check_after + 20)
+        status = self.ceph_manager.get_osd_status()
+        assert the_one not in status['down']
+
+    def test_backfill_full(self):
+        """
+        Test backfills stopping when the replica fills up.
+
+        First, use osd_backfill_full_ratio to simulate a now full
+        osd by setting it to 0 on all of the OSDs.
+
+        Second, on a random subset, set
+        osd_debug_skip_full_check_in_backfill_reservation to force
+        the more complicated check in do_scan to be exercised.
+
+        Then, verify that all backfills stop.
+        """
+        self.log("injecting osd_backfill_full_ratio = 0")
+        for i in self.live_osds:
+            self.ceph_manager.set_config(
+                i,
+                osd_debug_skip_full_check_in_backfill_reservation=
+                random.choice(['false', 'true']),
+                osd_backfill_full_ratio=0)
+        for i in range(30):
+            status = self.ceph_manager.compile_pg_status()
+            if 'backfill' not in status.keys():
+                break
+            self.log(
+                "waiting for {still_going} backfills".format(
+                    still_going=status.get('backfill')))
+            time.sleep(1)
+        assert('backfill' not in self.ceph_manager.compile_pg_status().keys())
+        for i in self.live_osds:
+            self.ceph_manager.set_config(
+                i,
+                osd_debug_skip_full_check_in_backfill_reservation='false',
+                osd_backfill_full_ratio=0.85)
+
+    def test_map_discontinuity(self):
+        """
+        1) Allows the osds to recover
+        2) kills an osd
+        3) allows the remaining osds to recover
+        4) waits for some time
+        5) revives the osd
+        This sequence should cause the revived osd to have to handle
+        a map gap since the mons would have trimmed
+        """
+        while len(self.in_osds) < (self.minin + 1):
+            self.in_osd()
+        self.log("Waiting for recovery")
+        self.ceph_manager.wait_for_all_up(
+            timeout=self.config.get('timeout')
+            )
+        # now we wait 20s for the pg status to change, if it takes longer,
+        # the test *should* fail!
+        time.sleep(20)
+        self.ceph_manager.wait_for_clean(
+            timeout=self.config.get('timeout')
+            )
+
+        # now we wait 20s for the backfill replicas to hear about the clean
+        time.sleep(20)
+        self.log("Recovered, killing an osd")
+        self.kill_osd(mark_down=True, mark_out=True)
+        self.log("Waiting for clean again")
+        self.ceph_manager.wait_for_clean(
+            timeout=self.config.get('timeout')
+            )
+        self.log("Waiting for trim")
+        time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40)))
+        self.revive_osd()
+
+    def choose_action(self):
+        """
+        Random action selector.
+        """
+        chance_down = self.config.get('chance_down', 0.4)
+        chance_test_min_size = self.config.get('chance_test_min_size', 0)
+        chance_test_backfill_full = \
+            self.config.get('chance_test_backfill_full', 0)
+        if isinstance(chance_down, int):
+            chance_down = float(chance_down) / 100
+        minin = self.minin
+        minout = self.config.get("min_out", 0)
+        minlive = self.config.get("min_live", 2)
+        mindead = self.config.get("min_dead", 0)
+
+        self.log('choose_action: min_in %d min_out '
+                 '%d min_live %d min_dead %d' %
+                 (minin, minout, minlive, mindead))
+        actions = []
+        if len(self.in_osds) > minin:
+            actions.append((self.out_osd, 1.0,))
+        if len(self.live_osds) > minlive and chance_down > 0:
+            actions.append((self.kill_osd, chance_down,))
+        if len(self.dead_osds) > 1:
+            actions.append((self.rm_past_intervals, 1.0,))
+        if len(self.out_osds) > minout:
+            actions.append((self.in_osd, 1.7,))
+        if len(self.dead_osds) > mindead:
+            actions.append((self.revive_osd, 1.0,))
+        if self.config.get('thrash_primary_affinity', True):
+            actions.append((self.primary_affinity, 1.0,))
+        actions.append((self.reweight_osd,
+                        self.config.get('reweight_osd', .5),))
+        actions.append((self.grow_pool,
+                        self.config.get('chance_pgnum_grow', 0),))
+        actions.append((self.fix_pgp_num,
+                        self.config.get('chance_pgpnum_fix', 0),))
+        actions.append((self.test_pool_min_size,
+                        chance_test_min_size,))
+        actions.append((self.test_backfill_full,
+                        chance_test_backfill_full,))
+        for key in ['heartbeat_inject_failure', 'filestore_inject_stall']:
+            for scenario in [
+                (lambda:
+                 self.inject_pause(key,
+                                   self.config.get('pause_short', 3),
+                                   0,
+                                   False),
+                 self.config.get('chance_inject_pause_short', 1),),
+                (lambda:
+                 self.inject_pause(key,
+                                   self.config.get('pause_long', 80),
+                                   self.config.get('pause_check_after', 70),
+                                   True),
+                 self.config.get('chance_inject_pause_long', 0),)]:
+                actions.append(scenario)
+
+        total = sum([y for (x, y) in actions])
+        val = random.uniform(0, total)
+        for (action, prob) in actions:
+            if val < prob:
+                return action
+            val -= prob
+        return None
+
+    def log_exc(func):
+        @wraps(func)
+        def wrapper(self):
+            try:
+                return func(self)
+            except:
+                self.log(traceback.format_exc())
+                raise
+        return wrapper
+
+    @log_exc
+    def do_sighup(self):
+        """
+        Loops and sends signal.SIGHUP to a random live osd.
+
+        Loop delay is controlled by the config value sighup_delay.
+        """
+        delay = float(self.sighup_delay)
+        self.log("starting do_sighup with a delay of {0}".format(delay))
+        while not self.stopping:
+            osd = random.choice(self.live_osds)
+            self.ceph_manager.signal_osd(osd, signal.SIGHUP, silent=True)
+            time.sleep(delay)
+
+    @log_exc
+    def do_thrash(self):
+        """
+        Loop to select random actions to thrash ceph manager with.
+        """
+        cleanint = self.config.get("clean_interval", 60)
+        scrubint = self.config.get("scrub_interval", -1)
+        maxdead = self.config.get("max_dead", 0)
+        delay = self.config.get("op_delay", 5)
+        self.log("starting do_thrash")
+        while not self.stopping:
+            to_log = [str(x) for x in ["in_osds: ", self.in_osds,
+                                       "out_osds: ", self.out_osds,
+                                       "dead_osds: ", self.dead_osds,
+                                       "live_osds: ", self.live_osds]]
+            self.log(" ".join(to_log))
+            if random.uniform(0, 1) < (float(delay) / cleanint):
+                while len(self.dead_osds) > maxdead:
+                    self.revive_osd()
+                for osd in self.in_osds:
+                    self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
+                                                      str(osd), str(1))
+                if random.uniform(0, 1) < float(
+                        self.config.get('chance_test_map_discontinuity', 0)):
+                    self.test_map_discontinuity()
+                else:
+                    self.ceph_manager.wait_for_recovery(
+                        timeout=self.config.get('timeout')
+                        )
+                time.sleep(self.clean_wait)
+                if scrubint > 0:
+                    if random.uniform(0, 1) < (float(delay) / scrubint):
+                        self.log('Scrubbing while thrashing being performed')
+                        Scrubber(self.ceph_manager, self.config)
+            self.choose_action()()
+            time.sleep(delay)
+        self.all_up()
+
+
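+# Editor's sketch (illustrative only, not part of the upstream change):
+# Thrasher.choose_action() above picks an action with probability
+# proportional to its weight.  A minimal standalone version of that
+# weighted selection, with placeholder actions:
+def _example_weighted_choice():
+    import random
+    actions = [(lambda: 'in_osd', 1.7),
+               (lambda: 'revive_osd', 1.0),
+               (lambda: 'reweight_osd', 0.5)]
+    total = sum(weight for (_, weight) in actions)
+    val = random.uniform(0, total)
+    for (action, weight) in actions:
+        if val < weight:
+            return action()
+        val -= weight
+    return None
+
+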
+class ObjectStoreTool:
+
+    def __init__(self, manager, pool, **kwargs):
+        self.manager = manager
+        self.pool = pool
+        self.osd = kwargs.get('osd', None)
+        self.object_name = kwargs.get('object_name', None)
+        if self.osd and self.pool and self.object_name:
+            if self.osd == "primary":
+                self.osd = self.manager.get_object_primary(self.pool,
+                                                           self.object_name)
+        assert self.osd
+        if self.object_name:
+            self.pgid = self.manager.get_object_pg_with_shard(self.pool,
+                                                              self.object_name,
+                                                              self.osd)
+        self.remote = self.manager.ctx.\
+            cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()[0]
+        path = self.manager.get_filepath().format(id=self.osd)
+        self.paths = ("--data-path {path} --journal-path {path}/journal".
+                      format(path=path))
+
+    def build_cmd(self, options, args, stdin):
+        lines = []
+        if self.object_name:
+            lines.append("object=$(sudo adjust-ulimits ceph-objectstore-tool "
+                         "{paths} --pgid {pgid} --op list |"
+                         "grep '\"oid\":\"{name}\"')".
+                         format(paths=self.paths,
+                                pgid=self.pgid,
+                                name=self.object_name))
+            args = '"$object" ' + args
+            options += " --pgid {pgid}".format(pgid=self.pgid)
+        cmd = ("sudo adjust-ulimits ceph-objectstore-tool {paths} {options} {args}".
+               format(paths=self.paths,
+                      args=args,
+                      options=options))
+        if stdin:
+            cmd = ("echo {payload} | base64 --decode | {cmd}".
+                   format(payload=base64.b64encode(stdin),
+                          cmd=cmd))
+        lines.append(cmd)
+        return "\n".join(lines)
+
+    def run(self, options, args, stdin=None):
+        self.manager.kill_osd(self.osd)
+        cmd = self.build_cmd(options, args, stdin)
+        self.manager.log(cmd)
+        try:
+            proc = self.remote.run(args=['bash', '-e', '-x', '-c', cmd],
+                                   check_status=False,
+                                   stdout=StringIO(),
+                                   stderr=StringIO())
+            proc.wait()
+            if proc.exitstatus != 0:
+                self.manager.log("failed with " + str(proc.exitstatus))
+                error = proc.stdout.getvalue() + " " + proc.stderr.getvalue()
+                raise Exception(error)
+        finally:
+            self.manager.revive_osd(self.osd)
+
+
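+# Editor's sketch (illustrative only, not part of the upstream change):
+# roughly how ObjectStoreTool.build_cmd() above assembles its shell command:
+# locate the object's JSON via '--op list', then run the requested op against
+# it, optionally feeding stdin as a base64-encoded payload.  The paths and
+# names below are hypothetical.
+def _example_objectstore_cmd(pgid, object_name, op, payload=None):
+    import base64
+    paths = ("--data-path /var/lib/ceph/osd/ceph-0 "
+             "--journal-path /var/lib/ceph/osd/ceph-0/journal")
+    lines = ["object=$(sudo ceph-objectstore-tool %s --pgid %s --op list | "
+             "grep '\"oid\":\"%s\"')" % (paths, pgid, object_name)]
+    cmd = ('sudo ceph-objectstore-tool %s --pgid %s "$object" %s' %
+           (paths, pgid, op))
+    if payload is not None:
+        cmd = "echo %s | base64 --decode | %s" % (base64.b64encode(payload),
+                                                  cmd)
+    lines.append(cmd)
+    return "\n".join(lines)
+
+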
+class CephManager:
+    """
+    Ceph manager object.
+    Contains several local functions that form the bulk of this module.
+    """
+
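+    # pool 'type' values as they appear in the osd dump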
+    REPLICATED_POOL = 1
+    ERASURE_CODED_POOL = 3
+
+    def __init__(self, controller, ctx=None, config=None, logger=None,
+                 cluster='ceph'):
+        self.lock = threading.RLock()
+        self.ctx = ctx
+        self.config = config
+        self.controller = controller
+        self.next_pool_id = 0
+        self.cluster = cluster
+        if (logger):
+            self.log = lambda x: logger.info(x)
+        else:
+            def tmp(x):
+                """
+                implement log behavior.
+                """
+                print x
+            self.log = tmp
+        if self.config is None:
+            self.config = dict()
+        pools = self.list_pools()
+        self.pools = {}
+        for pool in pools:
+            # we may race with a pool deletion; ignore failures here
+            try:
+                self.pools[pool] = self.get_pool_property(pool, 'pg_num')
+            except CommandFailedError:
+                self.log('Failed to get pg_num from pool %s, ignoring' % pool)
+
+    def raw_cluster_cmd(self, *args):
+        """
+        Run a ceph command on the cluster and return its stdout.
+        """
+        testdir = teuthology.get_testdir(self.ctx)
+        ceph_args = [
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'ceph',
+            '--cluster',
+            self.cluster,
+        ]
+        ceph_args.extend(args)
+        proc = self.controller.run(
+            args=ceph_args,
+            stdout=StringIO(),
+            )
+        return proc.stdout.getvalue()
+
+    def raw_cluster_cmd_result(self, *args):
+        """
+        Run a ceph command on the cluster and return its exit status.
+        """
+        testdir = teuthology.get_testdir(self.ctx)
+        ceph_args = [
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'ceph',
+            '--cluster',
+            self.cluster,
+        ]
+        ceph_args.extend(args)
+        proc = self.controller.run(
+            args=ceph_args,
+            check_status=False,
+            )
+        return proc.exitstatus
+
+    def run_ceph_w(self):
+        """
+        Execute "ceph -w" in the background with stdout connected to a StringIO,
+        and return the RemoteProcess.
+        """
+        return self.controller.run(
+            args=["sudo",
+                  "daemon-helper",
+                  "kill",
+                  "ceph",
+                  '--cluster',
+                  self.cluster,
+                  "-w"],
+            wait=False, stdout=StringIO(), stdin=run.PIPE)
+
+    def do_rados(self, remote, cmd, check_status=True):
+        """
+        Execute a remote rados command.
+        """
+        testdir = teuthology.get_testdir(self.ctx)
+        pre = [
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'rados',
+            '--cluster',
+            self.cluster,
+            ]
+        pre.extend(cmd)
+        proc = remote.run(
+            args=pre,
+            wait=True,
+            check_status=check_status
+            )
+        return proc
+
+    def rados_write_objects(self, pool, num_objects, size,
+                            timelimit, threads, cleanup=False):
+        """
+        Write rados objects
+        Threads not used yet.
+        """
+        args = [
+            '-p', pool,
+            '--num-objects', num_objects,
+            '-b', size,
+            'bench', timelimit,
+            'write'
+            ]
+        if not cleanup:
+            args.append('--no-cleanup')
+        return self.do_rados(self.controller, map(str, args))
+
+    def do_put(self, pool, obj, fname, namespace=None):
+        """
+        Implement rados put operation
+        """
+        args = ['-p', pool]
+        if namespace is not None:
+            args += ['-N', namespace]
+        args += [
+            'put',
+            obj,
+            fname
+        ]
+        return self.do_rados(
+            self.controller,
+            args,
+            check_status=False
+        ).exitstatus
+
+    def do_get(self, pool, obj, fname='/dev/null', namespace=None):
+        """
+        Implement rados get operation
+        """
+        args = ['-p', pool]
+        if namespace is not None:
+            args += ['-N', namespace]
+        args += [
+            'get',
+            obj,
+            fname
+        ]
+        return self.do_rados(
+            self.controller,
+            args,
+            check_status=False
+        ).exitstatus
+
+    def do_rm(self, pool, obj, namespace=None):
+        """
+        Implement rados rm operation
+        """
+        args = ['-p', pool]
+        if namespace is not None:
+            args += ['-N', namespace]
+        args += [
+            'rm',
+            obj
+        ]
+        return self.do_rados(
+            self.controller,
+            args,
+            check_status=False
+        ).exitstatus
+
+    def osd_admin_socket(self, osd_id, command, check_status=True):
+        return self.admin_socket('osd', osd_id, command, check_status)
+
+    def find_remote(self, service_type, service_id):
+        """
+        Get the Remote for the host where a particular service runs.
+
+        :param service_type: 'mds', 'osd', 'client'
+        :param service_id: The second part of a role, e.g. '0' for
+                           the role 'client.0'
+        :return: a Remote instance for the host where the
+                 requested role is placed
+        """
+        return get_remote(self.ctx, self.cluster,
+                          service_type, service_id)
+
+    def admin_socket(self, service_type, service_id,
+                     command, check_status=True):
+        """
+        Run a ceph command against the given daemon's admin socket.
+        :param command: a list of words to use as the command
+                        to the admin socket
+        """
+        testdir = teuthology.get_testdir(self.ctx)
+        remote = self.find_remote(service_type, service_id)
+        args = [
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'ceph',
+            '--cluster',
+            self.cluster,
+            '--admin-daemon',
+            '/var/run/ceph/{cluster}-{type}.{id}.asok'.format(
+                cluster=self.cluster,
+                type=service_type,
+                id=service_id),
+            ]
+        args.extend(command)
+        return remote.run(
+            args=args,
+            stdout=StringIO(),
+            wait=True,
+            check_status=check_status
+            )
+
+    def objectstore_tool(self, pool, options, args, **kwargs):
+        return ObjectStoreTool(self, pool, **kwargs).run(options, args)
+
+    def get_pgid(self, pool, pgnum):
+        """
+        :param pool: pool name
+        :param pgnum: pg number
+        :returns: a string representing this pg.
+        """
+        poolnum = self.get_pool_num(pool)
+        pg_str = "{poolnum}.{pgnum}".format(
+            poolnum=poolnum,
+            pgnum=pgnum)
+        return pg_str
+
+    def get_pg_replica(self, pool, pgnum):
+        """
+        get replica for pool, pgnum (e.g. (data, 0)->0)
+        """
+        output = self.raw_cluster_cmd("pg", "dump", '--format=json')
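+        # the first line of the output is a status line rather than JSON;
+        # drop it before parsing (other *_dump helpers below do the same)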
+        j = json.loads('\n'.join(output.split('\n')[1:]))
+        pg_str = self.get_pgid(pool, pgnum)
+        for pg in j['pg_stats']:
+            if pg['pgid'] == pg_str:
+                return int(pg['acting'][-1])
+        assert False
+
+    def get_pg_primary(self, pool, pgnum):
+        """
+        get primary for pool, pgnum (e.g. (data, 0)->0)
+        """
+        output = self.raw_cluster_cmd("pg", "dump", '--format=json')
+        j = json.loads('\n'.join(output.split('\n')[1:]))
+        pg_str = self.get_pgid(pool, pgnum)
+        for pg in j['pg_stats']:
+            if pg['pgid'] == pg_str:
+                return int(pg['acting'][0])
+        assert False
+
+    def get_pool_num(self, pool):
+        """
+        get number for pool (e.g., data -> 2)
+        """
+        return int(self.get_pool_dump(pool)['pool'])
+
+    def list_pools(self):
+        """
+        list all pool names
+        """
+        osd_dump = self.get_osd_dump_json()
+        self.log(osd_dump['pools'])
+        return [str(i['pool_name']) for i in osd_dump['pools']]
+
+    def clear_pools(self):
+        """
+        remove all pools
+        """
+        [self.remove_pool(i) for i in self.list_pools()]
+
+    def kick_recovery_wq(self, osdnum):
+        """
+        Run kick_recovery_wq on cluster.
+        """
+        return self.raw_cluster_cmd(
+            'tell', "osd.%d" % (int(osdnum),),
+            'debug',
+            'kick_recovery_wq',
+            '0')
+
+    def wait_run_admin_socket(self, service_type,
+                              service_id, args=['version'], timeout=75):
+        """
+        If the admin_socket call succeeds, return.  Otherwise wait
+        five seconds and try again.
+        """
+        tries = 0
+        while True:
+            proc = self.admin_socket(service_type, service_id,
+                                     args, check_status=False)
+            if proc.exitstatus == 0:
+                break
+            else:
+                tries += 1
+                if (tries * 5) > timeout:
+                    raise Exception('timed out waiting for admin_socket '
+                                    'to appear after {type}.{id} restart'.
+                                    format(type=service_type,
+                                           id=service_id))
+                self.log("waiting on admin_socket for {type}-{id}, "
+                         "{command}".format(type=service_type,
+                                            id=service_id,
+                                            command=args))
+                time.sleep(5)
+
+    def get_pool_dump(self, pool):
+        """
+        get the pool's entry from the osd dump
+        """
+        osd_dump = self.get_osd_dump_json()
+        for i in osd_dump['pools']:
+            if i['pool_name'] == pool:
+                return i
+        assert False
+
+    def set_config(self, osdnum, **argdict):
+        """
+        :param osdnum: osd number
+        :param argdict: dictionary containing values to set.
+        """
+        for k, v in argdict.iteritems():
+            self.wait_run_admin_socket(
+                'osd', osdnum,
+                ['config', 'set', str(k), str(v)])
+
+    def raw_cluster_status(self):
+        """
+        Get status from cluster
+        """
+        status = self.raw_cluster_cmd('status', '--format=json-pretty')
+        return json.loads(status)
+
+    def raw_osd_status(self):
+        """
+        Get osd status from cluster
+        """
+        return self.raw_cluster_cmd('osd', 'dump')
+
+    def get_osd_status(self):
+        """
+        Get osd statuses grouped by the states that the osds are in.
+        """
+        osd_lines = filter(
+            lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)),
+            self.raw_osd_status().split('\n'))
+        self.log(osd_lines)
+        in_osds = [int(i[4:].split()[0])
+                   for i in filter(lambda x: " in " in x, osd_lines)]
+        out_osds = [int(i[4:].split()[0])
+                    for i in filter(lambda x: " out " in x, osd_lines)]
+        up_osds = [int(i[4:].split()[0])
+                   for i in filter(lambda x: " up " in x, osd_lines)]
+        down_osds = [int(i[4:].split()[0])
+                     for i in filter(lambda x: " down " in x, osd_lines)]
+        dead_osds = [int(x.id_)
+                     for x in filter(lambda x:
+                                     not x.running(),
+                                     self.ctx.daemons.
+                                     iter_daemons_of_role('osd', self.cluster))]
+        live_osds = [int(x.id_) for x in
+                     filter(lambda x:
+                            x.running(),
+                            self.ctx.daemons.iter_daemons_of_role('osd',
+                                                                  self.cluster))]
+        return {'in': in_osds, 'out': out_osds, 'up': up_osds,
+                'down': down_osds, 'dead': dead_osds, 'live': live_osds,
+                'raw': osd_lines}
+
+    def get_num_pgs(self):
+        """
+        Check cluster status for the number of pgs
+        """
+        status = self.raw_cluster_status()
+        self.log(status)
+        return status['pgmap']['num_pgs']
+
+    def create_erasure_code_profile(self, profile_name, profile):
+        """
+        Create an erasure code profile that can be used as a parameter
+        when creating an erasure coded pool.
+        """
+        with self.lock:
+            args = cmd_erasure_code_profile(profile_name, profile)
+            self.raw_cluster_cmd(*args)
+
+    def create_pool_with_unique_name(self, pg_num=16,
+                                     erasure_code_profile_name=None):
+        """
+        Create a pool named unique_pool_X where X is unique.
+        """
+        name = ""
+        with self.lock:
+            name = "unique_pool_%s" % (str(self.next_pool_id),)
+            self.next_pool_id += 1
+            self.create_pool(
+                name,
+                pg_num,
+                erasure_code_profile_name=erasure_code_profile_name)
+        return name
+
+    @contextlib.contextmanager
+    def pool(self, pool_name, pg_num=16, erasure_code_profile_name=None):
+        self.create_pool(pool_name, pg_num, erasure_code_profile_name)
+        yield
+        self.remove_pool(pool_name)
+
+    def create_pool(self, pool_name, pg_num=16,
+                    erasure_code_profile_name=None):
+        """
+        Create a pool named from the pool_name parameter.
+        :param pool_name: name of the pool being created.
+        :param pg_num: initial number of pgs.
+        :param erasure_code_profile_name: if set, create an erasure coded
+                                          pool using this profile
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert isinstance(pg_num, int)
+            assert pool_name not in self.pools
+            self.log("creating pool_name %s" % (pool_name,))
+            if erasure_code_profile_name:
+                self.raw_cluster_cmd('osd', 'pool', 'create',
+                                     pool_name, str(pg_num), str(pg_num),
+                                     'erasure', erasure_code_profile_name)
+            else:
+                self.raw_cluster_cmd('osd', 'pool', 'create',
+                                     pool_name, str(pg_num))
+            self.pools[pool_name] = pg_num
+
+    def add_pool_snap(self, pool_name, snap_name):
+        """
+        Add pool snapshot
+        :param pool_name: name of pool to snapshot
+        :param snap_name: name of snapshot to take
+        """
+        self.raw_cluster_cmd('osd', 'pool', 'mksnap',
+                             str(pool_name), str(snap_name))
+
+    def remove_pool_snap(self, pool_name, snap_name):
+        """
+        Remove pool snapshot
+        :param pool_name: name of pool to snapshot
+        :param snap_name: name of snapshot to remove
+        """
+        self.raw_cluster_cmd('osd', 'pool', 'rmsnap',
+                             str(pool_name), str(snap_name))
+
+    def remove_pool(self, pool_name):
+        """
+        Remove the indicated pool
+        :param pool_name: Pool to be removed
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert pool_name in self.pools
+            self.log("removing pool_name %s" % (pool_name,))
+            del self.pools[pool_name]
+            self.do_rados(self.controller,
+                          ['rmpool', pool_name, pool_name,
+                           "--yes-i-really-really-mean-it"])
+
+    def get_pool(self):
+        """
+        Pick a random pool
+        """
+        with self.lock:
+            return random.choice(self.pools.keys())
+
+    def get_pool_pg_num(self, pool_name):
+        """
+        Return the number of pgs in the pool specified.
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            if pool_name in self.pools:
+                return self.pools[pool_name]
+            return 0
+
+    def get_pool_property(self, pool_name, prop):
+        """
+        :param pool_name: pool
+        :param prop: property to be checked.
+        :returns: property as an int value.
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert isinstance(prop, basestring)
+            output = self.raw_cluster_cmd(
+                'osd',
+                'pool',
+                'get',
+                pool_name,
+                prop)
+            return int(output.split()[1])
+
+    def set_pool_property(self, pool_name, prop, val):
+        """
+        :param pool_name: pool
+        :param prop: property to be set.
+        :param val: value to set.
+
+        This routine retries if set operation fails.
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert isinstance(prop, basestring)
+            assert isinstance(val, int)
+            tries = 0
+            while True:
+                r = self.raw_cluster_cmd_result(
+                    'osd',
+                    'pool',
+                    'set',
+                    pool_name,
+                    prop,
+                    str(val))
+                if r != 11:  # EAGAIN
+                    break
+                tries += 1
+                if tries > 50:
+                    raise Exception('timed out getting EAGAIN '
+                                    'when setting pool property %s %s = %s' %
+                                    (pool_name, prop, val))
+                self.log('got EAGAIN setting pool property, '
+                         'waiting a few seconds...')
+                time.sleep(2)
+
+    def expand_pool(self, pool_name, by, max_pgs):
+        """
+        Increase the number of pgs in a pool
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert isinstance(by, int)
+            assert pool_name in self.pools
+            if self.get_num_creating() > 0:
+                return
+            if (self.pools[pool_name] + by) > max_pgs:
+                return
+            self.log("increase pool size by %d" % (by,))
+            new_pg_num = self.pools[pool_name] + by
+            self.set_pool_property(pool_name, "pg_num", new_pg_num)
+            self.pools[pool_name] = new_pg_num
+
+    def set_pool_pgpnum(self, pool_name):
+        """
+        Set pgpnum property of pool_name pool.
+        """
+        with self.lock:
+            assert isinstance(pool_name, basestring)
+            assert pool_name in self.pools
+            if self.get_num_creating() > 0:
+                return
+            self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name])
+
+    def list_pg_missing(self, pgid):
+        """
+        return the list of missing objects for the pg with the id specified
+        """
+        r = None
+        offset = {}
+        while True:
+            out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_missing',
+                                       json.dumps(offset))
+            j = json.loads(out)
+            if r is None:
+                r = j
+            else:
+                r['objects'].extend(j['objects'])
+            if 'more' not in j:
+                break
+            if j['more'] == 0:
+                break
+            offset = j['objects'][-1]['oid']
+        if 'more' in r:
+            del r['more']
+        return r
+
+    def get_pg_stats(self):
+        """
+        Dump the cluster and get pg stats
+        """
+        out = self.raw_cluster_cmd('pg', 'dump', '--format=json')
+        j = json.loads('\n'.join(out.split('\n')[1:]))
+        return j['pg_stats']
+
+    def compile_pg_status(self):
+        """
+        Return a histogram of pg state values
+        """
+        ret = {}
+        j = self.get_pg_stats()
+        for pg in j:
+            for status in pg['state'].split('+'):
+                if status not in ret:
+                    ret[status] = 0
+                ret[status] += 1
+        return ret
+
+    def pg_scrubbing(self, pool, pgnum):
+        """
+        pg scrubbing wrapper
+        """
+        pgstr = self.get_pgid(pool, pgnum)
+        stats = self.get_single_pg_stats(pgstr)
+        return 'scrub' in stats['state']
+
+    def pg_repairing(self, pool, pgnum):
+        """
+        pg repairing wrapper
+        """
+        pgstr = self.get_pgid(pool, pgnum)
+        stats = self.get_single_pg_stats(pgstr)
+        return 'repair' in stats['state']
+
+    def pg_inconsistent(self, pool, pgnum):
+        """
+        pg inconsistent wrapper
+        """
+        pgstr = self.get_pgid(pool, pgnum)
+        stats = self.get_single_pg_stats(pgstr)
+        return 'inconsistent' in stats['state']
+
+    def get_last_scrub_stamp(self, pool, pgnum):
+        """
+        Get the timestamp of the last scrub.
+        """
+        stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum))
+        return stats["last_scrub_stamp"]
+
+    def do_pg_scrub(self, pool, pgnum, stype):
+        """
+        Scrub pg and wait for scrubbing to finish
+        """
+        init = self.get_last_scrub_stamp(pool, pgnum)
+        while init == self.get_last_scrub_stamp(pool, pgnum):
+            self.log("waiting for scrub type %s" % (stype,))
+            self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum))
+            time.sleep(10)
+
+    def get_single_pg_stats(self, pgid):
+        """
+        Return pg for the pgid specified.
+        """
+        all_stats = self.get_pg_stats()
+
+        for pg in all_stats:
+            if pg['pgid'] == pgid:
+                return pg
+
+        return None
+
+    def get_object_pg_with_shard(self, pool, name, osdid):
+        """
+        """
+        pool_dump = self.get_pool_dump(pool)
+        object_map = self.get_object_map(pool, name)
+        if pool_dump["type"] == CephManager.ERASURE_CODED_POOL:
+            shard = object_map['acting'].index(osdid)
+            return "{pgid}s{shard}".format(pgid=object_map['pgid'],
+                                           shard=shard)
+        else:
+            return object_map['pgid']
+
+    def get_object_primary(self, pool, name):
+        """
+        """
+        object_map = self.get_object_map(pool, name)
+        return object_map['acting_primary']
+
+    def get_object_map(self, pool, name):
+        """
+        osd map --format=json converted to a python object
+        :returns: the python object
+        """
+        out = self.raw_cluster_cmd('--format=json', 'osd', 'map', pool, name)
+        return json.loads('\n'.join(out.split('\n')[1:]))
+
+    def get_osd_dump_json(self):
+        """
+        osd dump --format=json converted to a python object
+        :returns: the python object
+        """
+        out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
+        return json.loads('\n'.join(out.split('\n')[1:]))
+
+    def get_osd_dump(self):
+        """
+        Dump osds
+        :returns: all osds
+        """
+        return self.get_osd_dump_json()['osds']
+
+    def get_stuck_pgs(self, type_, threshold):
+        """
+        :returns: stuck pg information from the cluster
+        """
+        out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold),
+                                   '--format=json')
+        return json.loads(out)
+
+    def get_num_unfound_objects(self):
+        """
+        Check cluster status to get the number of unfound objects
+        """
+        status = self.raw_cluster_status()
+        self.log(status)
+        return status['pgmap'].get('unfound_objects', 0)
+
+    def get_num_creating(self):
+        """
+        Find the number of pgs in creating mode.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if 'creating' in pg['state']:
+                num += 1
+        return num
+
+    def get_num_active_clean(self):
+        """
+        Find the number of active and clean pgs.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if (pg['state'].count('active') and
+                    pg['state'].count('clean') and
+                    not pg['state'].count('stale')):
+                num += 1
+        return num
+
+    def get_num_active_recovered(self):
+        """
+        Find the number of active and recovered pgs.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if (pg['state'].count('active') and
+                    not pg['state'].count('recover') and
+                    not pg['state'].count('backfill') and
+                    not pg['state'].count('stale')):
+                num += 1
+        return num
+
+    def get_is_making_recovery_progress(self):
+        """
+        Return whether there is recovery progress discernable in the
+        raw cluster status
+        """
+        status = self.raw_cluster_status()
+        kps = status['pgmap'].get('recovering_keys_per_sec', 0)
+        bps = status['pgmap'].get('recovering_bytes_per_sec', 0)
+        ops = status['pgmap'].get('recovering_objects_per_sec', 0)
+        return kps > 0 or bps > 0 or ops > 0
+
+    def get_num_active(self):
+        """
+        Find the number of active pgs.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if pg['state'].count('active') and not pg['state'].count('stale'):
+                num += 1
+        return num
+
+    def get_num_down(self):
+        """
+        Find the number of pgs that are down.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if ((pg['state'].count('down') and not
+                    pg['state'].count('stale')) or
+                (pg['state'].count('incomplete') and not
+                    pg['state'].count('stale'))):
+                num += 1
+        return num
+
+    def get_num_active_down(self):
+        """
+        Find the number of pgs that are either active or down.
+        """
+        pgs = self.get_pg_stats()
+        num = 0
+        for pg in pgs:
+            if ((pg['state'].count('active') and not
+                    pg['state'].count('stale')) or
+                (pg['state'].count('down') and not
+                    pg['state'].count('stale')) or
+                (pg['state'].count('incomplete') and not
+                    pg['state'].count('stale'))):
+                num += 1
+        return num
+
+    def is_clean(self):
+        """
+        True if all pgs are clean
+        """
+        return self.get_num_active_clean() == self.get_num_pgs()
+
+    def is_recovered(self):
+        """
+        True if all pgs have recovered
+        """
+        return self.get_num_active_recovered() == self.get_num_pgs()
+
+    def is_active_or_down(self):
+        """
+        True if all pgs are active or down
+        """
+        return self.get_num_active_down() == self.get_num_pgs()
+
+    def wait_for_clean(self, timeout=None):
+        """
+        Wait until all pgs are clean.
+        """
+        self.log("waiting for clean")
+        start = time.time()
+        num_active_clean = self.get_num_active_clean()
+        while not self.is_clean():
+            if timeout is not None:
+                if self.get_is_making_recovery_progress():
+                    self.log("making progress, resetting timeout")
+                    start = time.time()
+                else:
+                    self.log("no progress seen, keeping timeout for now")
+                    if time.time() - start >= timeout:
+                        self.log('dumping pgs')
+                        out = self.raw_cluster_cmd('pg', 'dump')
+                        self.log(out)
+                        assert time.time() - start < timeout, \
+                            'failed to become clean before timeout expired'
+            cur_active_clean = self.get_num_active_clean()
+            if cur_active_clean != num_active_clean:
+                start = time.time()
+                num_active_clean = cur_active_clean
+            time.sleep(3)
+        self.log("clean!")
+
+    def are_all_osds_up(self):
+        """
+        Returns true if all osds are up.
+        """
+        x = self.get_osd_dump()
+        return (len(x) == sum([(y['up'] > 0) for y in x]))
+
+    def wait_for_all_up(self, timeout=None):
+        """
+        When this exits, either the timeout has expired, or all
+        osds are up.
+        """
+        self.log("waiting for all up")
+        start = time.time()
+        while not self.are_all_osds_up():
+            if timeout is not None:
+                assert time.time() - start < timeout, \
+                    'timeout expired in wait_for_all_up'
+            time.sleep(3)
+        self.log("all up!")
+
+    def wait_for_recovery(self, timeout=None):
+        """
+        Check peering. When this exits, we have recovered.
+        """
+        self.log("waiting for recovery to complete")
+        start = time.time()
+        num_active_recovered = self.get_num_active_recovered()
+        while not self.is_recovered():
+            now = time.time()
+            if timeout is not None:
+                if self.get_is_making_recovery_progress():
+                    self.log("making progress, resetting timeout")
+                    start = time.time()
+                else:
+                    self.log("no progress seen, keeping timeout for now")
+                    if now - start >= timeout:
+                        self.log('dumping pgs')
+                        out = self.raw_cluster_cmd('pg', 'dump')
+                        self.log(out)
+                        assert now - start < timeout, \
+                            'failed to recover before timeout expired'
+            cur_active_recovered = self.get_num_active_recovered()
+            if cur_active_recovered != num_active_recovered:
+                start = time.time()
+                num_active_recovered = cur_active_recovered
+            time.sleep(3)
+        self.log("recovered!")
+
+    def wait_for_active(self, timeout=None):
+        """
+        Check peering. When this exits, we are definitely active
+        """
+        self.log("waiting for peering to complete")
+        start = time.time()
+        num_active = self.get_num_active()
+        while not self.is_active():
+            if timeout is not None:
+                if time.time() - start >= timeout:
+                    self.log('dumping pgs')
+                    out = self.raw_cluster_cmd('pg', 'dump')
+                    self.log(out)
+                    assert time.time() - start < timeout, \
+                        'failed to recover before timeout expired'
+            cur_active = self.get_num_active()
+            if cur_active != num_active:
+                start = time.time()
+                num_active = cur_active
+            time.sleep(3)
+        self.log("active!")
+
+    def wait_for_active_or_down(self, timeout=None):
+        """
+        Check peering. When this exits, we are definitely either
+        active or down
+        """
+        self.log("waiting for peering to complete or become blocked")
+        start = time.time()
+        num_active_down = self.get_num_active_down()
+        while not self.is_active_or_down():
+            if timeout is not None:
+                if time.time() - start >= timeout:
+                    self.log('dumping pgs')
+                    out = self.raw_cluster_cmd('pg', 'dump')
+                    self.log(out)
+                    assert time.time() - start < timeout, \
+                        'failed to recover before timeout expired'
+            cur_active_down = self.get_num_active_down()
+            if cur_active_down != num_active_down:
+                start = time.time()
+                num_active_down = cur_active_down
+            time.sleep(3)
+        self.log("active or down!")
+
+    def osd_is_up(self, osd):
+        """
+        Wrapper for osd check
+        """
+        osds = self.get_osd_dump()
+        return osds[osd]['up'] > 0
+
+    def wait_till_osd_is_up(self, osd, timeout=None):
+        """
+        Loop waiting for osd.
+        """
+        self.log('waiting for osd.%d to be up' % osd)
+        start = time.time()
+        while not self.osd_is_up(osd):
+            if timeout is not None:
+                assert time.time() - start < timeout, \
+                    'osd.%d failed to come up before timeout expired' % osd
+            time.sleep(3)
+        self.log('osd.%d is up' % osd)
+
+    def is_active(self):
+        """
+        Wrapper to check if all pgs are active
+        """
+        return self.get_num_active() == self.get_num_pgs()
+
+    def wait_till_active(self, timeout=None):
+        """
+        Wait until all pgs are active.
+        """
+        self.log("waiting till active")
+        start = time.time()
+        while not self.is_active():
+            if timeout is not None:
+                if time.time() - start >= timeout:
+                    self.log('dumping pgs')
+                    out = self.raw_cluster_cmd('pg', 'dump')
+                    self.log(out)
+                    assert time.time() - start < timeout, \
+                        'failed to become active before timeout expired'
+            time.sleep(3)
+        self.log("active!")
+
+    def mark_out_osd(self, osd):
+        """
+        Wrapper to mark osd out.
+        """
+        self.raw_cluster_cmd('osd', 'out', str(osd))
+
+    def kill_osd(self, osd):
+        """
+        Kill osds by either power cycling (if indicated by the config)
+        or by stopping.
+        """
+        if self.config.get('powercycle'):
+            remote = self.find_remote('osd', osd)
+            self.log('kill_osd on osd.{o} '
+                     'doing powercycle of {s}'.format(o=osd, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_off()
+        else:
+            self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop()
+
+    @staticmethod
+    def _assert_ipmi(remote):
+        assert remote.console.has_ipmi_credentials, (
+            "powercycling requested but RemoteConsole is not "
+            "initialized.  Check ipmi config.")
+
+    def blackhole_kill_osd(self, osd):
+        """
+        Stop osd if nothing else works.
+        """
+        self.raw_cluster_cmd('--', 'tell', 'osd.%d' % osd,
+                             'injectargs', '--filestore-blackhole')
+        time.sleep(2)
+        self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop()
+
+    def revive_osd(self, osd, timeout=150, skip_admin_check=False):
+        """
+        Revive osds by either power cycling (if indicated by the config)
+        or by restarting.
+        """
+        if self.config.get('powercycle'):
+            remote = self.find_remote('osd', osd)
+            self.log('revive_osd on osd.{o} doing powercycle of {s}'.
+                     format(o=osd, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_on()
+            if not remote.console.check_status(300):
+                raise Exception('Failed to revive osd.{o} via ipmi'.
+                                format(o=osd))
+            teuthology.reconnect(self.ctx, 60, [remote])
+            mount_osd_data(self.ctx, remote, self.cluster, str(osd))
+            self.make_admin_daemon_dir(remote)
+            self.ctx.daemons.get_daemon('osd', osd, self.cluster).reset()
+        self.ctx.daemons.get_daemon('osd', osd, self.cluster).restart()
+
+        if not skip_admin_check:
+            # wait for dump_ops_in_flight; this command doesn't appear
+            # until after the signal handler is installed and it is safe
+            # to stop the osd again without making valgrind leak checks
+            # unhappy.  see #5924.
+            self.wait_run_admin_socket('osd', osd,
+                                       args=['dump_ops_in_flight'],
+                                       timeout=timeout)
+
+    def mark_down_osd(self, osd):
+        """
+        Cluster command wrapper
+        """
+        self.raw_cluster_cmd('osd', 'down', str(osd))
+
+    def mark_in_osd(self, osd):
+        """
+        Cluster command wrapper
+        """
+        self.raw_cluster_cmd('osd', 'in', str(osd))
+
+    def signal_osd(self, osd, sig, silent=False):
+        """
+        Wrapper to local get_daemon call which sends the given
+        signal to the given osd.
+        """
+        self.ctx.daemons.get_daemon('osd', osd,
+                                    self.cluster).signal(sig, silent=silent)
+
+    ## monitors
+    def signal_mon(self, mon, sig, silent=False):
+        """
+        Wrapper to local get_daemon call
+        """
+        self.ctx.daemons.get_daemon('mon', mon,
+                                    self.cluster).signal(sig, silent=silent)
+
+    def kill_mon(self, mon):
+        """
+        Kill the monitor by either power cycling (if the config says so),
+        or by doing a stop.
+        """
+        if self.config.get('powercycle'):
+            remote = self.find_remote('mon', mon)
+            self.log('kill_mon on mon.{m} doing powercycle of {s}'.
+                     format(m=mon, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_off()
+        else:
+            self.ctx.daemons.get_daemon('mon', mon, self.cluster).stop()
+
+    def revive_mon(self, mon):
+        """
+        Restart by either power cycling (if the config says so),
+        or by doing a normal restart.
+        """
+        if self.config.get('powercycle'):
+            remote = self.find_remote('mon', mon)
+            self.log('revive_mon on mon.{m} doing powercycle of {s}'.
+                     format(m=mon, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_on()
+            self.make_admin_daemon_dir(remote)
+        self.ctx.daemons.get_daemon('mon', mon, self.cluster).restart()
+
+    def get_mon_status(self, mon):
+        """
+        Extract all the monitor status information from the cluster
+        """
+        addr = self.ctx.ceph[self.cluster].conf['mon.%s' % mon]['mon addr']
+        out = self.raw_cluster_cmd('-m', addr, 'mon_status')
+        return json.loads(out)
+
+    def get_mon_quorum(self):
+        """
+        Extract monitor quorum information from the cluster
+        """
+        out = self.raw_cluster_cmd('quorum_status')
+        j = json.loads(out)
+        self.log('quorum_status is %s' % out)
+        return j['quorum']
+
+    def wait_for_mon_quorum_size(self, size, timeout=300):
+        """
+        Loop until quorum size is reached.
+        """
+        self.log('waiting for quorum size %d' % size)
+        start = time.time()
+        while not len(self.get_mon_quorum()) == size:
+            if timeout is not None:
+                assert time.time() - start < timeout, \
+                    ('failed to reach quorum size %d '
+                     'before timeout expired' % size)
+            time.sleep(3)
+        self.log("quorum is size %d" % size)
+
+    def get_mon_health(self, debug=False):
+        """
+        Extract all the monitor health information.
+        """
+        out = self.raw_cluster_cmd('health', '--format=json')
+        if debug:
+            self.log('health:\n{h}'.format(h=out))
+        return json.loads(out)
+
+    def get_mds_status(self, mds):
+        """
+        Run cluster commands for the mds in order to get mds information
+        """
+        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+        j = json.loads(' '.join(out.splitlines()[1:]))
+        # collate; for dup ids, larger gid wins.
+        for info in j['info'].itervalues():
+            if info['name'] == mds:
+                return info
+        return None
+
+    def get_filepath(self):
+        """
+        Return path to osd data with {id} needing to be replaced
+        """
+        return '/var/lib/ceph/osd/' + self.cluster + '-{id}'
+
+    def make_admin_daemon_dir(self, remote):
+        """
+        Create /var/run/ceph directory on remote site.
+
+        :param remote: Remote site
+        """
+        remote.run(args=['sudo',
+                         'install', '-d', '-m0777', '--', '/var/run/ceph', ], )
+
+
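+# Editor's sketch (illustrative only, not part of the upstream change): the
+# wait_for_clean()/wait_for_recovery() loops above share one pattern -- poll
+# a progress counter and restart the timeout clock whenever it changes, so a
+# slow but advancing cluster is not failed prematurely.  A generic version
+# with caller-supplied callables (names are hypothetical):
+def _example_wait_with_progress(is_done, progress_marker, timeout=None):
+    import time
+    start = time.time()
+    marker = progress_marker()
+    while not is_done():
+        if timeout is not None and time.time() - start >= timeout:
+            raise AssertionError('timed out without making progress')
+        current = progress_marker()
+        if current != marker:
+            # progress was made; restart the clock
+            start = time.time()
+            marker = current
+        time.sleep(3)
+
+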
+def utility_task(name):
+    """
+    Generate a teuthology subtask that calls the named CephManager
+    method
+    """
+    def task(ctx, config):
+        if config is None:
+            config = {}
+        args = config.get('args', [])
+        kwargs = config.get('kwargs', {})
+        cluster = config.get('cluster', 'ceph')
+        fn = getattr(ctx.managers[cluster], name)
+        fn(*args, **kwargs)
+    return task
+
+revive_osd = utility_task("revive_osd")
+revive_mon = utility_task("revive_mon")
+kill_osd = utility_task("kill_osd")
+kill_mon = utility_task("kill_mon")
+create_pool = utility_task("create_pool")
+remove_pool = utility_task("remove_pool")
+wait_for_clean = utility_task("wait_for_clean")
+set_pool_property = utility_task("set_pool_property")
+do_pg_scrub = utility_task("do_pg_scrub")
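+
+
+def _example_utility_task_usage():
+    # Editor's sketch (illustrative only, not part of the upstream change):
+    # the utility_task() closures above look up the named method on the
+    # cluster's CephManager and forward the config 'args'/'kwargs' to it.
+    # The stub context and manager below are hypothetical, for demonstration.
+    class StubManager(object):
+        def revive_osd(self, osd, timeout=150):
+            print "revive osd.%d with timeout %d" % (osd, timeout)
+
+    class StubCtx(object):
+        managers = {'ceph': StubManager()}
+
+    revive_osd(StubCtx(), {'args': [0], 'kwargs': {'timeout': 60}})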
diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py
new file mode 100644
index 0000000..3dc4962
--- /dev/null
+++ b/qa/tasks/ceph_objectstore_tool.py
@@ -0,0 +1,670 @@
+"""
+ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
+"""
+from cStringIO import StringIO
+import contextlib
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+import time
+import os
+import string
+from teuthology.orchestra import run
+import sys
+import tempfile
+import json
+from util.rados import (rados, create_replicated_pool, create_ec_pool)
+# from util.rados import (rados, create_ec_pool,
+#                               create_replicated_pool,
+#                               create_cache_pool)
+
+log = logging.getLogger(__name__)
+
+# Should get cluster name "ceph" from somewhere
+# and normal path from osd_data and osd_journal in conf
+FSPATH = "/var/lib/ceph/osd/ceph-{id}"
+JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
+
+
+def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
+                         BASE_NAME, DATALINECOUNT):
+    objects = range(1, NUM_OBJECTS + 1)
+    for i in objects:
+        NAME = BASE_NAME + "{num}".format(num=i)
+        LOCALNAME = os.path.join(DATADIR, NAME)
+
+        dataline = range(DATALINECOUNT)
+        fd = open(LOCALNAME, "w")
+        data = "This is the data for " + NAME + "\n"
+        for _ in dataline:
+            fd.write(data)
+        fd.close()
+
+
+def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+                          BASE_NAME, DATALINECOUNT):
+
+    objects = range(1, NUM_OBJECTS + 1)
+    for i in objects:
+        NAME = BASE_NAME + "{num}".format(num=i)
+        DDNAME = os.path.join(DATADIR, NAME)
+
+        remote.run(args=['rm', '-f', DDNAME])
+
+        dataline = range(DATALINECOUNT)
+        data = "This is the data for " + NAME + "\n"
+        DATA = ""
+        for _ in dataline:
+            DATA += data
+        teuthology.write_file(remote, DDNAME, DATA)
+
+
+def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
+              BASE_NAME, DATALINECOUNT, POOL, db, ec):
+    ERRORS = 0
+    log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
+
+    objects = range(1, NUM_OBJECTS + 1)
+    for i in objects:
+        NAME = BASE_NAME + "{num}".format(num=i)
+        DDNAME = os.path.join(DATADIR, NAME)
+
+        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
+                     wait=False)
+        # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
+        ret = proc.wait()
+        if ret != 0:
+            log.critical("Rados put failed with status {ret}".
+                         format(ret=proc.exitstatus))
+            sys.exit(1)
+
+        db[NAME] = {}
+
+        keys = range(i)
+        db[NAME]["xattr"] = {}
+        for k in keys:
+            if k == 0:
+                continue
+            mykey = "key{i}-{k}".format(i=i, k=k)
+            myval = "val{i}-{k}".format(i=i, k=k)
+            proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
+                                    NAME, mykey, myval])
+            ret = proc.wait()
+            if ret != 0:
+                log.error("setxattr failed with {ret}".format(ret=ret))
+                ERRORS += 1
+            db[NAME]["xattr"][mykey] = myval
+
+        # Erasure coded pools don't support omap
+        if ec:
+            continue
+
+        # Create omap header in all objects but REPobject1
+        if i != 1:
+            myhdr = "hdr{i}".format(i=i)
+            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
+                                    NAME, myhdr])
+            ret = proc.wait()
+            if ret != 0:
+                log.critical("setomapheader failed with {ret}".format(ret=ret))
+                ERRORS += 1
+            db[NAME]["omapheader"] = myhdr
+
+        db[NAME]["omap"] = {}
+        for k in keys:
+            if k == 0:
+                continue
+            mykey = "okey{i}-{k}".format(i=i, k=k)
+            myval = "oval{i}-{k}".format(i=i, k=k)
+            proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
+                                    NAME, mykey, myval])
+            ret = proc.wait()
+            if ret != 0:
+                log.critical("setomapval failed with {ret}".format(ret=ret))
+            db[NAME]["omap"][mykey] = myval
+
+    return ERRORS
+
+
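+# Editor's sketch (illustrative only, not part of the upstream change): the
+# shape of the 'db' bookkeeping dict that cod_setup() above fills in for a
+# hypothetical second replicated object (a 'pg2json' entry is added later by
+# the '--op list' pass in test_objectstore()):
+def _example_db_entry():
+    return {
+        "REPobject2": {
+            "xattr": {"key2-1": "val2-1"},
+            "omapheader": "hdr2",
+            "omap": {"okey2-1": "oval2-1"},
+        },
+    }
+
+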
+def get_lines(filename):
+    tmpfd = open(filename, "r")
+    line = True
+    lines = []
+    while line:
+        line = tmpfd.readline().rstrip('\n')
+        if line:
+            lines += [line]
+    tmpfd.close()
+    os.unlink(filename)
+    return lines
+
+
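+# Editor's sketch (illustrative only, not part of the upstream change): a
+# tidier equivalent of get_lines() above using a context manager.  Note one
+# behavioural difference: the original stops at the first empty line, while
+# this version simply skips empty lines.
+def _example_get_lines(filename):
+    with open(filename) as tmpfd:
+        lines = [l.rstrip('\n') for l in tmpfd if l.rstrip('\n')]
+    os.unlink(filename)
+    return lines
+
+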
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run ceph_objectstore_tool test
+
+    The config should be as follows::
+
+        ceph_objectstore_tool:
+          objects: 20 # <number of objects>
+          pgnum: 12
+    """
+
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'ceph_objectstore_tool task only accepts a dict for configuration'
+
+    log.info('Beginning ceph_objectstore_tool...')
+
+    log.debug(config)
+    log.debug(ctx)
+    clients = ctx.cluster.only(teuthology.is_type('client'))
+    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
+    (cli_remote, _) = clients.remotes.popitem()
+    log.debug(cli_remote)
+
+    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+    # client = clients.popitem()
+    # log.info(client)
+    osds = ctx.cluster.only(teuthology.is_type('osd'))
+    log.info("OSDS")
+    log.info(osds)
+    log.info(osds.remotes)
+
+    manager = ctx.managers['ceph']
+    while (len(manager.get_osd_status()['up']) !=
+           len(manager.get_osd_status()['raw'])):
+        time.sleep(10)
+    while (len(manager.get_osd_status()['in']) !=
+           len(manager.get_osd_status()['up'])):
+        time.sleep(10)
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'nodown')
+
+    PGNUM = config.get('pgnum', 12)
+    log.info("pgnum: {num}".format(num=PGNUM))
+
+    ERRORS = 0
+
+    REP_POOL = "rep_pool"
+    REP_NAME = "REPobject"
+    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
+    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)
+
+    EC_POOL = "ec_pool"
+    EC_NAME = "ECobject"
+    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
+    ERRORS += test_objectstore(ctx, config, cli_remote,
+                               EC_POOL, EC_NAME, ec=True)
+
+    if ERRORS == 0:
+        log.info("TEST PASSED")
+    else:
+        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
+
+    assert ERRORS == 0
+
+    try:
+        yield
+    finally:
+        log.info('Ending ceph_objectstore_tool')
+
+
+def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
+    manager = ctx.managers['ceph']
+
+    osds = ctx.cluster.only(teuthology.is_type('osd'))
+
+    TEUTHDIR = teuthology.get_testdir(ctx)
+    DATADIR = os.path.join(TEUTHDIR, "ceph.data")
+    DATALINECOUNT = 10000
+    ERRORS = 0
+    NUM_OBJECTS = config.get('objects', 10)
+    log.info("objects: {num}".format(num=NUM_OBJECTS))
+
+    pool_dump = manager.get_pool_dump(REP_POOL)
+    REPID = pool_dump['pool']
+
+    log.debug("repid={num}".format(num=REPID))
+
+    db = {}
+
+    LOCALDIR = tempfile.mkdtemp("cod")
+
+    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
+                         REP_NAME, DATALINECOUNT)
+    allremote = []
+    allremote.append(cli_remote)
+    allremote += osds.remotes.keys()
+    allremote = list(set(allremote))
+    for remote in allremote:
+        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+                              REP_NAME, DATALINECOUNT)
+
+    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
+                        REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
+
+    pgs = {}
+    for stats in manager.get_pg_stats():
+        if stats["pgid"].find(str(REPID) + ".") != 0:
+            continue
+        if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
+            for osd in stats["acting"]:
+                pgs.setdefault(osd, []).append(stats["pgid"])
+        elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
+            shard = 0
+            for osd in stats["acting"]:
+                pgs.setdefault(osd, []).append("{pgid}s{shard}".
+                                               format(pgid=stats["pgid"],
+                                                      shard=shard))
+                shard += 1
+        else:
+            raise Exception("{pool} has an unexpected type {type}".
+                            format(pool=REP_POOL, type=pool_dump["type"]))
+
+    log.info(pgs)
+    log.info(db)
+
+    for osd in manager.get_osd_status()['up']:
+        manager.kill_osd(osd)
+    time.sleep(5)
+
+    pgswithobjects = set()
+    objsinpg = {}
+
+    # Test --op list and generate json for all objects
+    log.info("Test --op list by generating json for all objects")
+    prefix = ("sudo ceph-objectstore-tool "
+              "--data-path {fpath} "
+              "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
+    for remote in osds.remotes.iterkeys():
+        log.debug(remote)
+        log.debug(osds.remotes[remote])
+        for role in osds.remotes[remote]:
+            if string.find(role, "osd.") != 0:
+                continue
+            osdid = int(role.split('.')[1])
+            log.info("process osd.{id} on {remote}".
+                     format(id=osdid, remote=remote))
+            cmd = (prefix + "--op list").format(id=osdid)
+            proc = remote.run(args=cmd.split(), check_status=False,
+                              stdout=StringIO())
+            if proc.exitstatus != 0:
+                log.error("Bad exit status {ret} from --op list request".
+                          format(ret=proc.exitstatus))
+                ERRORS += 1
+            else:
+                for pgline in proc.stdout.getvalue().splitlines():
+                    if not pgline:
+                        continue
+                    (pg, obj) = json.loads(pgline)
+                    name = obj['oid']
+                    if name in db:
+                        pgswithobjects.add(pg)
+                        objsinpg.setdefault(pg, []).append(name)
+                        db[name].setdefault("pg2json",
+                                            {})[pg] = json.dumps(obj)
+
+    log.info(db)
+    log.info(pgswithobjects)
+    log.info(objsinpg)
+
+    if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
+        # Test get-bytes
+        log.info("Test get-bytes and set-bytes")
+        for basename in db.keys():
+            file = os.path.join(DATADIR, basename)
+            GETNAME = os.path.join(DATADIR, "get")
+            SETNAME = os.path.join(DATADIR, "set")
+
+            for remote in osds.remotes.iterkeys():
+                for role in osds.remotes[remote]:
+                    if string.find(role, "osd.") != 0:
+                        continue
+                    osdid = int(role.split('.')[1])
+                    if osdid not in pgs:
+                        continue
+
+                    for pg, JSON in db[basename]["pg2json"].iteritems():
+                        if pg in pgs[osdid]:
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
+                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                            cmd += ("get-bytes {fname}".
+                                    format(fname=GETNAME).split())
+                            proc = remote.run(args=cmd, check_status=False)
+                            if proc.exitstatus != 0:
+                                remote.run(args="rm -f {getfile}".
+                                           format(getfile=GETNAME).split())
+                                log.error("Bad exit status {ret}".
+                                          format(ret=proc.exitstatus))
+                                ERRORS += 1
+                                continue
+                            cmd = ("diff -q {file} {getfile}".
+                                   format(file=file, getfile=GETNAME))
+                            proc = remote.run(args=cmd.split())
+                            if proc.exitstatus != 0:
+                                log.error("Data from get-bytes differ")
+                                # log.debug("Got:")
+                                # cat_file(logging.DEBUG, GETNAME)
+                                # log.debug("Expected:")
+                                # cat_file(logging.DEBUG, file)
+                                ERRORS += 1
+                            remote.run(args="rm -f {getfile}".
+                                       format(getfile=GETNAME).split())
+
+                            data = ("put-bytes going into {file}\n".
+                                    format(file=file))
+                            teuthology.write_file(remote, SETNAME, data)
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
+                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                            cmd += ("set-bytes {fname}".
+                                    format(fname=SETNAME).split())
+                            proc = remote.run(args=cmd, check_status=False)
+                            proc.wait()
+                            if proc.exitstatus != 0:
+                                log.info("set-bytes failed for object {obj} "
+                                         "in pg {pg} osd.{id} ret={ret}".
+                                         format(obj=basename, pg=pg,
+                                                id=osdid, ret=proc.exitstatus))
+                                ERRORS += 1
+
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
+                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                            cmd += "get-bytes -".split()
+                            proc = remote.run(args=cmd, check_status=False,
+                                              stdout=StringIO())
+                            proc.wait()
+                            if proc.exitstatus != 0:
+                                log.error("get-bytes after "
+                                          "set-bytes ret={ret}".
+                                          format(ret=proc.exitstatus))
+                                ERRORS += 1
+                            else:
+                                if data != proc.stdout.getvalue():
+                                    log.error("Data inconsistent after "
+                                              "set-bytes, got:")
+                                    log.error(proc.stdout.getvalue())
+                                    ERRORS += 1
+
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
+                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                            cmd += ("set-bytes {fname}".
+                                    format(fname=file).split())
+                            proc = remote.run(args=cmd, check_status=False)
+                            proc.wait()
+                            if proc.exitstatus != 0:
+                                log.info("set-bytes failed for object {obj} "
+                                         "in pg {pg} osd.{id} ret={ret}".
+                                         format(obj=basename, pg=pg,
+                                                id=osdid, ret=proc.exitstatus))
+                                ERRORS += 1
+
+    log.info("Test list-attrs get-attr")
+    for basename in db.keys():
+        file = os.path.join(DATADIR, basename)
+        GETNAME = os.path.join(DATADIR, "get")
+        SETNAME = os.path.join(DATADIR, "set")
+
+        for remote in osds.remotes.iterkeys():
+            for role in osds.remotes[remote]:
+                if string.find(role, "osd.") != 0:
+                    continue
+                osdid = int(role.split('.')[1])
+                if osdid not in pgs:
+                    continue
+
+                for pg, JSON in db[basename]["pg2json"].iteritems():
+                    if pg in pgs[osdid]:
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
+                        cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                        cmd += ["list-attrs"]
+                        proc = remote.run(args=cmd, check_status=False,
+                                          stdout=StringIO(), stderr=StringIO())
+                        proc.wait()
+                        if proc.exitstatus != 0:
+                            log.error("Bad exit status {ret}".
+                                      format(ret=proc.exitstatus))
+                            ERRORS += 1
+                            continue
+                        keys = proc.stdout.getvalue().split()
+                        values = dict(db[basename]["xattr"])
+
+                        for key in keys:
+                            if (key == "_" or
+                                    key == "snapset" or
+                                    key == "hinfo_key"):
+                                continue
+                            key = key.strip("_")
+                            if key not in values:
+                                log.error("The key {key} should be present".
+                                          format(key=key))
+                                ERRORS += 1
+                                continue
+                            exp = values.pop(key)
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
+                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
+                            cmd += ("get-attr {key}".
+                                    format(key="_" + key).split())
+                            proc = remote.run(args=cmd, check_status=False,
+                                              stdout=StringIO())
+                            proc.wait()
+                            if proc.exitstatus != 0:
+                                log.error("get-attr failed with {ret}".
+                                          format(ret=proc.exitstatus))
+                                ERRORS += 1
+                                continue
+                            val = proc.stdout.getvalue()
+                            if exp != val:
+                                log.error("For key {key} got value {got} "
+                                          "instead of {expected}".
+                                          format(key=key, got=val,
+                                                 expected=exp))
+                                ERRORS += 1
+                        if "hinfo_key" in keys:
+                            cmd_prefix = prefix.format(id=osdid)
+                            cmd = """
+      expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
+      echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
+      test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
+      echo $expected | base64 --decode | \
+         {prefix} --pgid {pg} '{json}' set-attr {key} -
+      test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
+                            """.format(prefix=cmd_prefix, pg=pg, json=JSON,
+                                       key="hinfo_key")
+                            log.debug(cmd)
+                            proc = remote.run(args=['bash', '-e', '-x',
+                                                    '-c', cmd],
+                                              check_status=False,
+                                              stdout=StringIO(),
+                                              stderr=StringIO())
+                            proc.wait()
+                            if proc.exitstatus != 0:
+                                log.error("failed with " +
+                                          str(proc.exitstatus))
+                                log.error(proc.stdout.getvalue() + " " +
+                                          proc.stderr.getvalue())
+                                ERRORS += 1
+
+                        if len(values) != 0:
+                            log.error("Not all keys found, remaining keys:")
+                            log.error(values)
+
+    log.info("Test pg info")
+    for remote in osds.remotes.iterkeys():
+        for role in osds.remotes[remote]:
+            if string.find(role, "osd.") != 0:
+                continue
+            osdid = int(role.split('.')[1])
+            if osdid not in pgs:
+                continue
+
+            for pg in pgs[osdid]:
+                cmd = ((prefix + "--op info --pgid {pg}").
+                       format(id=osdid, pg=pg).split())
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
+                proc.wait()
+                if proc.exitstatus != 0:
+                    log.error("Failure of --op info command with {ret}".
+                              format(ret=proc.exitstatus))
+                    ERRORS += 1
+                    continue
+                info = proc.stdout.getvalue()
+                if str(pg) not in info:
+                    log.error("Bad data from info: {info}".format(info=info))
+                    ERRORS += 1
+
+    log.info("Test pg logging")
+    for remote in osds.remotes.iterkeys():
+        for role in osds.remotes[remote]:
+            if string.find(role, "osd.") != 0:
+                continue
+            osdid = int(role.split('.')[1])
+            if osdid not in pgs:
+                continue
+
+            for pg in pgs[osdid]:
+                cmd = ((prefix + "--op log --pgid {pg}").
+                       format(id=osdid, pg=pg).split())
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
+                proc.wait()
+                if proc.exitstatus != 0:
+                    log.error("Getting log failed for pg {pg} "
+                              "from osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    ERRORS += 1
+                    continue
+                HASOBJ = pg in pgswithobjects
+                MODOBJ = "modify" in proc.stdout.getvalue()
+                if HASOBJ != MODOBJ:
+                    log.error("Bad log for pg {pg} from osd.{id}".
+                              format(pg=pg, id=osdid))
+                    MSG = "" if HASOBJ else "NOT "
+                    log.error("Log should {msg}have a modify entry".
+                              format(msg=MSG))
+                    ERRORS += 1
+
+    log.info("Test pg export")
+    EXP_ERRORS = 0
+    for remote in osds.remotes.iterkeys():
+        for role in osds.remotes[remote]:
+            if string.find(role, "osd.") != 0:
+                continue
+            osdid = int(role.split('.')[1])
+            if osdid not in pgs:
+                continue
+
+            for pg in pgs[osdid]:
+                fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+                                     format(id=osdid, pg=pg))
+
+                cmd = ((prefix + "--op export --pgid {pg} --file {file}").
+                       format(id=osdid, pg=pg, file=fpath))
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
+                proc.wait()
+                if proc.exitstatus != 0:
+                    log.error("Exporting failed for pg {pg} "
+                              "on osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    EXP_ERRORS += 1
+
+    ERRORS += EXP_ERRORS
+
+    log.info("Test pg removal")
+    RM_ERRORS = 0
+    for remote in osds.remotes.iterkeys():
+        for role in osds.remotes[remote]:
+            if string.find(role, "osd.") != 0:
+                continue
+            osdid = int(role.split('.')[1])
+            if osdid not in pgs:
+                continue
+
+            for pg in pgs[osdid]:
+                cmd = ((prefix + "--op remove --pgid {pg}").
+                       format(pg=pg, id=osdid))
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
+                proc.wait()
+                if proc.exitstatus != 0:
+                    log.error("Removing failed for pg {pg} "
+                              "on osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    RM_ERRORS += 1
+
+    ERRORS += RM_ERRORS
+
+    IMP_ERRORS = 0
+    if EXP_ERRORS == 0 and RM_ERRORS == 0:
+        log.info("Test pg import")
+
+        for remote in osds.remotes.iterkeys():
+            for role in osds.remotes[remote]:
+                if string.find(role, "osd.") != 0:
+                    continue
+                osdid = int(role.split('.')[1])
+                if osdid not in pgs:
+                    continue
+
+                for pg in pgs[osdid]:
+                    fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+                                         format(id=osdid, pg=pg))
+
+                    cmd = ((prefix + "--op import --file {file}").
+                           format(id=osdid, file=fpath))
+                    proc = remote.run(args=cmd, check_status=False,
+                                      stdout=StringIO())
+                    proc.wait()
+                    if proc.exitstatus != 0:
+                        log.error("Import failed from {file} with {ret}".
+                                  format(file=fpath, ret=proc.exitstatus))
+                        IMP_ERRORS += 1
+    else:
+        log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
+
+    ERRORS += IMP_ERRORS
+
+    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
+        log.info("Restarting OSDs....")
+        # They will still look to be up because the nodown flag is set
+        for osd in manager.get_osd_status()['up']:
+            manager.revive_osd(osd)
+        # Wait for health?
+        time.sleep(5)
+        # Let the post-test scrub verify consistency of all copies
+        log.info("Verify replicated import data")
+        objects = range(1, NUM_OBJECTS + 1)
+        for i in objects:
+            NAME = REP_NAME + "{num}".format(num=i)
+            TESTNAME = os.path.join(DATADIR, "gettest")
+            REFNAME = os.path.join(DATADIR, NAME)
+
+            proc = rados(ctx, cli_remote,
+                         ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
+
+            ret = proc.wait()
+            if ret != 0:
+                log.error("After import, rados get failed with {ret}".
+                          format(ret=proc.exitstatus))
+                ERRORS += 1
+                continue
+
+            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
+                                                   ref=REFNAME)
+            proc = cli_remote.run(args=cmd, check_status=False)
+            proc.wait()
+            if proc.exitstatus != 0:
+                log.error("Data comparison failed for {obj}".format(obj=NAME))
+                ERRORS += 1
+
+    return ERRORS
diff --git a/qa/tasks/cephfs/__init__.py b/qa/tasks/cephfs/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
new file mode 100644
index 0000000..1d8ce13
--- /dev/null
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -0,0 +1,411 @@
+import json
+import logging
+import unittest
+from unittest import case
+import time
+import os
+import re
+from StringIO import StringIO
+
+from tasks.cephfs.fuse_mount import FuseMount
+
+from teuthology.orchestra import run
+from teuthology.orchestra.run import CommandFailedError
+
+
+log = logging.getLogger(__name__)
+
+
+def long_running(f):
+    """
+    Decorator that adds an "is_long_running" attribute to the wrapped function
+    """
+    f.is_long_running = True
+    return f
+
+
+def needs_trimming(f):
+    """
+    Mark fn as requiring a client capable of trimming its cache (i.e. for ceph-fuse
+    this means it needs to be able to run as root, currently)
+    """
+    f.needs_trimming = True
+    return f
+
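+# Illustrative (hypothetical) usage of the decorators above in a subclass:
+#
+#     @long_running
+#     @needs_trimming
+#     def test_something_slow(self):
+#         ...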
+
+class CephFSTestCase(unittest.TestCase):
+    """
+    Test case for CephFS.  Requires the caller to populate the fs, mount_a and
+    mount_b class attributes with Filesystem and Mount instances (setting
+    mount_b is optional).
+
+    Handles resetting the cluster under test between tests.
+    """
+    # Environment references
+    mounts = None
+    fs = None
+    mds_cluster = None
+    ctx = None
+
+    # FIXME weird explicit naming
+    mount_a = None
+    mount_b = None
+
+    # Declarative test requirements: subclasses should override these to indicate
+    # their special needs.  If not met, tests will be skipped.
+    CLIENTS_REQUIRED = 1
+    MDSS_REQUIRED = 1
+    REQUIRE_KCLIENT_REMOTE = False
+    REQUIRE_ONE_CLIENT_REMOTE = False
+    REQUIRE_MEMSTORE = False
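+    # e.g. a hypothetical subclass exercising two clients against two MDS
+    # daemons would declare:
+    #     CLIENTS_REQUIRED = 2
+    #     MDSS_REQUIRED = 2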
+
+    # Whether to create the default filesystem during setUp
+    REQUIRE_FILESYSTEM = True
+
+    LOAD_SETTINGS = []
+
+    def setUp(self):
+        if len(self.fs.mds_ids) < self.MDSS_REQUIRED:
+            raise case.SkipTest("Only have {0} MDSs, require {1}".format(
+                len(self.fs.mds_ids), self.MDSS_REQUIRED
+            ))
+
+        if len(self.mounts) < self.CLIENTS_REQUIRED:
+            raise case.SkipTest("Only have {0} clients, require {1}".format(
+                len(self.mounts), self.CLIENTS_REQUIRED
+            ))
+
+        if self.REQUIRE_KCLIENT_REMOTE:
+            if not isinstance(self.mounts[0], FuseMount) or not isinstance(self.mounts[1], FuseMount):
+                # kclient kill() power cycles nodes, so requires clients to each be on
+                # their own node
+                if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname:
+                    raise case.SkipTest("kclient clients must be on separate nodes")
+
+        if self.REQUIRE_ONE_CLIENT_REMOTE:
+            if self.mounts[0].client_remote.hostname in self.fs.get_mds_hostnames():
+                raise case.SkipTest("Require first client to be on separate server from MDSs")
+
+        if self.REQUIRE_MEMSTORE:
+            objectstore = self.fs.get_config("osd_objectstore", "osd")
+            if objectstore != "memstore":
+                # You certainly *could* run this on a real OSD, but you don't want to sit
+                # here for hours waiting for the test to fill up a 1TB drive!
+                raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")
+
+        # Unmount all surplus clients
+        for i in range(self.CLIENTS_REQUIRED, len(self.mounts)):
+            mount = self.mounts[i]
+            log.info("Unmounting unneeded client {0}".format(mount.client_id))
+            mount.umount_wait()
+
+        # Create friendly mount_a, mount_b attrs
+        for i in range(0, self.CLIENTS_REQUIRED):
+            setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])
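+            # i.e. self.mounts[0] becomes self.mount_a, self.mounts[1]
+            # becomes self.mount_b, and so on.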
+
+        self.fs.clear_firewall()
+
+        # Unmount in order to start each test on a fresh mount, such
+        # that test_barrier can have a firm expectation of what OSD
+        # epoch the clients start with.
+        if self.mount_a.is_mounted():
+            self.mount_a.umount_wait()
+
+        if self.mount_b:
+            if self.mount_b.is_mounted():
+                self.mount_b.umount_wait()
+
+        # To avoid any issues with e.g. unlink bugs, we destroy and recreate
+        # the filesystem rather than just doing a rm -rf of files
+        self.mds_cluster.mds_stop()
+        self.mds_cluster.delete_all_filesystems()
+
+        # In case the previous filesystem had filled up the RADOS cluster, wait
+        # for the 'full' flag to clear.
+        osd_mon_report_interval_max = int(self.fs.get_config("osd_mon_report_interval_max", service_type='osd'))
+        self.wait_until_true(lambda: not self.fs.is_full(),
+                             timeout=osd_mon_report_interval_max * 5)
+
+        # In case anything is in the OSD blacklist, clear it out.  This is to avoid
+        # the OSD map changing in the background (due to blacklist expiry) while tests run.
+        blacklist = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['blacklist']
+        log.info("Removing {0} blacklist entries".format(len(blacklist)))
+        for addr, blacklisted_at in blacklist.items():
+            self.fs.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+
+        # In case some test messed with auth caps, reset them
+        client_mount_ids = [m.client_id for m in self.mounts]
+        for client_id in client_mount_ids:
+            self.fs.mon_manager.raw_cluster_cmd_result(
+                'auth', 'caps', "client.{0}".format(client_id),
+                'mds', 'allow',
+                'mon', 'allow r',
+                'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+
+        log.info(client_mount_ids)
+
+        # In case the test changes the IDs of clients, stash them so that we can
+        # reset in tearDown
+        self._original_client_ids = client_mount_ids
+
+        # In case there were any extra auth identities around from a previous
+        # test, delete them
+        for entry in self.auth_list():
+            ent_type, ent_id = entry['entity'].split(".")
+            if ent_type == "client" and ent_id not in client_mount_ids and ent_id != "admin":
+                self.fs.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+
+        if self.REQUIRE_FILESYSTEM:
+            self.fs.create()
+            self.fs.mds_restart()
+            self.fs.wait_for_daemons()
+            if not self.mount_a.is_mounted():
+                self.mount_a.mount()
+                self.mount_a.wait_until_mounted()
+
+            if self.mount_b:
+                if not self.mount_b.is_mounted():
+                    self.mount_b.mount()
+                    self.mount_b.wait_until_mounted()
+
+        # Load any config settings of interest
+        for setting in self.LOAD_SETTINGS:
+            setattr(self, setting, int(self.fs.mds_asok(
+                ['config', 'get', setting], self.fs.mds_ids[0]
+            )[setting]))
+
+        self.configs_set = set()
+
+    def tearDown(self):
+        self.fs.clear_firewall()
+        for m in self.mounts:
+            m.teardown()
+
+        for i, m in enumerate(self.mounts):
+            m.client_id = self._original_client_ids[i]
+
+        for subsys, key in self.configs_set:
+            self.mds_cluster.clear_ceph_conf(subsys, key)
+
+    def set_conf(self, subsys, key, value):
+        self.configs_set.add((subsys, key))
+        self.mds_cluster.set_ceph_conf(subsys, key, value)
+
+    def auth_list(self):
+        """
+        Convenience wrapper on "ceph auth list"
+        """
+        return json.loads(self.fs.mon_manager.raw_cluster_cmd(
+            "auth", "list", "--format=json-pretty"
+        ))['auth_dump']
+
+    def assert_session_count(self, expected, ls_data=None, mds_id=None):
+        if ls_data is None:
+            ls_data = self.fs.mds_asok(['session', 'ls'], mds_id=mds_id)
+
+        self.assertEqual(expected, len(ls_data), "Expected {0} sessions, found {1}".format(
+            expected, len(ls_data)
+        ))
+
+    def assert_session_state(self, client_id, expected_state):
+        self.assertEqual(
+            self._session_by_id(
+                self.fs.mds_asok(['session', 'ls'])).get(client_id, {'state': None})['state'],
+            expected_state)
+
+    def get_session_data(self, client_id):
+        return self.get_session(client_id)
+
+    def _session_list(self):
+        ls_data = self.fs.mds_asok(['session', 'ls'])
+        ls_data = [s for s in ls_data if s['state'] not in ['stale', 'closed']]
+        return ls_data
+
+    def get_session(self, client_id, session_ls=None):
+        if session_ls is None:
+            session_ls = self.fs.mds_asok(['session', 'ls'])
+
+        return self._session_by_id(session_ls)[client_id]
+
+    def _session_by_id(self, session_ls):
+        return dict([(s['id'], s) for s in session_ls])
+
+    def wait_until_equal(self, get_fn, expect_val, timeout, reject_fn=None):
+        period = 5
+        elapsed = 0
+        while True:
+            val = get_fn()
+            if val == expect_val:
+                return
+            elif reject_fn and reject_fn(val):
+                raise RuntimeError("wait_until_equal: forbidden value {0} seen".format(val))
+            else:
+                if elapsed >= timeout:
+                    raise RuntimeError("Timed out after {0} seconds waiting for {1} (currently {2})".format(
+                        elapsed, expect_val, val
+                    ))
+                else:
+                    log.debug("wait_until_equal: {0} != {1}, waiting...".format(val, expect_val))
+                time.sleep(period)
+                elapsed += period
+
+        log.debug("wait_until_equal: success")
+
+    def wait_until_true(self, condition, timeout):
+        period = 5
+        elapsed = 0
+        while True:
+            if condition():
+                return
+            else:
+                if elapsed >= timeout:
+                    raise RuntimeError("Timed out after {0} seconds".format(elapsed))
+                else:
+                    log.debug("wait_until_true: waiting...")
+                time.sleep(period)
+                elapsed += period
+
+        log.debug("wait_until_true: success")
+
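+    # Illustrative usage of wait_until_equal/wait_until_true (values are
+    # hypothetical):
+    #     self.wait_until_equal(lambda: len(self._session_list()), 2, timeout=60)
+    #     self.wait_until_true(lambda: not self.fs.is_full(), timeout=120)
+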
+    def wait_for_daemon_start(self, daemon_ids=None):
+        """
+        Wait until all the daemons appear in the FSMap, either assigned
+        MDS ranks or in the list of standbys
+        """
+        def get_daemon_names():
+            fs_map = self.mds_cluster.get_fs_map()
+            names = [m['name'] for m in fs_map['standbys']]
+            for fs in fs_map['filesystems']:
+                names.extend([info['name'] for info in fs['mdsmap']['info'].values()])
+
+            return names
+
+        if daemon_ids is None:
+            daemon_ids = self.mds_cluster.mds_ids
+
+        try:
+            self.wait_until_true(
+                lambda: set(daemon_ids) & set(get_daemon_names()) == set(daemon_ids),
+                timeout=30
+            )
+        except RuntimeError:
+            log.warn("Timeout waiting for daemons {0}, while we have {1}".format(
+                daemon_ids, get_daemon_names()
+            ))
+            raise
+
+    def assert_mds_crash(self, daemon_id):
+        """
+        Assert that a particular MDS daemon crashes (block until
+        it does)
+        """
+        try:
+            self.fs.mds_daemons[daemon_id].proc.wait()
+        except CommandFailedError as e:
+            log.info("MDS '{0}' crashed with status {1} as expected".format(daemon_id, e.exitstatus))
+            self.fs.mds_daemons[daemon_id].proc = None
+
+            # Go remove the coredump from the crash, otherwise teuthology.internal.coredump will
+            # catch it later and treat it as a failure.
+            p = self.fs.mds_daemons[daemon_id].remote.run(args=[
+                "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
+            core_pattern = p.stdout.getvalue().strip()
+            if os.path.dirname(core_pattern):  # Non-default core_pattern with a directory in it
+                # We have seen a core_pattern that looks like it's from teuthology's coredump
+                # task, so proceed to clear out the core file
+                log.info("Clearing core from pattern: {0}".format(core_pattern))
+
+                # Determine the PID of the crashed MDS by inspecting the MDSMap, it had
+                # to talk to the mons to get assigned a rank to reach the point of crashing
+                addr = self.fs.mon_manager.get_mds_status(daemon_id)['addr']
+                pid_str = addr.split("/")[1]
+                log.info("Determined crasher PID was {0}".format(pid_str))
+
+                # Substitute PID into core_pattern to get a glob
+                core_glob = core_pattern.replace("%p", pid_str)
+                core_glob = re.sub("%[a-z]", "*", core_glob)  # Match all for all other % tokens
+
+                # Verify that we see the expected single coredump matching the expected pattern
+                ls_proc = self.fs.mds_daemons[daemon_id].remote.run(args=[
+                    "sudo", "ls", run.Raw(core_glob)
+                ], stdout=StringIO())
+                cores = [f for f in ls_proc.stdout.getvalue().strip().split("\n") if f]
+                log.info("Enumerated cores: {0}".format(cores))
+                self.assertEqual(len(cores), 1)
+
+                log.info("Found core file {0}, deleting it".format(cores[0]))
+
+                self.fs.mds_daemons[daemon_id].remote.run(args=[
+                    "sudo", "rm", "-f", cores[0]
+                ])
+            else:
+                log.info("No core_pattern directory set, nothing to clear (internal.coredump not enabled?)")
+
+        else:
+            raise AssertionError("MDS daemon '{0}' did not crash as expected".format(daemon_id))
+
+    def assert_cluster_log(self, expected_pattern):
+        """
+        Context manager.  Assert that during execution, or shortly after the block
+        exits (the watcher is given a short grace period), the Ceph cluster log
+        emits a message matching the expected pattern.
+
+        :param expected_pattern: a string that you expect to see in the log output
+        """
+
+        ceph_manager = self.fs.mon_manager
+
+        class ContextManager(object):
+            def match(self):
+                return expected_pattern in self.watcher_process.stdout.getvalue()
+
+            def __enter__(self):
+                self.watcher_process = ceph_manager.run_ceph_w()
+
+            def __exit__(self, exc_type, exc_val, exc_tb):
+                if not self.watcher_process.finished:
+                    # Check if we got an early match, wait a bit if we didn't
+                    if self.match():
+                        return
+                    else:
+                        log.debug("No log hits yet, waiting...")
+                        # Default monc tick interval is 10s, so wait that long and
+                        # then some grace
+                        time.sleep(15)
+
+                self.watcher_process.stdin.close()
+                try:
+                    self.watcher_process.wait()
+                except CommandFailedError:
+                    pass
+
+                if not self.match():
+                    log.error("Log output: \n{0}\n".format(self.watcher_process.stdout.getvalue()))
+                    raise AssertionError("Expected log message not found: '{0}'".format(expected_pattern))
+
+        return ContextManager()
+
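+    # Illustrative usage of assert_cluster_log above (the log fragment shown
+    # is a hypothetical example):
+    #     with self.assert_cluster_log("some expected cluster log fragment"):
+    #         trigger_the_event()
+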
+    def wait_for_health(self, pattern, timeout):
+        """
+        Wait until 'ceph health' contains a single message matching the pattern
+        """
+        def seen_health_warning():
+            health = self.fs.mon_manager.get_mon_health()
+            summary_strings = [s['summary'] for s in health['summary']]
+            if len(summary_strings) == 0:
+                log.debug("No health summary strings yet ({0})".format(summary_strings))
+                return False
+            elif len(summary_strings) == 1 and pattern in summary_strings[0]:
+                return True
+            else:
+                raise RuntimeError("Unexpected health messages: {0}".format(summary_strings))
+
+        self.wait_until_true(seen_health_warning, timeout)
+
+    def wait_for_health_clear(self, timeout):
+        """
+        Wait until `ceph health` returns no messages
+        """
+        def is_clear():
+            health = self.fs.mon_manager.get_mon_health()
+            return len(health['summary']) == 0
+
+        self.wait_until_true(is_clear, timeout)
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
new file mode 100644
index 0000000..9f130e3
--- /dev/null
+++ b/qa/tasks/cephfs/filesystem.py
@@ -0,0 +1,905 @@
+
+from StringIO import StringIO
+import json
+import logging
+from gevent import Greenlet
+import os
+import time
+import datetime
+import re
+import errno
+
+from teuthology.exceptions import CommandFailedError
+from teuthology import misc
+from teuthology.nuke import clear_firewall
+from teuthology.parallel import parallel
+from tasks.ceph_manager import write_conf
+from tasks import ceph_manager
+
+
+log = logging.getLogger(__name__)
+
+
+DAEMON_WAIT_TIMEOUT = 120
+ROOT_INO = 1
+
+
+class ObjectNotFound(Exception):
+    def __init__(self, object_name):
+        self._object_name = object_name
+
+    def __str__(self):
+        return "Object not found: '{0}'".format(self._object_name)
+
+
+class MDSCluster(object):
+    """
+    Collective operations on all the MDS daemons in the Ceph cluster.  These
+    daemons may be in use by various Filesystems.
+
+    For the benefit of pre-multi-filesystem tests, this class is also
+    a parent of Filesystem.  The correct way to use MDSCluster going forward is
+    as a separate instance outside of your (multiple) Filesystem instances.
+    """
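+    # Illustrative (hypothetical) usage from a task:
+    #     mds_cluster = MDSCluster(ctx)
+    #     mds_cluster.mds_stop()               # stop every MDS daemon
+    #     mds_cluster.mds_fail_restart('a')    # fail and restart just mds.a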
+
+    @property
+    def admin_remote(self):
+        first_mon = misc.get_first_mon(self._ctx, None)
+        (result,) = self._ctx.cluster.only(first_mon).remotes.iterkeys()
+        return result
+
+    def __init__(self, ctx):
+        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
+        self._ctx = ctx
+
+        if len(self.mds_ids) == 0:
+            raise RuntimeError("This task requires at least one MDS")
+
+        self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
+        if hasattr(self._ctx, "daemons"):
+            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
+            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])
+
+    def _one_or_all(self, mds_id, cb, in_parallel=True):
+        """
+        Call a callback for a single named MDS, or for all.
+
+        Note that the parallelism here isn't for performance; it's to avoid being
+        overly kind to the cluster by pausing an ssh-latency of time between
+        operations, and to avoid always executing them in the same order.  However,
+        some actions don't cope with being done in parallel, so it's optional
+        (`in_parallel`).
+
+        :param mds_id: MDS daemon name, or None
+        :param cb: Callback taking single argument of MDS daemon name
+        :param in_parallel: whether to invoke callbacks concurrently (else one after the other)
+        """
+        if mds_id is None:
+            if in_parallel:
+                with parallel() as p:
+                    for mds_id in self.mds_ids:
+                        p.spawn(cb, mds_id)
+            else:
+                for mds_id in self.mds_ids:
+                    cb(mds_id)
+        else:
+            cb(mds_id)
+
+    def mds_stop(self, mds_id=None):
+        """
+        Stop the MDS daemon process(es).  If it held a rank, that rank
+        will eventually go laggy.
+        """
+        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].stop())
+
+    def mds_fail(self, mds_id=None):
+        """
+        Inform MDSMonitor of the death of the daemon process(es).  If it held
+        a rank, that rank will be relinquished.
+        """
+        self._one_or_all(mds_id, lambda id_: self.mon_manager.raw_cluster_cmd("mds", "fail", id_))
+
+    def mds_restart(self, mds_id=None):
+        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].restart())
+
+    def mds_fail_restart(self, mds_id=None):
+        """
+        Variation on restart that includes marking MDSs as failed, so that doing this
+        operation followed by waiting for healthy daemon states guarantees that they
+        have gone down and come up, rather than potentially seeing the healthy states
+        that existed before the restart.
+        """
+        def _fail_restart(id_):
+            self.mds_daemons[id_].stop()
+            self.mon_manager.raw_cluster_cmd("mds", "fail", id_)
+            self.mds_daemons[id_].restart()
+
+        self._one_or_all(mds_id, _fail_restart)
+
+    def get_filesystem(self, name):
+        return Filesystem(self._ctx, name)
+
+    def get_fs_map(self):
+        fs_map = json.loads(self.mon_manager.raw_cluster_cmd("fs", "dump", "--format=json-pretty"))
+        return fs_map
+
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        fs_ls = json.loads(self.mon_manager.raw_cluster_cmd("fs", "ls", "--format=json-pretty"))
+        for fs in fs_ls:
+            self.mon_manager.raw_cluster_cmd("fs", "set", fs['name'], "cluster_down", "true")
+            mds_map = json.loads(
+                self.mon_manager.raw_cluster_cmd(
+                    "fs", "get", fs['name'], "--format=json-pretty"))['mdsmap']
+
+            for gid in mds_map['up'].values():
+                self.mon_manager.raw_cluster_cmd('mds', 'fail', gid.__str__())
+
+            self.mon_manager.raw_cluster_cmd('fs', 'rm', fs['name'], '--yes-i-really-mean-it')
+            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                             fs['metadata_pool'],
+                                             fs['metadata_pool'],
+                                             '--yes-i-really-really-mean-it')
+            for data_pool in fs['data_pools']:
+                self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                                 data_pool, data_pool,
+                                                 '--yes-i-really-really-mean-it')
+
+    def get_standby_daemons(self):
+        return set([s['name'] for s in self.get_fs_map()['standbys']])
+
+    def get_mds_hostnames(self):
+        result = set()
+        for mds_id in self.mds_ids:
+            mds_remote = self.mon_manager.find_remote('mds', mds_id)
+            result.add(mds_remote.hostname)
+
+        return list(result)
+
+    def get_config(self, key, service_type=None):
+        """
+        Get config from mon by default, or a specific service if caller asks for it
+        """
+        if service_type is None:
+            service_type = 'mon'
+
+        service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0]
+        return self.json_asok(['config', 'get', key], service_type, service_id)[key]
+
+    def set_ceph_conf(self, subsys, key, value):
+        if subsys not in self._ctx.ceph['ceph'].conf:
+            self._ctx.ceph['ceph'].conf[subsys] = {}
+        self._ctx.ceph['ceph'].conf[subsys][key] = value
+        write_conf(self._ctx)  # XXX because we don't have the ceph task's config object, if they
+                               # used a different config path this won't work.
+
+    def clear_ceph_conf(self, subsys, key):
+        del self._ctx.ceph['ceph'].conf[subsys][key]
+        write_conf(self._ctx)
+
+    def json_asok(self, command, service_type, service_id):
+        proc = self.mon_manager.admin_socket(service_type, service_id, command)
+        response_data = proc.stdout.getvalue()
+        log.info("_json_asok output: {0}".format(response_data))
+        if response_data.strip():
+            return json.loads(response_data)
+        else:
+            return None
+
+    def set_clients_block(self, blocked, mds_id=None):
+        """
+        Block (using iptables) client communications to this MDS.  Be careful: if
+        other services are running on this MDS, or other MDSs try to talk to this
+        MDS, their communications may also be blocked as collateral damage.
+
+        :param mds_id: Optional ID of MDS to block, default to all
+        :return:
+        """
+        da_flag = "-A" if blocked else "-D"
+
+        def set_block(_mds_id):
+            remote = self.mon_manager.find_remote('mds', _mds_id)
+
+            addr = self.get_mds_addr(_mds_id)
+            ip_str, port_str, inst_str = re.match("(.+):(.+)/(.+)", addr).groups()
+
+            remote.run(
+                args=["sudo", "iptables", da_flag, "OUTPUT", "-p", "tcp", "--sport", port_str, "-j", "REJECT", "-m",
+                      "comment", "--comment", "teuthology"])
+            remote.run(
+                args=["sudo", "iptables", da_flag, "INPUT", "-p", "tcp", "--dport", port_str, "-j", "REJECT", "-m",
+                      "comment", "--comment", "teuthology"])
+
+        self._one_or_all(mds_id, set_block, in_parallel=False)
+
+    def clear_firewall(self):
+        clear_firewall(self._ctx)
+
+    def _all_info(self):
+        """
+        Iterator for all the mds_info components in the FSMap
+        """
+        fs_map = self.get_fs_map()
+        for i in fs_map['standbys']:
+            yield i
+        for fs in fs_map['filesystems']:
+            for i in fs['mdsmap']['info'].values():
+                yield i
+
+    def get_mds_addr(self, mds_id):
+        """
+        Return the instance addr as a string, like "10.214.133.138:6807\/10825"
+        """
+        for mds_info in self._all_info():
+            if mds_info['name'] == mds_id:
+                return mds_info['addr']
+
+        log.warn(json.dumps(list(self._all_info()), indent=2))  # dump for debugging
+        raise RuntimeError("MDS id '{0}' not found in map".format(mds_id))
+
+    def get_mds_info(self, mds_id):
+        for mds_info in self._all_info():
+            if mds_info['name'] == mds_id:
+                return mds_info
+
+        return None
+
+    def get_mds_info_by_rank(self, mds_rank):
+        for mds_info in self._all_info():
+            if mds_info['rank'] == mds_rank:
+                return mds_info
+
+        return None
+
+
+class Filesystem(MDSCluster):
+    """
+    This object is for driving a CephFS filesystem.  The MDS daemons driven by
+    MDSCluster may be shared with other Filesystems.
+    """
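+    # Illustrative (hypothetical) usage:
+    #     fs = Filesystem(ctx, name="cephfs")
+    #     fs.create()
+    #     fs.wait_for_daemons()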
+    def __init__(self, ctx, name=None):
+        super(Filesystem, self).__init__(ctx)
+
+        if name is None:
+            name = "cephfs"
+
+        self.name = name
+        self.metadata_pool_name = "{0}_metadata".format(name)
+        self.data_pool_name = "{0}_data".format(name)
+
+        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
+        self.client_id = client_list[0]
+        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
+
+    def get_pgs_per_fs_pool(self):
+        """
+        Calculate how many PGs to use when creating a pool, in order to avoid raising any
+        health warnings about mon_pg_warn_min_per_osd
+
+        :return: an integer number of PGs
+        """
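+        # e.g. if mon_pg_warn_min_per_osd were 30 and the cluster had 3 OSDs,
+        # this would return 90 (values are illustrative only).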
+        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
+        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
+        return pg_warn_min_per_osd * osd_count
+
+    def create(self):
+        log.info("Creating filesystem '{0}'".format(self.name))
+
+        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
+
+        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                         self.metadata_pool_name, pgs_per_fs_pool.__str__())
+        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                         self.data_pool_name, pgs_per_fs_pool.__str__())
+        self.mon_manager.raw_cluster_cmd('fs', 'new',
+                                         self.name, self.metadata_pool_name, self.data_pool_name)
+
+    def exists(self):
+        """
+        Whether a filesystem exists in the mon's filesystem list
+        """
+        fs_list = json.loads(self.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty'))
+        return self.name in [fs['name'] for fs in fs_list]
+
+    def legacy_configured(self):
+        """
+        Check if a legacy (i.e. pre "fs new") filesystem configuration is present.  If this is
+        the case, the caller should avoid using Filesystem.create
+        """
+        try:
+            out_text = self.mon_manager.raw_cluster_cmd('--format=json-pretty', 'osd', 'lspools')
+            pools = json.loads(out_text)
+            metadata_pool_exists = 'metadata' in [p['poolname'] for p in pools]
+        except CommandFailedError as e:
+            # For use in upgrade tests, Ceph cuttlefish and earlier don't support
+            # structured output (--format) from the CLI.
+            if e.exitstatus == 22:
+                metadata_pool_exists = True
+            else:
+                raise
+
+        return metadata_pool_exists
+
+    def _df(self):
+        return json.loads(self.mon_manager.raw_cluster_cmd("df", "--format=json-pretty"))
+
+    def get_mds_map(self):
+        fs = json.loads(self.mon_manager.raw_cluster_cmd("fs", "get", self.name, "--format=json-pretty"))
+        return fs['mdsmap']
+
+    def get_data_pool_name(self):
+        return self.data_pool_name
+
+    def get_data_pool_names(self):
+        osd_map = self.mon_manager.get_osd_dump_json()
+        id_to_name = {}
+        for p in osd_map['pools']:
+            id_to_name[p['pool']] = p['pool_name']
+
+        return [id_to_name[pool_id] for pool_id in self.get_mds_map()['data_pools']]
+
+    def get_metadata_pool_name(self):
+        return self.metadata_pool_name
+
+    def get_namespace_id(self):
+        fs = json.loads(self.mon_manager.raw_cluster_cmd("fs", "get", self.name, "--format=json-pretty"))
+        return fs['id']
+
+    def get_pool_df(self, pool_name):
+        """
+        Return a dict like:
+        {u'bytes_used': 0, u'max_avail': 83848701, u'objects': 0, u'kb_used': 0}
+        """
+        for pool_df in self._df()['pools']:
+            if pool_df['name'] == pool_name:
+                return pool_df['stats']
+
+        raise RuntimeError("Pool name '{0}' not found".format(pool_name))
+
+    def get_usage(self):
+        return self._df()['stats']['total_used_bytes']
+
+    def are_daemons_healthy(self):
+        """
+        Return true if all daemons are in one of the states active, standby or
+        standby-replay, and at least max_mds daemons are in 'active'.
+
+        Unlike most of Filesystem, this function is tolerant of new-style `fs`
+        commands being missing, because we are part of the ceph installation
+        process during upgrade suites, so must fall back to old style commands
+        when we get an EINVAL on a new style command.
+
+        :return:
+        """
+
+        active_count = 0
+        try:
+            mds_map = self.get_mds_map()
+        except CommandFailedError as cfe:
+            # Old version, fall back to non-multi-fs commands
+            if cfe.exitstatus == errno.EINVAL:
+                mds_map = json.loads(
+                        self.mon_manager.raw_cluster_cmd('mds', 'dump', '--format=json'))
+            else:
+                raise
+
+        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+
+        for mds_id, mds_status in mds_map['info'].items():
+            if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
+                log.warning("Unhealthy mds state {0}:{1}".format(mds_id, mds_status['state']))
+                return False
+            elif mds_status['state'] == 'up:active':
+                active_count += 1
+
+        log.info("are_daemons_healthy: {0}/{1}".format(
+            active_count, mds_map['max_mds']
+        ))
+
+        if active_count >= mds_map['max_mds']:
+            # The MDSMap says these guys are active, but let's check they really are
+            for mds_id, mds_status in mds_map['info'].items():
+                if mds_status['state'] == 'up:active':
+                    try:
+                        daemon_status = self.mds_asok(["status"], mds_id=mds_status['name'])
+                    except CommandFailedError as cfe:
+                        if cfe.exitstatus == errno.EINVAL:
+                            # Old version, can't do this check
+                            continue
+                        else:
+                            # MDS not even running
+                            return False
+
+                    if daemon_status['state'] != 'up:active':
+                        # MDS hasn't taken the latest map yet
+                        return False
+
+            return True
+        else:
+            return False
+
+    def get_daemon_names(self, state):
+        """
+        Return MDS daemon names of those daemons in the given state
+        :param state:
+        :return:
+        """
+        status = self.get_mds_map()
+        result = []
+        for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+            if mds_status['state'] == state:
+                result.append(mds_status['name'])
+
+        return result
+
+    def get_active_names(self):
+        """
+        Return MDS daemon names of those daemons holding ranks
+        in state up:active
+
+        :return: list of strings like ['a', 'b'], sorted by rank
+        """
+        return self.get_daemon_names("up:active")
+
+    def get_rank_names(self):
+        """
+        Return MDS daemon names of those daemons holding a rank,
+        sorted by rank.  This includes e.g. up:replay/reconnect
+        as well as active, but does not include standby or
+        standby-replay.
+        """
+        status = self.get_mds_map()
+        result = []
+        for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
+                result.append(mds_status['name'])
+
+        return result
+
+    def wait_for_daemons(self, timeout=None):
+        """
+        Wait until all daemons are healthy
+        :return:
+        """
+
+        if timeout is None:
+            timeout = DAEMON_WAIT_TIMEOUT
+
+        elapsed = 0
+        while True:
+            if self.are_daemons_healthy():
+                return
+            else:
+                time.sleep(1)
+                elapsed += 1
+
+            if elapsed > timeout:
+                raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
+
+    def get_lone_mds_id(self):
+        """
+        Get a single MDS ID: the only one if there is only one
+        configured, else the only one currently holding a rank,
+        else raise an error.
+        """
+        if len(self.mds_ids) != 1:
+            alive = self.get_rank_names()
+            if len(alive) == 1:
+                return alive[0]
+            else:
+                raise ValueError("Explicit MDS argument required when multiple MDSs in use")
+        else:
+            return self.mds_ids[0]
+
+    def recreate(self):
+        log.info("Creating new filesystem")
+        self.delete_all_filesystems()
+        self.create()
+
+    def get_metadata_object(self, object_type, object_id):
+        """
+        Retrieve an object from the metadata pool, pass it through
+        ceph-dencoder to dump it to JSON, and return the decoded object.
+        """
+        temp_bin_path = '/tmp/out.bin'
+
+        self.client_remote.run(args=[
+            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
+        ])
+
+        stdout = StringIO()
+        self.client_remote.run(args=[
+            'sudo', os.path.join(self._prefix, 'ceph-dencoder'), 'type', object_type, 'import', temp_bin_path, 'decode', 'dump_json'
+        ], stdout=stdout)
+        dump_json = stdout.getvalue().strip()
+        try:
+            dump = json.loads(dump_json)
+        except (TypeError, ValueError):
+            log.error("Failed to decode JSON: '{0}'".format(dump_json))
+            raise
+
+        return dump
+
+    def get_journal_version(self):
+        """
+        Read the JournalPointer and Journal::Header objects to learn the version of
+        encoding in use.
+        """
+        journal_pointer_object = '400.00000000'
+        journal_pointer_dump = self.get_metadata_object("JournalPointer", journal_pointer_object)
+        journal_ino = journal_pointer_dump['journal_pointer']['front']
+
+        journal_header_object = "{0:x}.00000000".format(journal_ino)
+        journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)
+
+        version = journal_header_dump['journal_header']['stream_format']
+        log.info("Read journal version {0}".format(version))
+
+        return version
+
+    def mds_asok(self, command, mds_id=None):
+        if mds_id is None:
+            mds_id = self.get_lone_mds_id()
+
+        return self.json_asok(command, 'mds', mds_id)
+
+    def is_full(self):
+        flags = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['flags']
+        return 'full' in flags
+
+    def is_pool_full(self, pool_name):
+        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
+        for pool in pools:
+            if pool['pool_name'] == pool_name:
+                return 'full' in pool['flags_names'].split(",")
+
+        raise RuntimeError("Pool not found '{0}'".format(pool_name))
+
+    def wait_for_state(self, goal_state, reject=None, timeout=None, mds_id=None):
+        """
+        Block until the MDS reaches a particular state, or a failure condition
+        is met.
+
+        When there are multiple MDSs, succeed when exactly one MDS is in the
+        goal state, or fail when any MDS is in the reject state.
+
+        :param goal_state: Return once the MDS is in this state
+        :param reject: Fail if the MDS enters this state before the goal state
+        :param timeout: Fail if this many seconds pass before reaching goal
+        :return: number of seconds waited, rounded down to integer
+        """
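+        # Illustrative usage (daemon name is hypothetical):
+        #     fs.wait_for_state("up:active", timeout=120, mds_id="a")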
+
+        started_at = time.time()
+        while True:
+            if mds_id is not None:
+                # mds_info is None if no daemon with this ID exists in the map
+                mds_info = self.mon_manager.get_mds_status(mds_id)
+                current_state = mds_info['state'] if mds_info else None
+                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+            else:
+                # In general, look for a single MDS
+                mds_status = self.get_mds_map()
+                states = [m['state'] for m in mds_status['info'].values()]
+                if [s for s in states if s == goal_state] == [goal_state]:
+                    current_state = goal_state
+                elif reject in states:
+                    current_state = reject
+                else:
+                    current_state = None
+                log.info("mapped states {0} to {1}".format(states, current_state))
+
+            elapsed = time.time() - started_at
+            if current_state == goal_state:
+                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+                return elapsed
+            elif reject is not None and current_state == reject:
+                raise RuntimeError("MDS in reject state {0}".format(current_state))
+            elif timeout is not None and elapsed > timeout:
+                log.error("MDS status at timeout: {0}".format(self.get_mds_map()))
+                raise RuntimeError(
+                    "Reached timeout after {0} seconds waiting for state {1}, while in state {2}".format(
+                        elapsed, goal_state, current_state
+                    ))
+            else:
+                time.sleep(1)
+
+    def _read_data_xattr(self, ino_no, xattr_name, type, pool):
+        mds_id = self.mds_ids[0]
+        remote = self.mds_daemons[mds_id].remote
+        if pool is None:
+            pool = self.get_data_pool_name()
+
+        obj_name = "{0:x}.00000000".format(ino_no)
+
+        args = [
+            os.path.join(self._prefix, "rados"), "-p", pool, "getxattr", obj_name, xattr_name
+        ]
+        try:
+            proc = remote.run(
+                args=args,
+                stdout=StringIO())
+        except CommandFailedError as e:
+            log.error(e.__str__())
+            raise ObjectNotFound(obj_name)
+
+        data = proc.stdout.getvalue()
+
+        p = remote.run(
+            args=[os.path.join(self._prefix, "ceph-dencoder"), "type", type, "import", "-", "decode", "dump_json"],
+            stdout=StringIO(),
+            stdin=data
+        )
+
+        return json.loads(p.stdout.getvalue().strip())
+
+    def _write_data_xattr(self, ino_no, xattr_name, data, pool=None):
+        """
+        Write to an xattr of the 0th data object of an inode.  Will
+        succeed whether the object and/or xattr already exist or not.
+
+        :param ino_no: integer inode number
+        :param xattr_name: string name of the xattr
+        :param data: byte array data to write to the xattr
+        :param pool: name of data pool or None to use primary data pool
+        :return: None
+        """
+        remote = self.mds_daemons[self.mds_ids[0]].remote
+        if pool is None:
+            pool = self.get_data_pool_name()
+
+        obj_name = "{0:x}.00000000".format(ino_no)
+        args = [
+            os.path.join(self._prefix, "rados"), "-p", pool, "setxattr",
+            obj_name, xattr_name, data
+        ]
+        remote.run(
+            args=args,
+            stdout=StringIO())
+
+    def read_backtrace(self, ino_no, pool=None):
+        """
+        Read the backtrace from the data pool, return a dict in the format
+        given by inode_backtrace_t::dump, which is something like:
+
+        ::
+
+            rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin
+            ceph-dencoder type inode_backtrace_t import out.bin decode dump_json
+
+            { "ino": 1099511627778,
+              "ancestors": [
+                    { "dirino": 1,
+                      "dname": "blah",
+                      "version": 11}],
+              "pool": 1,
+              "old_pools": []}
+
+        :param pool: name of pool to read backtrace from.  If omitted, FS must have only
+                     one data pool and that will be used.
+        """
+        return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool)
+
+    def read_layout(self, ino_no, pool=None):
+        """
+        Read 'layout' xattr of an inode and parse the result, returning a dict like:
+        ::
+            {
+                "stripe_unit": 4194304,
+                "stripe_count": 1,
+                "object_size": 4194304,
+                "pool_id": 1,
+                "pool_ns": "",
+            }
+
+        :param pool: name of pool to read layout from.  If omitted, FS must have only
+                     one data pool and that will be used.
+        """
+        return self._read_data_xattr(ino_no, "layout", "file_layout_t", pool)
+
+    def _enumerate_data_objects(self, ino, size):
+        """
+        Get the list of expected data objects for a range, and the list of objects
+        that really exist.
+
+        :return: a tuple of two lists of strings (expected, actual)
+        """
+        stripe_size = 1024 * 1024 * 4
+
+        size = max(stripe_size, size)
+
+        want_objects = [
+            "{0:x}.{1:08x}".format(ino, n)
+            for n in range(0, ((size - 1) / stripe_size) + 1)
+        ]
+
+        exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
+
+        return want_objects, exist_objects
+
+    def data_objects_present(self, ino, size):
+        """
+        Check that *all* the expected data objects for an inode are present in the data pool
+        """
+
+        want_objects, exist_objects = self._enumerate_data_objects(ino, size)
+        missing = set(want_objects) - set(exist_objects)
+
+        if missing:
+            log.info("Objects missing (ino {0}, size {1}): {2}".format(
+                ino, size, missing
+            ))
+            return False
+        else:
+            log.info("All objects for ino {0} size {1} found".format(ino, size))
+            return True
+
+    def data_objects_absent(self, ino, size):
+        want_objects, exist_objects = self._enumerate_data_objects(ino, size)
+        present = set(want_objects) & set(exist_objects)
+
+        if present:
+            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+                ino, size, present
+            ))
+            return False
+        else:
+            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+            return True
+
+    def rados(self, args, pool=None, namespace=None, stdin_data=None):
+        """
+        Call into the `rados` CLI from an MDS
+        """
+
+        if pool is None:
+            pool = self.get_metadata_pool_name()
+
+        # Doesn't matter which MDS we use to run rados commands, they all
+        # have access to the pools
+        mds_id = self.mds_ids[0]
+        remote = self.mds_daemons[mds_id].remote
+
+        # NB we could alternatively use librados pybindings for this, but it's a one-liner
+        # using the `rados` CLI
+        args = ([os.path.join(self._prefix, "rados"), "-p", pool] +
+                (["--namespace", namespace] if namespace else []) +
+                args)
+        p = remote.run(
+            args=args,
+            stdin=stdin_data,
+            stdout=StringIO())
+        return p.stdout.getvalue().strip()
+
+    def list_dirfrag(self, dir_ino):
+        """
+        Read the dirfrag object for the given directory inode and return its
+        list of omap keys.
+
+        :return: a list of 0 or more strings
+        """
+
+        dirfrag_obj_name = "{0:x}.00000000".format(dir_ino)
+
+        try:
+            key_list_str = self.rados(["listomapkeys", dirfrag_obj_name])
+        except CommandFailedError as e:
+            log.error(e.__str__())
+            raise ObjectNotFound(dirfrag_obj_name)
+
+        return key_list_str.split("\n") if key_list_str else []
+
+    def erase_metadata_objects(self, prefix):
+        """
+        For all objects in the metadata pool matching the prefix,
+        erase them.
+
+        This is O(N) in the number of objects in the pool, so it is only
+        suitable for use on toy test filesystems.
+        """
+        all_objects = self.rados(["ls"]).split("\n")
+        matching_objects = [o for o in all_objects if o.startswith(prefix)]
+        for o in matching_objects:
+            self.rados(["rm", o])
+
+    def erase_mds_objects(self, rank):
+        """
+        Erase all the per-MDS objects for a particular rank.  This includes
+        inotable, sessiontable, journal
+        """
+
+        def obj_prefix(multiplier):
+            """
+            MDS per-rank object naming convention: for example,
+            rank 1's journal objects are named 201.*
+            """
+            return "%x." % (multiplier * 0x100 + rank)
+
+        # MDS_INO_LOG_OFFSET
+        self.erase_metadata_objects(obj_prefix(2))
+        # MDS_INO_LOG_BACKUP_OFFSET
+        self.erase_metadata_objects(obj_prefix(3))
+        # MDS_INO_LOG_POINTER_OFFSET
+        self.erase_metadata_objects(obj_prefix(4))
+        # MDSTables & SessionMap
+        self.erase_metadata_objects("mds{rank:d}_".format(rank=rank))
+
+    @property
+    def _prefix(self):
+        """
+        Override this to set a different path prefix for locating the Ceph binaries.
+        """
+        return ""
+
+    def _run_tool(self, tool, args, rank=None, quiet=False):
+        # Tests frequently have [client] configuration that jacks up
+        # the objecter log level (unlikely to be interesting here)
+        # and does not set the mds log level (very interesting here)
+        if quiet:
+            base_args = [os.path.join(self._prefix, tool), '--debug-mds=1', '--debug-objecter=1']
+        else:
+            base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']
+
+        if rank is not None:
+            base_args.extend(["--rank", "%d" % rank])
+
+        t1 = datetime.datetime.now()
+        r = self.tool_remote.run(
+            args=base_args + args,
+            stdout=StringIO()).stdout.getvalue().strip()
+        duration = datetime.datetime.now() - t1
+        log.info("Ran {0} in time {1}, result:\n{2}".format(
+            base_args + args, duration, r
+        ))
+        return r
+
+    @property
+    def tool_remote(self):
+        """
+        An arbitrary remote to use when invoking recovery tools.  Use an MDS host because
+        it will definitely have keys with permission to access the cephfs metadata pool.
+        This is public so that tests can use this remote to retrieve output files written
+        locally by the tools.
+        """
+        mds_id = self.mds_ids[0]
+        return self.mds_daemons[mds_id].remote
+
+    def journal_tool(self, args, rank=None, quiet=False):
+        """
+        Invoke cephfs-journal-tool with the passed arguments, and return its stdout
+        """
+        return self._run_tool("cephfs-journal-tool", args, rank, quiet)
+
+    def table_tool(self, args, quiet=False):
+        """
+        Invoke cephfs-table-tool with the passed arguments, and return its stdout
+        """
+        return self._run_tool("cephfs-table-tool", args, None, quiet)
+
+    def data_scan(self, args, quiet=False, worker_count=1):
+        """
+        Invoke cephfs-data-scan with the passed arguments, and return its stdout
+
+        :param worker_count: if greater than 1, multiple workers will be run
+                             in parallel and the return value will be None
+        """
+
+        workers = []
+
+        for n in range(0, worker_count):
+            if worker_count > 1:
+                # data-scan args first token is a command, followed by args to it.
+                # insert worker arguments after the command.
+                cmd = args[0]
+                worker_args = [cmd] + ["--worker_n", n.__str__(), "--worker_m", worker_count.__str__()] + args[1:]
+            else:
+                worker_args = args
+
+            workers.append(Greenlet.spawn(lambda wargs=worker_args:
+                                          self._run_tool("cephfs-data-scan", wargs, None, quiet)))
+
+        for w in workers:
+            w.get()
+
+        if worker_count == 1:
+            return workers[0].value
+        else:
+            return None
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
new file mode 100644
index 0000000..df2b820
--- /dev/null
+++ b/qa/tasks/cephfs/fuse_mount.py
@@ -0,0 +1,404 @@
+
+from StringIO import StringIO
+import json
+import time
+import logging
+from textwrap import dedent
+
+from teuthology import misc
+from teuthology.contextutil import MaxWhileTries
+from teuthology.orchestra import run
+from teuthology.orchestra.run import CommandFailedError
+from .mount import CephFSMount
+
+log = logging.getLogger(__name__)
+
+
+class FuseMount(CephFSMount):
+    def __init__(self, client_config, test_dir, client_id, client_remote):
+        super(FuseMount, self).__init__(test_dir, client_id, client_remote)
+
+        self.client_config = client_config if client_config else {}
+        self.fuse_daemon = None
+        self._fuse_conn = None
+
+    def mount(self, mount_path=None):
+        log.info("Client client.%s config is %s" % (self.client_id, self.client_config))
+
+        daemon_signal = 'kill'
+        if self.client_config.get('coverage') or self.client_config.get('valgrind') is not None:
+            daemon_signal = 'term'
+
+        log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
+            id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))
+
+        self.client_remote.run(
+            args=[
+                'mkdir',
+                '--',
+                self.mountpoint,
+            ],
+        )
+
+        run_cmd = [
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=self.test_dir),
+            'daemon-helper',
+            daemon_signal,
+        ]
+
+        fuse_cmd = ['ceph-fuse', "-f"]
+
+        if mount_path is not None:
+            fuse_cmd += ["--client_mountpoint={0}".format(mount_path)]
+
+        fuse_cmd += [
+            '--name', 'client.{id}'.format(id=self.client_id),
+            # TODO ceph-fuse doesn't understand dash dash '--',
+            self.mountpoint,
+        ]
+
+        if self.client_config.get('valgrind') is not None:
+            run_cmd = misc.get_valgrind_args(
+                self.test_dir,
+                'client.{id}'.format(id=self.client_id),
+                run_cmd,
+                self.client_config.get('valgrind'),
+            )
+
+        run_cmd.extend(fuse_cmd)
+
+        def list_connections():
+            self.client_remote.run(
+                args=["sudo", "mount", "-t", "fusectl", "/sys/fs/fuse/connections", "/sys/fs/fuse/connections"],
+                check_status=False
+            )
+            p = self.client_remote.run(
+                args=["ls", "/sys/fs/fuse/connections"],
+                stdout=StringIO(),
+                check_status=False
+            )
+            if p.exitstatus != 0:
+                return []
+
+            ls_str = p.stdout.getvalue().strip()
+            if ls_str:
+                return [int(n) for n in ls_str.split("\n")]
+            else:
+                return []
+
+        # Before starting ceph-fuse process, note the contents of
+        # /sys/fs/fuse/connections
+        pre_mount_conns = list_connections()
+        log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+
+        proc = self.client_remote.run(
+            args=run_cmd,
+            logger=log.getChild('ceph-fuse.{id}'.format(id=self.client_id)),
+            stdin=run.PIPE,
+            wait=False,
+        )
+        self.fuse_daemon = proc
+
+        # Wait for the connection reference to appear in /sys
+        mount_wait = self.client_config.get('mount_wait', 0)
+        if mount_wait > 0:
+            log.info("Fuse mount waits {0} seconds before checking /sys/".format(mount_wait))
+            time.sleep(mount_wait)
+        timeout = int(self.client_config.get('mount_timeout', 30))
+        waited = 0
+
+        post_mount_conns = list_connections()
+        while len(post_mount_conns) <= len(pre_mount_conns):
+            if self.fuse_daemon.finished:
+                # Did mount fail?  Raise the CommandFailedError instead of
+                # hitting the "failed to populate /sys/" timeout
+                self.fuse_daemon.wait()
+            time.sleep(1)
+            waited += 1
+            if waited > timeout:
+                raise RuntimeError("Fuse mount failed to populate /sys/ after {0} seconds".format(
+                    waited
+                ))
+            else:
+                post_mount_conns = list_connections()
+
+        log.info("Post-mount connections: {0}".format(post_mount_conns))
+
+        # Record our fuse connection number so that we can use it when
+        # forcing an unmount
+        new_conns = list(set(post_mount_conns) - set(pre_mount_conns))
+        if len(new_conns) == 0:
+            raise RuntimeError("New fuse connection directory not found ({0})".format(new_conns))
+        elif len(new_conns) > 1:
+            raise RuntimeError("Unexpectedly numerous fuse connections {0}".format(new_conns))
+        else:
+            self._fuse_conn = new_conns[0]
+
+    def is_mounted(self):
+        proc = self.client_remote.run(
+            args=[
+                'stat',
+                '--file-system',
+                '--printf=%T\n',
+                '--',
+                self.mountpoint,
+            ],
+            stdout=StringIO(),
+            stderr=StringIO(),
+            wait=False
+        )
+        try:
+            proc.wait()
+        except CommandFailedError:
+            if ("endpoint is not connected" in proc.stderr.getvalue()
+            or "Software caused connection abort" in proc.stderr.getvalue()):
+                # This happens if fuse is killed without unmount
+                log.warn("Found stale mount point at {0}".format(self.mountpoint))
+                return True
+            else:
+                # This happens if the mount directory doesn't exist
+                log.info('mount point does not exist: %s', self.mountpoint)
+                return False
+
+        fstype = proc.stdout.getvalue().rstrip('\n')
+        if fstype == 'fuseblk':
+            log.info('ceph-fuse is mounted on %s', self.mountpoint)
+            return True
+        else:
+            log.debug('ceph-fuse not mounted, got fs type {fstype!r}'.format(
+                fstype=fstype))
+            return False
+
+    def wait_until_mounted(self):
+        """
+        Check to make sure that fuse is mounted on mountpoint.  If not,
+        sleep for 5 seconds and check again.
+        """
+
+        while not self.is_mounted():
+            # Even if it's not mounted, it should at least
+            # be running: catch simple failures where it has terminated.
+            assert not self.fuse_daemon.poll()
+
+            time.sleep(5)
+
+        # Now that we're mounted, set permissions so that the rest of the test will have
+        # unrestricted access to the filesystem mount.
+        self.client_remote.run(
+            args=['sudo', 'chmod', '1777', self.mountpoint])
+
+    def _mountpoint_exists(self):
+        return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False).exitstatus == 0
+
+    def umount(self):
+        try:
+            log.info('Running fusermount -u on {name}...'.format(name=self.client_remote.name))
+            self.client_remote.run(
+                args=[
+                    'sudo',
+                    'fusermount',
+                    '-u',
+                    self.mountpoint,
+                ],
+            )
+        except run.CommandFailedError:
+            log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=self.client_remote.name))
+
+            # abort the fuse mount, killing all hung processes
+            if self._fuse_conn:
+                self.run_python(dedent("""
+                import os
+                path = "/sys/fs/fuse/connections/{0}/abort"
+                if os.path.exists(path):
+                    open(path, "w").write("1")
+                """).format(self._fuse_conn))
+                self._fuse_conn = None
+
+            stderr = StringIO()
+            try:
+                # make sure it's unmounted
+                self.client_remote.run(
+                    args=[
+                        'sudo',
+                        'umount',
+                        '-l',
+                        '-f',
+                        self.mountpoint,
+                    ],
+                    stderr=stderr
+                )
+            except CommandFailedError:
+                if self.is_mounted():
+                    raise
+
+        assert not self.is_mounted()
+        self._fuse_conn = None
+
+    def umount_wait(self, force=False, require_clean=False):
+        """
+        :param force: Complete cleanly even if the MDS is offline
+        """
+        if force:
+            assert not require_clean  # mutually exclusive
+
+            # When we expect to be forcing, kill the ceph-fuse process directly.
+            # This should avoid hitting the more aggressive fallback killing
+            # in umount() which can affect other mounts too.
+            self.fuse_daemon.stdin.close()
+
+            # However, we will still hit the aggressive wait if there is an ongoing
+            # mount -o remount (especially if the remount is stuck because MDSs
+            # are unavailable)
+
+        self.umount()
+
+        try:
+            if self.fuse_daemon:
+                # Permit a timeout, so that we do not block forever
+                run.wait([self.fuse_daemon], 900)
+        except MaxWhileTries:
+            log.error("process failed to terminate after unmount.  This probably"
+                      "indicates a bug within ceph-fuse.")
+            raise
+        except CommandFailedError:
+            if require_clean:
+                raise
+
+        self.cleanup()
+
+    def cleanup(self):
+        """
+        Remove the mount point.
+
+        Prerequisite: the client is not mounted.
+        """
+        stderr = StringIO()
+        try:
+            self.client_remote.run(
+                args=[
+                    'rmdir',
+                    '--',
+                    self.mountpoint,
+                ],
+                stderr=stderr
+            )
+        except CommandFailedError:
+            if "No such file or directory" in stderr.getvalue():
+                pass
+            else:
+                raise
+
+    def kill(self):
+        """
+        Terminate the client without removing the mount point.
+        """
+        self.fuse_daemon.stdin.close()
+        try:
+            self.fuse_daemon.wait()
+        except CommandFailedError:
+            pass
+
+    def kill_cleanup(self):
+        """
+        Follow up ``kill`` to get to a clean unmounted state.
+        """
+        self.umount()
+        self.cleanup()
+
+    def teardown(self):
+        """
+        Whatever the state of the mount, get it gone.
+        """
+        super(FuseMount, self).teardown()
+
+        self.umount()
+
+        if self.fuse_daemon and not self.fuse_daemon.finished:
+            self.fuse_daemon.stdin.close()
+            try:
+                self.fuse_daemon.wait()
+            except CommandFailedError:
+                pass
+
+        # Indiscriminate, unlike the touchier cleanup()
+        self.client_remote.run(
+            args=[
+                'rm',
+                '-rf',
+                self.mountpoint,
+            ],
+        )
+
+    def _asok_path(self):
+        return "/var/run/ceph/ceph-client.{0}.*.asok".format(self.client_id)
+
+    @property
+    def _prefix(self):
+        return ""
+
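+    # admin_socket() locates this client's asok via the glob in _asok_path(),
+    # then shells out to 'ceph --admin-daemon <asok> <args>'.  Example
+    # (illustrative): self.admin_socket(['status']).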
+    def admin_socket(self, args):
+        pyscript = """
+import glob
+import re
+import os
+import subprocess
+
+def find_socket(client_name):
+        asok_path = "{asok_path}"
+        files = glob.glob(asok_path)
+
+        # Given a non-glob path, it better be there
+        if "*" not in asok_path:
+            assert(len(files) == 1)
+            return files[0]
+
+        for f in files:
+                pid = re.match(".*\.(\d+)\.asok$", f).group(1)
+                if os.path.exists("/proc/{{0}}".format(pid)):
+                        return f
+        raise RuntimeError("Client socket {{0}} not found".format(client_name))
+
+print find_socket("{client_name}")
+""".format(
+            asok_path=self._asok_path(),
+            client_name="client.{0}".format(self.client_id))
+
+        # Find the admin socket
+        p = self.client_remote.run(args=[
+            'python', '-c', pyscript
+        ], stdout=StringIO())
+        asok_path = p.stdout.getvalue().strip()
+        log.info("Found client admin socket at {0}".format(asok_path))
+
+        # Query client ID from admin socket
+        p = self.client_remote.run(
+            args=['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
+            stdout=StringIO())
+        return json.loads(p.stdout.getvalue())
+
+    def get_global_id(self):
+        """
+        Look up the CephFS client ID for this mount
+        """
+
+        return self.admin_socket(['mds_sessions'])['id']
+
+    def get_osd_epoch(self):
+        """
+        Return 2-tuple of osd_epoch, osd_epoch_barrier
+        """
+        status = self.admin_socket(['status'])
+        return status['osd_epoch'], status['osd_epoch_barrier']
+
+    def get_dentry_count(self):
+        """
+        Return 2-tuple of dentry_count, dentry_pinned_count
+        """
+        status = self.admin_socket(['status'])
+        return status['dentry_count'], status['dentry_pinned_count']
+
+    def set_cache_size(self, size):
+        return self.admin_socket(['config', 'set', 'client_cache_size', str(size)])
diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py
new file mode 100644
index 0000000..f9e9050
--- /dev/null
+++ b/qa/tasks/cephfs/kernel_mount.py
@@ -0,0 +1,246 @@
+from StringIO import StringIO
+import json
+import logging
+from textwrap import dedent
+from teuthology.orchestra.run import CommandFailedError
+from teuthology import misc
+
+from teuthology.orchestra import remote as orchestra_remote
+from teuthology.orchestra import run
+from .mount import CephFSMount
+
+log = logging.getLogger(__name__)
+
+
+class KernelMount(CephFSMount):
+    def __init__(self, mons, test_dir, client_id, client_remote,
+                 ipmi_user, ipmi_password, ipmi_domain):
+        super(KernelMount, self).__init__(test_dir, client_id, client_remote)
+        self.mons = mons
+
+        self.mounted = False
+        self.ipmi_user = ipmi_user
+        self.ipmi_password = ipmi_password
+        self.ipmi_domain = ipmi_domain
+
+    def write_secret_file(self, remote, role, keyring, filename):
+        """
+        Stash the keyring in the filename specified.
+        """
+        remote.run(
+            args=[
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=self.test_dir),
+                'ceph-authtool',
+                '--name={role}'.format(role=role),
+                '--print-key',
+                keyring,
+                run.Raw('>'),
+                filename,
+            ],
+        )
+
+    def mount(self, mount_path=None):
+        log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
+            id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))
+
+        keyring = self.get_keyring_path()
+        secret = '{tdir}/ceph.data/client.{id}.secret'.format(tdir=self.test_dir, id=self.client_id)
+        self.write_secret_file(self.client_remote, 'client.{id}'.format(id=self.client_id),
+                               keyring, secret)
+
+        self.client_remote.run(
+            args=[
+                'mkdir',
+                '--',
+                self.mountpoint,
+            ],
+        )
+
+        if mount_path is None:
+            mount_path = "/"
+
+        self.client_remote.run(
+            args=[
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=self.test_dir),
+                '/sbin/mount.ceph',
+                '{mons}:{mount_path}'.format(mons=','.join(self.mons), mount_path=mount_path),
+                self.mountpoint,
+                '-v',
+                '-o',
+                'name={id},secretfile={secret}'.format(id=self.client_id,
+                                                       secret=secret),
+            ],
+        )
+
+        self.client_remote.run(
+            args=['sudo', 'chmod', '1777', self.mountpoint])
+
+        self.mounted = True
+
+    def umount(self):
+        log.debug('Unmounting client client.{id}...'.format(id=self.client_id))
+        self.client_remote.run(
+            args=[
+                'sudo',
+                'umount',
+                self.mountpoint,
+            ],
+        )
+        self.client_remote.run(
+            args=[
+                'rmdir',
+                '--',
+                self.mountpoint,
+            ],
+        )
+        self.mounted = False
+
+    def cleanup(self):
+        pass
+
+    def umount_wait(self, force=False, require_clean=False):
+        """
+        Unlike the fuse client, the kernel client's umount is immediate
+        """
+        try:
+            self.umount()
+        except CommandFailedError:
+            if not force:
+                raise
+
+            self.kill()
+            self.kill_cleanup()
+
+        self.mounted = False
+
+    def is_mounted(self):
+        return self.mounted
+
+    def wait_until_mounted(self):
+        """
+        Unlike the fuse client, the kernel client is up and running as soon
+        as the initial mount() function returns.
+        """
+        assert self.mounted
+
+    def teardown(self):
+        super(KernelMount, self).teardown()
+        if self.mounted:
+            self.umount()
+
+    def kill(self):
+        """
+        The Ceph kernel client doesn't have a mechanism to kill itself (doing
+        that inside the kernel would be weird anyway), so we reboot the whole node
+        to get the same effect.
+
+        We use IPMI to reboot, because we don't want the client to send any
+        releases of capabilities.
+        """
+
+        con = orchestra_remote.getRemoteConsole(self.client_remote.hostname,
+                                                self.ipmi_user,
+                                                self.ipmi_password,
+                                                self.ipmi_domain)
+        con.power_off()
+
+        self.mounted = False
+
+    def kill_cleanup(self):
+        assert not self.mounted
+
+        con = orchestra_remote.getRemoteConsole(self.client_remote.hostname,
+                                                self.ipmi_user,
+                                                self.ipmi_password,
+                                                self.ipmi_domain)
+        con.power_on()
+
+        # Wait for node to come back up after reboot
+        misc.reconnect(None, 300, [self.client_remote])
+
+        # Remove mount directory
+        self.client_remote.run(
+            args=[
+                'rmdir',
+                '--',
+                self.mountpoint,
+            ],
+        )
+
+    def _find_debug_dir(self):
+        """
+        Find the debugfs folder for this mount
+        """
+        pyscript = dedent("""
+            import glob
+            import os
+            import json
+
+            def get_id_to_dir():
+                result = {}
+                for dir in glob.glob("/sys/kernel/debug/ceph/*"):
+                    mds_sessions_lines = open(os.path.join(dir, "mds_sessions")).readlines()
+                    client_id = mds_sessions_lines[1].split()[1].strip('"')
+
+                    result[client_id] = dir
+                return result
+
+            print json.dumps(get_id_to_dir())
+            """)
+
+        p = self.client_remote.run(args=[
+            'sudo', 'python', '-c', pyscript
+        ], stdout=StringIO())
+        client_id_to_dir = json.loads(p.stdout.getvalue())
+
+        try:
+            return client_id_to_dir[self.client_id]
+        except KeyError:
+            log.error("Client id '{0}' debug dir not found (clients seen were: {1})".format(
+                self.client_id, ",".join(client_id_to_dir.keys())
+            ))
+            raise
+
+    def _read_debug_file(self, filename):
+        debug_dir = self._find_debug_dir()
+
+        pyscript = dedent("""
+            import os
+
+            print open(os.path.join("{debug_dir}", "{filename}")).read()
+            """).format(debug_dir=debug_dir, filename=filename)
+
+        p = self.client_remote.run(args=[
+            'sudo', 'python', '-c', pyscript
+        ], stdout=StringIO())
+        return p.stdout.getvalue()
+
+    def get_global_id(self):
+        """
+        Look up the CephFS client ID for this mount, using debugfs.
+        """
+
+        assert self.mounted
+
+        mds_sessions = self._read_debug_file("mds_sessions")
+        lines = mds_sessions.split("\n")
+        return int(lines[0].split()[1])
+
+    def get_osd_epoch(self):
+        """
+        Return 2-tuple of osd_epoch, osd_epoch_barrier
+        """
+        osd_map = self._read_debug_file("osdmap")
+        lines = osd_map.split("\n")
+        epoch = int(lines[0].split()[1])
+
+        mds_sessions = self._read_debug_file("mds_sessions")
+        lines = mds_sessions.split("\n")
+        epoch_barrier = int(lines[2].split()[1].strip('"'))
+
+        return epoch, epoch_barrier
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
new file mode 100644
index 0000000..f3b16db
--- /dev/null
+++ b/qa/tasks/cephfs/mount.py
@@ -0,0 +1,585 @@
+from contextlib import contextmanager
+import json
+import logging
+import datetime
+import time
+from textwrap import dedent
+import os
+from StringIO import StringIO
+from teuthology.orchestra import run
+from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+
+log = logging.getLogger(__name__)
+
+
+class CephFSMount(object):
+    def __init__(self, test_dir, client_id, client_remote):
+        """
+        :param test_dir: Global teuthology test dir
+        :param client_id: Client ID, the 'foo' in client.foo
+        :param client_remote: Remote instance for the host where client will run
+        """
+
+        self.test_dir = test_dir
+        self.client_id = client_id
+        self.client_remote = client_remote
+        self.mountpoint_dir_name = 'mnt.{id}'.format(id=self.client_id)
+
+        self.test_files = ['a', 'b', 'c']
+
+        self.background_procs = []
+
+    @property
+    def mountpoint(self):
+        return os.path.join(
+            self.test_dir, '{dir_name}'.format(dir_name=self.mountpoint_dir_name))
+
+    def is_mounted(self):
+        raise NotImplementedError()
+
+    def mount(self, mount_path=None):
+        raise NotImplementedError()
+
+    def umount(self):
+        raise NotImplementedError()
+
+    def umount_wait(self, force=False, require_clean=False):
+        """
+
+        :param force: Expect that the mount will not shut down cleanly: kill
+                      it hard.
+        :param require_clean: Wait for the Ceph client associated with the
+                              mount (e.g. ceph-fuse) to terminate, and
+                              raise if it doesn't do so cleanly.
+        :return:
+        """
+        raise NotImplementedError()
+
+    def kill_cleanup(self):
+        raise NotImplementedError()
+
+    def kill(self):
+        raise NotImplementedError()
+
+    def cleanup(self):
+        raise NotImplementedError()
+
+    def wait_until_mounted(self):
+        raise NotImplementedError()
+
+    def get_keyring_path(self):
+        return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id)
+
+    @property
+    def config_path(self):
+        """
+        Path to ceph.conf: override this if this is not a normal system-wide ceph install
+        :return: string
+        """
+        return "/etc/ceph/ceph.conf"
+
+    @contextmanager
+    def mounted(self):
+        """
+        A context manager, from an initially unmounted state, to mount
+        this, yield, and then unmount and clean up.
+        """
+        self.mount()
+        self.wait_until_mounted()
+        try:
+            yield
+        finally:
+            self.umount_wait()
+
+    def create_files(self):
+        assert(self.is_mounted())
+
+        for suffix in self.test_files:
+            log.info("Creating file {0}".format(suffix))
+            self.client_remote.run(args=[
+                'sudo', 'touch', os.path.join(self.mountpoint, suffix)
+            ])
+
+    def check_files(self):
+        assert(self.is_mounted())
+
+        for suffix in self.test_files:
+            log.info("Checking file {0}".format(suffix))
+            r = self.client_remote.run(args=[
+                'sudo', 'ls', os.path.join(self.mountpoint, suffix)
+            ], check_status=False)
+            if r.exitstatus != 0:
+                raise RuntimeError("Expected file {0} not found".format(suffix))
+
+    def create_destroy(self):
+        assert(self.is_mounted())
+
+        filename = "{0} {1}".format(datetime.datetime.now(), self.client_id)
+        log.debug("Creating test file {0}".format(filename))
+        self.client_remote.run(args=[
+            'sudo', 'touch', os.path.join(self.mountpoint, filename)
+        ])
+        log.debug("Deleting test file {0}".format(filename))
+        self.client_remote.run(args=[
+            'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename)
+        ])
+
+    def _run_python(self, pyscript):
+        return self.client_remote.run(args=[
+            'sudo', 'adjust-ulimits', 'daemon-helper', 'kill', 'python', '-c', pyscript
+        ], wait=False, stdin=run.PIPE, stdout=StringIO())
+
+    def run_python(self, pyscript):
+        p = self._run_python(pyscript)
+        p.wait()
+        return p.stdout.getvalue().strip()
+
+    def run_shell(self, args, wait=True):
+        args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
+        return self.client_remote.run(args=args, stdout=StringIO(),
+                                      stderr=StringIO(), wait=wait)
+
+    def open_no_data(self, basename):
+        """
+        A pure metadata operation
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.mountpoint, basename)
+
+        p = self._run_python(dedent(
+            """
+            f = open("{path}", 'w')
+            """.format(path=path)
+        ))
+        p.wait()
+
+    def open_background(self, basename="background_file"):
+        """
+        Open a file for writing, then block such that the client
+        will hold a capability
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.mountpoint, basename)
+
+        pyscript = dedent("""
+            import time
+
+            f = open("{path}", 'w')
+            f.write('content')
+            f.flush()
+            f.write('content2')
+            while True:
+                time.sleep(1)
+            """).format(path=path)
+
+        rproc = self._run_python(pyscript)
+        self.background_procs.append(rproc)
+        return rproc
+
+    def wait_for_visible(self, basename="background_file", timeout=30):
+        i = 0
+        while i < timeout:
+            r = self.client_remote.run(args=[
+                'sudo', 'ls', os.path.join(self.mountpoint, basename)
+            ], check_status=False)
+            if r.exitstatus == 0:
+                log.debug("File {0} became visible from {1} after {2}s".format(
+                    basename, self.client_id, i))
+                return
+            else:
+                time.sleep(1)
+                i += 1
+
+        raise RuntimeError("Timed out after {0}s waiting for {1} to become visible from {2}".format(
+            i, basename, self.client_id))
+
+    def lock_background(self, basename="background_file", do_flock=True):
+        """
+        Open and lock files for writing, holding the locks in a background process
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.mountpoint, basename)
+
+        script_builder = """
+            import time
+            import fcntl
+            import struct"""
+        if do_flock:
+            script_builder += """
+            f1 = open("{path}-1", 'w')
+            fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)"""
+        script_builder += """
+            f2 = open("{path}-2", 'w')
+            lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
+            fcntl.fcntl(f2, fcntl.F_SETLK, lockdata)
+            while True:
+                time.sleep(1)
+            """
+
+        pyscript = dedent(script_builder).format(path=path)
+
+        log.info("lock file {0}".format(basename))
+        rproc = self._run_python(pyscript)
+        self.background_procs.append(rproc)
+        return rproc
+
+    def check_filelock(self, basename="background_file", do_flock=True):
+        assert(self.is_mounted())
+
+        path = os.path.join(self.mountpoint, basename)
+
+        script_builder = """
+            import fcntl
+            import errno
+            import struct"""
+        if do_flock:
+            script_builder += """
+            f1 = open("{path}-1", 'r')
+            try:
+                fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)
+            except IOError, e:
+                if e.errno == errno.EAGAIN:
+                    pass
+            else:
+                raise RuntimeError("flock on file {path}-1 not found")"""
+        script_builder += """
+            f2 = open("{path}-2", 'r')
+            try:
+                lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
+                fcntl.fcntl(f2, fcntl.F_SETLK, lockdata)
+            except IOError, e:
+                if e.errno == errno.EAGAIN:
+                    pass
+            else:
+                raise RuntimeError("posix lock on file {path}-2 not found")
+            """
+        pyscript = dedent(script_builder).format(path=path)
+
+        log.info("check lock on file {0}".format(basename))
+        self.client_remote.run(args=[
+            'sudo', 'python', '-c', pyscript
+        ])
+
+    def write_background(self, basename="background_file", loop=False):
+        """
+        Open a file for writing; write to it once and return, or keep writing
+        in a loop if ``loop`` is set.
+        :param basename:
+        :return:
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.mountpoint, basename)
+
+        pyscript = dedent("""
+            import os
+            import time
+
+            fd = os.open("{path}", os.O_RDWR | os.O_CREAT, 0644)
+            try:
+                while True:
+                    os.write(fd, 'content')
+                    time.sleep(1)
+                    if not {loop}:
+                        break
+            except IOError, e:
+                pass
+            os.close(fd)
+            """).format(path=path, loop=str(loop))
+
+        rproc = self._run_python(pyscript)
+        self.background_procs.append(rproc)
+        return rproc
+
+    def write_n_mb(self, filename, n_mb, seek=0, wait=True):
+        """
+        Write the requested number of megabytes to a file
+        """
+        assert(self.is_mounted())
+
+        return self.run_shell(["dd", "if=/dev/urandom", "of={0}".format(filename),
+                               "bs=1M", "conv=fdatasync",
+                               "count={0}".format(n_mb),
+                               "seek={0}".format(seek)
+                               ], wait=wait)
+
+    def write_test_pattern(self, filename, size):
+        log.info("Writing {0} bytes to {1}".format(size, filename))
+        return self.run_python(dedent("""
+            import zlib
+            path = "{path}"
+            f = open(path, 'w')
+            for i in range(0, {size}):
+                val = zlib.crc32("%s" % i) & 7
+                f.write(chr(val))
+            f.close()
+        """.format(
+            path=os.path.join(self.mountpoint, filename),
+            size=size
+        )))
+
+    def validate_test_pattern(self, filename, size):
+        log.info("Validating {0} bytes from {1}".format(size, filename))
+        return self.run_python(dedent("""
+            import zlib
+            path = "{path}"
+            f = open(path, 'r')
+            bytes = f.read()
+            f.close()
+            if len(bytes) != {size}:
+                raise RuntimeError("Bad length {{0}} vs. expected {{1}}".format(
+                    len(bytes), {size}
+                ))
+            for i, b in enumerate(bytes):
+                val = zlib.crc32("%s" % i) & 7
+                if b != chr(val):
+                    raise RuntimeError("Bad data at offset {{0}}".format(i))
+        """.format(
+            path=os.path.join(self.mountpoint, filename),
+            size=size
+        )))
+
+    def open_n_background(self, fs_path, count):
+        """
+        Open N files for writing, hold them open in a background process
+
+        :param fs_path: Path relative to CephFS root, e.g. "foo/bar"
+        :return: a RemoteProcess
+        """
+        assert(self.is_mounted())
+
+        abs_path = os.path.join(self.mountpoint, fs_path)
+
+        pyscript = dedent("""
+            import sys
+            import time
+            import os
+
+            n = {count}
+            abs_path = "{abs_path}"
+
+            if not os.path.exists(os.path.dirname(abs_path)):
+                os.makedirs(os.path.dirname(abs_path))
+
+            handles = []
+            for i in range(0, n):
+                fname = "{{0}}_{{1}}".format(abs_path, i)
+                handles.append(open(fname, 'w'))
+
+            while True:
+                time.sleep(1)
+            """).format(abs_path=abs_path, count=count)
+
+        rproc = self._run_python(pyscript)
+        self.background_procs.append(rproc)
+        return rproc
+
+    def create_n_files(self, fs_path, count, sync=False):
+        assert(self.is_mounted())
+
+        abs_path = os.path.join(self.mountpoint, fs_path)
+
+        pyscript = dedent("""
+            import sys
+            import time
+            import os
+
+            n = {count}
+            abs_path = "{abs_path}"
+
+            if not os.path.exists(os.path.dirname(abs_path)):
+                os.makedirs(os.path.dirname(abs_path))
+
+            for i in range(0, n):
+                fname = "{{0}}_{{1}}".format(abs_path, i)
+                h = open(fname, 'w')
+                h.write('content')
+                if {sync}:
+                    h.flush()
+                    os.fsync(h.fileno())
+                h.close()
+            """).format(abs_path=abs_path, count=count, sync=str(sync))
+
+        self.run_python(pyscript)
+
+    def teardown(self):
+        for p in self.background_procs:
+            log.info("Terminating background process")
+            self._kill_background(p)
+
+        self.background_procs = []
+
+    def _kill_background(self, p):
+        if p.stdin:
+            p.stdin.close()
+            try:
+                p.wait()
+            except (CommandFailedError, ConnectionLostError):
+                pass
+
+    def kill_background(self, p):
+        """
+        For a process that was returned by one of the _background member functions,
+        kill it hard.
+        """
+        self._kill_background(p)
+        self.background_procs.remove(p)
+
+    def spam_dir_background(self, path):
+        """
+        Create directory `path` and do lots of metadata operations
+        in it until further notice.
+        """
+        assert(self.is_mounted())
+        abs_path = os.path.join(self.mountpoint, path)
+
+        pyscript = dedent("""
+            import sys
+            import time
+            import os
+
+            abs_path = "{abs_path}"
+
+            if not os.path.exists(abs_path):
+                os.makedirs(abs_path)
+
+            n = 0
+            while True:
+                file_path = os.path.join(abs_path, "tmp%d" % n)
+                f = open(file_path, 'w')
+                f.close()
+                n = n + 1
+            """).format(abs_path=abs_path)
+
+        rproc = self._run_python(pyscript)
+        self.background_procs.append(rproc)
+        return rproc
+
+    def get_global_id(self):
+        raise NotImplementedError()
+
+    def get_osd_epoch(self):
+        raise NotImplementedError()
+
+    def stat(self, fs_path, wait=True):
+        """
+        stat a file, and return the result as a dictionary like this:
+        {
+          "st_ctime": 1414161137.0,
+          "st_mtime": 1414161137.0,
+          "st_nlink": 33,
+          "st_gid": 0,
+          "st_dev": 16777218,
+          "st_size": 1190,
+          "st_ino": 2,
+          "st_uid": 0,
+          "st_mode": 16877,
+          "st_atime": 1431520593.0
+        }
+
+        Raises exception on absent file.
+        """
+        abs_path = os.path.join(self.mountpoint, fs_path)
+
+        pyscript = dedent("""
+            import os
+            import stat
+            import json
+            import sys
+
+            try:
+                s = os.stat("{path}")
+            except OSError as e:
+                sys.exit(e.errno)
+
+            attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
+            print json.dumps(
+                dict([(a, getattr(s, a)) for a in attrs]),
+                indent=2)
+            """).format(path=abs_path)
+        proc = self._run_python(pyscript)
+        if wait:
+            proc.wait()
+            return json.loads(proc.stdout.getvalue().strip())
+        else:
+            return proc
+
+    def touch(self, fs_path):
+        """
+        Create a dentry if it doesn't already exist.  This python
+        implementation exists because the usual command line tool doesn't
+        pass through error codes like EIO.
+
+        :param fs_path:
+        :return:
+        """
+        abs_path = os.path.join(self.mountpoint, fs_path)
+        pyscript = dedent("""
+            import sys
+            import errno
+
+            try:
+                f = open("{path}", "w")
+                f.close()
+            except IOError as e:
+                sys.exit(errno.EIO)
+            """).format(path=abs_path)
+        proc = self._run_python(pyscript)
+        proc.wait()
+
+    def path_to_ino(self, fs_path):
+        abs_path = os.path.join(self.mountpoint, fs_path)
+
+        pyscript = dedent("""
+            import os
+            import stat
+
+            print os.stat("{path}").st_ino
+            """).format(path=abs_path)
+        proc = self._run_python(pyscript)
+        proc.wait()
+        return int(proc.stdout.getvalue().strip())
+
+    def ls(self, path=None):
+        """
+        Wrap ls: return a list of strings
+        """
+        cmd = ["ls"]
+        if path:
+            cmd.append(path)
+
+        ls_text = self.run_shell(cmd).stdout.getvalue().strip()
+
+        if ls_text:
+            return ls_text.split("\n")
+        else:
+            # Special case because otherwise split on empty string
+            # gives you [''] instead of []
+            return []
+
+    def getfattr(self, path, attr):
+        """
+        Wrap getfattr: return the values of a named xattr on one file.
+
+        :return: a string
+        """
+        p = self.run_shell(["getfattr", "--only-values", "-n", attr, path])
+        return p.stdout.getvalue()
+
+    def df(self):
+        """
+        Wrap df: return a dict of usage fields in bytes
+        """
+
+        p = self.run_shell(["df", "-B1", "."])
+        lines = p.stdout.getvalue().strip().split("\n")
+        fs, total, used, avail = lines[1].split()[:4]
+        log.warn(lines)
+
+        return {
+            "total": int(total),
+            "used": int(used),
+            "available": int(avail)
+        }
diff --git a/qa/tasks/cephfs/test_auto_repair.py b/qa/tasks/cephfs/test_auto_repair.py
new file mode 100644
index 0000000..033d8dd
--- /dev/null
+++ b/qa/tasks/cephfs/test_auto_repair.py
@@ -0,0 +1,90 @@
+
+"""
+Exercise the MDS's auto repair functions
+"""
+
+import logging
+import time
+
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+
+log = logging.getLogger(__name__)
+
+
+# Arbitrary timeouts for operations involving restarting
+# an MDS or waiting for it to come up
+MDS_RESTART_GRACE = 60
+
+
+class TestMDSAutoRepair(CephFSTestCase):
+    def test_backtrace_repair(self):
+        """
+        MDS should verify/fix backtrace on fetch dirfrag
+        """
+
+        self.mount_a.run_shell(["mkdir", "testdir1"])
+        self.mount_a.run_shell(["touch", "testdir1/testfile"])
+        dir_objname = "{:x}.00000000".format(self.mount_a.path_to_ino("testdir1"))
+
+        # drop inodes caps
+        self.mount_a.umount_wait()
+
+        # flush journal entries to dirfrag objects, and expire journal
+        self.fs.mds_asok(['flush', 'journal'])
+
+        # Restart the MDS to drop the metadata cache (because we expired the journal,
+        # nothing gets replayed into cache on restart)
+        self.fs.mds_stop()
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        # remove testdir1's backtrace
+        self.fs.rados(["rmxattr", dir_objname, "parent"])
+
+        # readdir (fetch dirfrag) should fix testdir1's backtrace
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        self.mount_a.run_shell(["ls", "testdir1"])
+
+        # flush journal entries to dirfrag objects
+        self.fs.mds_asok(['flush', 'journal'])
+
+        # check if backtrace exists
+        self.fs.rados(["getxattr", dir_objname, "parent"])
+
+    def test_mds_readonly(self):
+        """
+        Test that the MDS behaves correctly when it is read-only
+        """
+        # operations should succeed when the MDS is not read-only
+        self.mount_a.run_shell(["touch", "test_file1"])
+        writer = self.mount_a.write_background(loop=True)
+
+        time.sleep(10)
+        self.assertFalse(writer.finished)
+
+        # force MDS to read-only mode
+        self.fs.mds_asok(['force_readonly'])
+        time.sleep(10)
+
+        # touching test file should fail
+        try:
+            self.mount_a.run_shell(["touch", "test_file1"])
+        except CommandFailedError:
+            pass
+        else:
+            self.assertTrue(False)
+
+        # background writer also should fail
+        self.assertTrue(writer.finished)
+
+        # The MDS should report its readonly health state to the mon
+        self.wait_for_health("MDS in read-only mode", timeout=30)
+
+        # restart mds to make it writable
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        self.wait_for_health_clear(timeout=30)
diff --git a/qa/tasks/cephfs/test_backtrace.py b/qa/tasks/cephfs/test_backtrace.py
new file mode 100644
index 0000000..6d7308e
--- /dev/null
+++ b/qa/tasks/cephfs/test_backtrace.py
@@ -0,0 +1,82 @@
+
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+
+class TestBacktrace(CephFSTestCase):
+    def test_backtrace(self):
+        """
+        That the 'parent' and 'layout' xattrs on the head objects of files
+        are updated correctly.
+        """
+
+        def get_pool_id(name):
+            return self.fs.mon_manager.get_pool_dump(name)['pool']
+
+        old_data_pool_name = self.fs.get_data_pool_name()
+        old_pool_id = get_pool_id(old_data_pool_name)
+
+        # Create a file for subsequent checks
+        self.mount_a.run_shell(["mkdir", "parent_a"])
+        self.mount_a.run_shell(["touch", "parent_a/alpha"])
+        file_ino = self.mount_a.path_to_ino("parent_a/alpha")
+
+        # That backtrace and layout are written after initial flush
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_a'], [a['dname'] for a in backtrace['ancestors']])
+        layout = self.fs.read_layout(file_ino)
+        self.assertDictEqual(layout, {
+            "stripe_unit": 4194304,
+            "stripe_count": 1,
+            "object_size": 4194304,
+            "pool_id": old_pool_id,
+            "pool_ns": "",
+        })
+        self.assertEqual(backtrace['pool'], old_pool_id)
+
+        # That backtrace is written after parentage changes
+        self.mount_a.run_shell(["mkdir", "parent_b"])
+        self.mount_a.run_shell(["mv", "parent_a/alpha", "parent_b/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace['ancestors']])
+
+        # Create a new data pool
+        new_pool_name = "data_new"
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool_name,
+                                            str(self.fs.get_pgs_per_fs_pool()))
+        self.fs.mon_manager.raw_cluster_cmd('mds', 'add_data_pool', new_pool_name)
+        new_pool_id = get_pool_id(new_pool_name)
+
+        # That an object which has switched pools gets its backtrace updated
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.pool", "-v", new_pool_name, "./parent_b/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(backtrace_old_pool['pool'], new_pool_id)
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(backtrace_new_pool['pool'], new_pool_id)
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['pool_id'], new_pool_id)
+        self.assertEqual(new_pool_layout['pool_ns'], '')
+
+        # That subsequent linkage changes are only written to new pool backtrace
+        self.mount_a.run_shell(["mkdir", "parent_c"])
+        self.mount_a.run_shell(["mv", "parent_b/alpha", "parent_c/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace_old_pool['ancestors']])
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(['alpha', 'parent_c'], [a['dname'] for a in backtrace_new_pool['ancestors']])
+
+        # That the layout is written to the new pool after a change to another layout field
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.object_size", "-v", "8388608", "./parent_c/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['object_size'], 8388608)
+
+        # ...but not to the old pool: the old pool's backtrace points to the new pool, and that's
+        # enough; we don't update the layout in all the old pools whenever it changes
+        old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name)
+        self.assertEqual(old_pool_layout['object_size'], 4194304)
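+
+
+# Illustrative sketch (unused by the test above): the layout asserted here
+# (4 MiB stripe_unit and object_size, stripe_count 1) controls how file
+# offsets map onto data-pool objects named "<ino-hex>.<object-number>".  The
+# arithmetic below is a simplified rendering of that striping scheme, for
+# reference only.
+def object_for_offset(ino, offset, stripe_unit=4194304, stripe_count=1,
+                      object_size=4194304):
+    """Name of the data-pool object expected to hold `offset` of inode `ino`."""
+    stripes_per_object = object_size // stripe_unit
+    block_no = offset // stripe_unit          # which stripe-unit-sized block
+    stripe_no = block_no // stripe_count      # which row of the object set
+    stripe_pos = block_no % stripe_count      # which column (object) in the set
+    object_set_no = stripe_no // stripes_per_object
+    object_no = object_set_no * stripe_count + stripe_pos
+    return "{0:x}.{1:08x}".format(ino, object_no)
+
+# With the default layout this degenerates to offset // object_size, so the
+# first ("head") object of a file is always "<ino-hex>.00000000", which is the
+# object that read_backtrace()/read_layout() inspect above.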
diff --git a/qa/tasks/cephfs/test_cap_flush.py b/qa/tasks/cephfs/test_cap_flush.py
new file mode 100644
index 0000000..f142b00
--- /dev/null
+++ b/qa/tasks/cephfs/test_cap_flush.py
@@ -0,0 +1,63 @@
+
+import os
+import time
+from textwrap import dedent
+from unittest import SkipTest
+from tasks.cephfs.fuse_mount import FuseMount
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+class TestCapFlush(CephFSTestCase):
+    def test_replay_create(self):
+        """
+        The MDS starts handling client caps when it enters the clientreplay stage.
+        While handling a client cap in that stage, it is possible that the
+        corresponding inode does not exist yet, because the client request that
+        creates the inode has not been replayed.
+        """
+
+        if not isinstance(self.mount_a, FuseMount):
+            raise SkipTest("Require FUSE client to inject client release failure")
+
+        dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
+        py_script = dedent("""
+            import os
+            os.mkdir("{0}")
+            fd = os.open("{0}", os.O_RDONLY)
+            os.fchmod(fd, 0777)
+            os.fsync(fd)
+            """).format(dir_path)
+        self.mount_a.run_python(py_script)
+
+        self.fs.mds_asok(["flush", "journal"])
+
+        # with the MDS log paused, the client will only get an unsafe reply
+        self.fs.mds_asok(["config", "set", "mds_log_pause", "1"])
+
+        file_name = "testfile"
+        file_path = dir_path + "/" + file_name
+
+        # Create a file and modify its mode. ceph-fuse will mark Ax cap dirty
+        py_script = dedent("""
+            import os
+            os.chdir("{0}")
+            os.setgid(65534)
+            os.setuid(65534)
+            fd = os.open("{1}", os.O_CREAT | os.O_RDWR, 0644)
+            os.fchmod(fd, 0640)
+            """).format(dir_path, file_name)
+        self.mount_a.run_python(py_script)
+
+        # Modify file mode by different user. ceph-fuse will send a setattr request
+        self.mount_a.run_shell(["chmod", "600", file_path], wait=False)
+
+        time.sleep(10)
+
+        # Restart mds. Client will re-send the unsafe request and cap flush
+        self.fs.mds_stop()
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        mode = self.mount_a.run_shell(['stat', '-c' '%a', file_path]).stdout.getvalue().strip()
+        # If the cap flush got dropped, the mode would be 0644.
+        # (Ax cap stays in dirty state, which prevents setattr reply from updating file mode)
+        self.assertEqual(mode, "600")
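+
+
+# Illustrative sketch (unused by the test above): self.fs.mds_asok([...]) goes
+# through the MDS admin socket.  Outside of teuthology the same knob can be
+# reached with the `ceph daemon` CLI; the daemon name "mds.a" below is an
+# assumption of the sketch.
+def pause_mds_log(paused, daemon="mds.a"):
+    """Pause or resume submission of new MDS log events via the admin socket."""
+    import subprocess
+    subprocess.check_call(["ceph", "daemon", daemon, "config", "set",
+                           "mds_log_pause", "1" if paused else "0"])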
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
new file mode 100644
index 0000000..c6bce44
--- /dev/null
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -0,0 +1,219 @@
+
+"""
+Exercise the MDS's behaviour when clients and the MDCache reach or
+exceed the limits of how many caps/inodes they should hold.
+"""
+
+import logging
+from textwrap import dedent
+from unittest import SkipTest
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
+from tasks.cephfs.fuse_mount import FuseMount
+import os
+
+
+log = logging.getLogger(__name__)
+
+
+# Arbitrary timeouts for operations involving restarting
+# an MDS or waiting for it to come up
+MDS_RESTART_GRACE = 60
+
+# Hardcoded values from Server::recall_client_state
+CAP_RECALL_RATIO = 0.8
+CAP_RECALL_MIN = 100
+
+
+class TestClientLimits(CephFSTestCase):
+    REQUIRE_KCLIENT_REMOTE = True
+    CLIENTS_REQUIRED = 2
+
+    def _test_client_pin(self, use_subdir):
+        """
+        When a client pins an inode in its cache, for example because the file is held open,
+        it should reject requests from the MDS to trim these caps.  The MDS should complain
+        to the user that it is unable to enforce its cache size limits because of this
+        objectionable client.
+
+        :param use_subdir: whether to put test files in a subdir or use root
+        """
+
+        cache_size = 200
+        open_files = 250
+
+        self.set_conf('mds', 'mds cache size', cache_size)
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        mount_a_client_id = self.mount_a.get_global_id()
+        path = "subdir/mount_a" if use_subdir else "mount_a"
+        open_proc = self.mount_a.open_n_background(path, open_files)
+
+        # Client should now hold:
+        # `open_files` caps for the open files
+        # 1 cap for root
+        # 1 cap for subdir
+        self.wait_until_equal(lambda: self.get_session(mount_a_client_id)['num_caps'],
+                              open_files + (2 if use_subdir else 1),
+                              timeout=600,
+                              reject_fn=lambda x: x > open_files + 2)
+
+        # MDS should not be happy about that, as the client is failing to comply
+        # with the SESSION_RECALL messages it is being sent
+        mds_recall_state_timeout = int(self.fs.get_config("mds_recall_state_timeout"))
+        self.wait_for_health("failing to respond to cache pressure", mds_recall_state_timeout + 10)
+
+        # When the client closes the files, it should retain only as many caps as allowed
+        # under the SESSION_RECALL policy
+        log.info("Terminating process holding files open")
+        open_proc.stdin.close()
+        try:
+            open_proc.wait()
+        except CommandFailedError:
+            # We killed it, so it raises an error
+            pass
+
+        # The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message,
+        # which depend on the cache size and overall ratio
+        self.wait_until_equal(
+            lambda: self.get_session(mount_a_client_id)['num_caps'],
+            int(cache_size * 0.8),
+            timeout=600,
+            reject_fn=lambda x: x < int(cache_size*.8))
+
+    @needs_trimming
+    def test_client_pin_root(self):
+        self._test_client_pin(False)
+
+    @needs_trimming
+    def test_client_pin(self):
+        self._test_client_pin(True)
+
+    def test_client_release_bug(self):
+        """
+        When a client has a bug (which we will simulate) preventing it from releasing caps,
+        the MDS should notice that releases are not being sent promptly, and generate a health
+        metric to that effect.
+        """
+
+        # The debug hook to inject the failure only exists in the fuse client
+        if not isinstance(self.mount_a, FuseMount):
+            raise SkipTest("Require FUSE client to inject client release failure")
+
+        self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
+        self.mount_a.teardown()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        mount_a_client_id = self.mount_a.get_global_id()
+
+        # Client A creates a file.  He will hold the write caps on the file, and later (simulated bug) fail
+        # to comply with the MDS's request to release that cap
+        self.mount_a.run_shell(["touch", "file1"])
+
+        # Client B tries to stat the file that client A created
+        rproc = self.mount_b.write_background("file1")
+
+        # After mds_revoke_cap_timeout, we should see a health warning (extra lag from
+        # MDS beacon period)
+        mds_revoke_cap_timeout = int(self.fs.get_config("mds_revoke_cap_timeout"))
+        self.wait_for_health("failing to respond to capability release", mds_revoke_cap_timeout + 10)
+
+        # Client B should still be stuck
+        self.assertFalse(rproc.finished)
+
+        # Kill client A
+        self.mount_a.kill()
+        self.mount_a.kill_cleanup()
+
+        # Client B should complete
+        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
+        rproc.wait()
+
+    def test_client_oldest_tid(self):
+        """
+        When a client does not advance its oldest tid, the MDS should notice that
+        and generate health warnings.
+        """
+
+        # num of requests client issues
+        max_requests = 1000
+
+        # The debug hook to keep the oldest tid fixed only exists in the fuse client
+        if not isinstance(self.mount_a, FuseMount):
+            raise SkipTest("Require FUSE client to inject fixed oldest tid")
+
+        self.set_conf('client', 'client inject fixed oldest tid', 'true')
+        self.mount_a.teardown()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        self.fs.mds_asok(['config', 'set', 'mds_max_completed_requests', '{0}'.format(max_requests)])
+
+        # Create lots of files
+        self.mount_a.create_n_files("testdir/file1", max_requests + 100)
+
+        # Create a few files synchronously. This makes sure previous requests are completed
+        self.mount_a.create_n_files("testdir/file2", 5, True)
+
+        # Wait for the health warnings. Assume the MDS can handle at least 10 requests per second
+        self.wait_for_health("failing to advance its oldest client/flush tid", max_requests / 10)
+
+    def _test_client_cache_size(self, mount_subdir):
+        """
+        Check that the client invalidates the kernel dcache according to its cache size config
+        """
+
+        # The cache size control exercised here only exists in the fuse client
+        if not isinstance(self.mount_a, FuseMount):
+            raise SkipTest("Require FUSE client to set client cache size")
+
+        if mount_subdir:
+            # FUSE assigns a fixed inode number (1) to the root inode. But when
+            # mounting a subdir, the actual inode number of the root is not 1.
+            # This mismatch confuses fuse_lowlevel_notify_inval_entry() when
+            # invalidating dentries in the root directory.
+            self.mount_a.run_shell(["mkdir", "subdir"])
+            self.mount_a.umount_wait()
+            self.set_conf('client', 'client mountpoint', '/subdir')
+            self.mount_a.mount()
+            self.mount_a.wait_until_mounted()
+            root_ino = self.mount_a.path_to_ino(".")
+            self.assertEqual(root_ino, 1)
+
+        dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
+
+        mkdir_script = dedent("""
+            import os
+            os.mkdir("{path}")
+            for n in range(0, {num_dirs}):
+                os.mkdir("{path}/dir{{0}}".format(n))
+            """)
+
+        num_dirs = 1000
+        self.mount_a.run_python(mkdir_script.format(path=dir_path, num_dirs=num_dirs))
+        self.mount_a.run_shell(["sync"])
+
+        dentry_count, dentry_pinned_count = self.mount_a.get_dentry_count()
+        self.assertGreaterEqual(dentry_count, num_dirs)
+        self.assertGreaterEqual(dentry_pinned_count, num_dirs)
+
+        cache_size = num_dirs / 10
+        self.mount_a.set_cache_size(cache_size)
+
+        def trimmed():
+            dentry_count, dentry_pinned_count = self.mount_a.get_dentry_count()
+            log.info("waiting, dentry_count, dentry_pinned_count: {0}, {1}".format(
+                dentry_count, dentry_pinned_count
+            ))
+            if dentry_count > cache_size or dentry_pinned_count > cache_size:
+                return False
+
+            return True
+
+        self.wait_until_true(trimmed, 30)
+
+    @needs_trimming
+    def test_client_cache_size(self):
+        self._test_client_cache_size(False)
+        self._test_client_cache_size(True)
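+
+
+# Illustrative sketch (unused by the tests above): _test_client_pin waits for
+# the client to settle at int(cache_size * CAP_RECALL_RATIO) caps after a
+# SESSION_RECALL.  Folding CAP_RECALL_MIN in as a lower bound is an assumption
+# of this sketch, mirroring the Server::recall_client_state constants noted at
+# the top of the file, not something the test itself asserts.
+def expected_caps_after_recall(cache_size):
+    """Approximate cap count a compliant client should settle at after recall."""
+    return max(int(cache_size * CAP_RECALL_RATIO), CAP_RECALL_MIN)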
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
new file mode 100644
index 0000000..8839345
--- /dev/null
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -0,0 +1,432 @@
+
+"""
+Teuthology task for exercising CephFS client recovery
+"""
+
+import logging
+from textwrap import dedent
+import time
+import distutils.version as version
+import re
+import os
+
+from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.packaging import get_package_version
+
+
+log = logging.getLogger(__name__)
+
+
+# Arbitrary timeouts for operations involving restarting
+# an MDS or waiting for it to come up
+MDS_RESTART_GRACE = 60
+
+
+class TestClientNetworkRecovery(CephFSTestCase):
+    REQUIRE_KCLIENT_REMOTE = True
+    REQUIRE_ONE_CLIENT_REMOTE = True
+    CLIENTS_REQUIRED = 2
+
+    LOAD_SETTINGS = ["mds_session_timeout", "mds_reconnect_timeout", "ms_max_backoff"]
+
+    # Environment references
+    mds_session_timeout = None
+    mds_reconnect_timeout = None
+    ms_max_backoff = None
+
+    def test_network_death(self):
+        """
+        Simulate software freeze or temporary network failure.
+
+        Check that the client blocks I/O during failure, and completes
+        I/O after failure.
+        """
+
+        # We only need one client
+        self.mount_b.umount_wait()
+
+        # Initially our one client session should be visible
+        client_id = self.mount_a.get_global_id()
+        ls_data = self._session_list()
+        self.assert_session_count(1, ls_data)
+        self.assertEqual(ls_data[0]['id'], client_id)
+        self.assert_session_state(client_id, "open")
+
+        # ...and capable of doing I/O without blocking
+        self.mount_a.create_files()
+
+        # ...but if we turn off the network
+        self.fs.set_clients_block(True)
+
+        # ...and try and start an I/O
+        write_blocked = self.mount_a.write_background()
+
+        # ...then it should block
+        self.assertFalse(write_blocked.finished)
+        self.assert_session_state(client_id, "open")
+        time.sleep(self.mds_session_timeout * 1.5)  # Long enough for MDS to consider session stale
+        self.assertFalse(write_blocked.finished)
+        self.assert_session_state(client_id, "stale")
+
+        # ...until we re-enable I/O
+        self.fs.set_clients_block(False)
+
+        # ...when it should complete promptly
+        a = time.time()
+        self.wait_until_true(lambda: write_blocked.finished, self.ms_max_backoff * 2)
+        write_blocked.wait()  # Already know we're finished, wait() to raise exception on errors
+        recovery_time = time.time() - a
+        log.info("recovery time: {0}".format(recovery_time))
+        self.assert_session_state(client_id, "open")
+
+
+class TestClientRecovery(CephFSTestCase):
+    REQUIRE_KCLIENT_REMOTE = True
+    CLIENTS_REQUIRED = 2
+
+    LOAD_SETTINGS = ["mds_session_timeout", "mds_reconnect_timeout", "ms_max_backoff"]
+
+    # Environment references
+    mds_session_timeout = None
+    mds_reconnect_timeout = None
+    ms_max_backoff = None
+
+    def test_basic(self):
+        # Check that two clients come up healthy and see each others' files
+        # =====================================================
+        self.mount_a.create_files()
+        self.mount_a.check_files()
+        self.mount_a.umount_wait()
+
+        self.mount_b.check_files()
+
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # Check that the admin socket interface is correctly reporting
+        # two sessions
+        # =====================================================
+        ls_data = self._session_list()
+        self.assert_session_count(2, ls_data)
+
+        self.assertSetEqual(
+            set([l['id'] for l in ls_data]),
+            {self.mount_a.get_global_id(), self.mount_b.get_global_id()}
+        )
+
+    def test_restart(self):
+        # Check that after an MDS restart both clients reconnect and continue
+        # to handle I/O
+        # =====================================================
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
+
+        self.mount_a.create_destroy()
+        self.mount_b.create_destroy()
+
+    def _session_num_caps(self, client_id):
+        ls_data = self.fs.mds_asok(['session', 'ls'])
+        return int(self._session_by_id(ls_data).get(client_id, {'num_caps': None})['num_caps'])
+
+    def test_reconnect_timeout(self):
+        # Reconnect timeout
+        # =================
+        # Check that if I stop an MDS and a client goes away, the MDS waits
+        # for the reconnect period
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        mount_a_client_id = self.mount_a.get_global_id()
+        self.mount_a.umount_wait(force=True)
+
+        self.fs.mds_restart()
+
+        self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE)
+        # Check that the MDS locally reports its state correctly
+        status = self.fs.mds_asok(['status'])
+        self.assertIn("reconnect_status", status)
+
+        ls_data = self._session_list()
+        self.assert_session_count(2, ls_data)
+
+        # The session for the dead client should have the 'reconnect' flag set
+        self.assertTrue(self.get_session(mount_a_client_id)['reconnecting'])
+
+        # Wait for the reconnect state to clear, this should take the
+        # reconnect timeout period.
+        in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
+        # Check that the period we waited to enter active is within a factor
+        # of two of the reconnect timeout.
+        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2,
+                           "Should have been in reconnect phase for {0} but only took {1}".format(
+                               self.mds_reconnect_timeout, in_reconnect_for
+                           ))
+
+        self.assert_session_count(1)
+
+        # Check that the client that timed out during reconnect can
+        # mount again and do I/O
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        self.mount_a.create_destroy()
+
+        self.assert_session_count(2)
+
+    def test_reconnect_eviction(self):
+        # Eviction during reconnect
+        # =========================
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        mount_a_client_id = self.mount_a.get_global_id()
+        self.mount_a.umount_wait(force=True)
+
+        self.fs.mds_restart()
+
+        # Enter reconnect phase
+        self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE)
+        self.assert_session_count(2)
+
+        # Evict the stuck client
+        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
+        self.assert_session_count(1)
+
+        # Observe that we proceed to active phase without waiting full reconnect timeout
+        evict_til_active = self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
+        # Once we evict the troublemaker, the reconnect phase should complete
+        # in well under the reconnect timeout.
+        self.assertLess(evict_til_active, self.mds_reconnect_timeout * 0.5,
+                        "reconnect did not complete soon enough after eviction, took {0}".format(
+                            evict_til_active
+                        ))
+
+        # Bring the client back
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        self.mount_a.create_destroy()
+
+    def test_stale_caps(self):
+        # Capability release from stale session
+        # =====================================
+        cap_holder = self.mount_a.open_background()
+
+        # Wait for the file to be visible from another client, indicating
+        # that mount_a has completed its network ops
+        self.mount_b.wait_for_visible()
+
+        # Simulate client death
+        self.mount_a.kill()
+
+        try:
+            # Now, after mds_session_timeout seconds, the waiter should
+            # complete their operation when the MDS marks the holder's
+            # session stale.
+            cap_waiter = self.mount_b.write_background()
+            a = time.time()
+            cap_waiter.wait()
+            b = time.time()
+
+            # Should have succeeded
+            self.assertEqual(cap_waiter.exitstatus, 0)
+
+            cap_waited = b - a
+            log.info("cap_waiter waited {0}s".format(cap_waited))
+            self.assertTrue(self.mds_session_timeout / 2.0 <= cap_waited <= self.mds_session_timeout * 2.0,
+                            "Capability handover took {0}, expected approx {1}".format(
+                                cap_waited, self.mds_session_timeout
+                            ))
+
+            cap_holder.stdin.close()
+            try:
+                cap_holder.wait()
+            except (CommandFailedError, ConnectionLostError):
+                # We killed it (and possibly its node), so it raises an error
+                pass
+        finally:
+            # teardown() doesn't quite handle this case cleanly, so help it out
+            self.mount_a.kill_cleanup()
+
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+    def test_evicted_caps(self):
+        # Eviction while holding a capability
+        # ===================================
+
+        # Take out a write capability on a file on client A,
+        # and then immediately kill it.
+        cap_holder = self.mount_a.open_background()
+        mount_a_client_id = self.mount_a.get_global_id()
+
+        # Wait for the file to be visible from another client, indicating
+        # that mount_a has completed its network ops
+        self.mount_b.wait_for_visible()
+
+        # Simulate client death
+        self.mount_a.kill()
+
+        try:
+            # The waiter should get stuck waiting for the capability
+            # held on the MDS by the now-dead client A
+            cap_waiter = self.mount_b.write_background()
+            time.sleep(5)
+            self.assertFalse(cap_waiter.finished)
+
+            self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
+            # Now, because I evicted the old holder of the capability, it should
+            # immediately get handed over to the waiter
+            a = time.time()
+            cap_waiter.wait()
+            b = time.time()
+            cap_waited = b - a
+            log.info("cap_waiter waited {0}s".format(cap_waited))
+            # This is the check that it happened 'now' rather than waiting
+            # for the session timeout
+            self.assertLess(cap_waited, self.mds_session_timeout / 2.0,
+                            "Capability handover took {0}, expected less than {1}".format(
+                                cap_waited, self.mds_session_timeout / 2.0
+                            ))
+
+            cap_holder.stdin.close()
+            try:
+                cap_holder.wait()
+            except (CommandFailedError, ConnectionLostError):
+                # We killed it (and possibly its node), so it raises an error
+                pass
+        finally:
+            self.mount_a.kill_cleanup()
+
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+    def test_trim_caps(self):
+        # Trim capability when reconnecting MDS
+        # ===================================
+
+        count = 500
+        # Create lots of files
+        for i in range(count):
+            self.mount_a.run_shell(["touch", "f{0}".format(i)])
+
+        # Populate mount_b's cache
+        self.mount_b.run_shell(["ls"])
+
+        client_id = self.mount_b.get_global_id()
+        num_caps = self._session_num_caps(client_id)
+        self.assertGreaterEqual(num_caps, count)
+
+        # Restart MDS. client should trim its cache when reconnecting to the MDS
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
+
+        num_caps = self._session_num_caps(client_id)
+        self.assertLess(num_caps, count,
+                        "should have less than {0} capabilities, have {1}".format(
+                            count, num_caps
+                        ))
+
+    def test_filelock(self):
+        """
+        Check that file lock doesn't get lost after an MDS restart
+        """
+        a_version_str = get_package_version(self.mount_a.client_remote, "fuse")
+        b_version_str = get_package_version(self.mount_b.client_remote, "fuse")
+        flock_version_str = "2.9"
+
+        version_regex = re.compile(r"[0-9\.]+")
+        a_result = version_regex.match(a_version_str)
+        self.assertTrue(a_result)
+        b_result = version_regex.match(b_version_str)
+        self.assertTrue(b_result)
+        a_version = version.StrictVersion(a_result.group())
+        b_version = version.StrictVersion(b_result.group())
+        flock_version = version.StrictVersion(flock_version_str)
+
+        flockable = False
+        if (a_version >= flock_version and b_version >= flock_version):
+            log.info("testing flock locks")
+            flockable = True
+        else:
+            log.info("not testing flock locks, machines have versions {av} and {bv}".format(
+                av=a_version_str,bv=b_version_str))
+
+        lock_holder = self.mount_a.lock_background(do_flock=flockable)
+
+        self.mount_b.wait_for_visible("background_file-2")
+        self.mount_b.check_filelock(do_flock=flockable)
+
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
+
+        self.mount_b.check_filelock(do_flock=flockable)
+
+        # Tear down the background process
+        lock_holder.stdin.close()
+        try:
+            lock_holder.wait()
+        except (CommandFailedError, ConnectionLostError):
+            # We killed it, so it raises an error
+            pass
+
+    def test_dir_fsync(self):
+        self._test_fsync(True)
+
+    def test_create_fsync(self):
+        self._test_fsync(False)
+
+    def _test_fsync(self, dirfsync):
+        """
+        That calls to fsync guarantee visibility of metadata to another
+        client immediately after the fsyncing client dies.
+        """
+
+        # Leave this guy out until he's needed
+        self.mount_b.umount_wait()
+
+        # Create dir + child dentry on client A, and fsync the dir
+        path = os.path.join(self.mount_a.mountpoint, "subdir")
+        self.mount_a.run_python(
+            dedent("""
+                import os
+                import time
+
+                path = "{path}"
+
+                print "Starting creation..."
+                start = time.time()
+
+                os.mkdir(path)
+                dfd = os.open(path, os.O_DIRECTORY)
+
+                fd = open(os.path.join(path, "childfile"), "w")
+                print "Finished creation in {{0}}s".format(time.time() - start)
+
+                print "Starting fsync..."
+                start = time.time()
+                if {dirfsync}:
+                    os.fsync(dfd)
+                else:
+                    os.fsync(fd)
+                print "Finished fsync in {{0}}s".format(time.time() - start)
+            """.format(path=path,dirfsync=str(dirfsync)))
+        )
+
+        # Immediately kill the MDS and then client A
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+        self.mount_a.kill()
+        self.mount_a.kill_cleanup()
+
+        # Restart the MDS.  Wait for it to come up, it'll have to time out in clientreplay
+        self.fs.mds_restart()
+        log.info("Waiting for reconnect...")
+        self.fs.wait_for_state("up:reconnect")
+        log.info("Waiting for active...")
+        self.fs.wait_for_state("up:active", timeout=MDS_RESTART_GRACE + self.mds_reconnect_timeout)
+        log.info("Reached active...")
+
+        # Is the child dentry visible from mount B?
+        self.mount_b.mount()
+        self.mount_b.wait_until_mounted()
+        self.mount_b.run_shell(["ls", "subdir/childfile"])
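+
+
+# Illustrative sketch (unused by the tests above): several checks in this file
+# accept a measured duration as long as it falls roughly within a factor of two
+# of a configured timeout (reconnect timeout, session timeout).  This helper
+# just restates that tolerance in one place.
+def within_factor_of(measured, expected, factor=2.0):
+    """True if `measured` is within `factor` of `expected` in either direction."""
+    return expected / factor <= measured <= expected * factor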
diff --git a/qa/tasks/cephfs/test_config_commands.py b/qa/tasks/cephfs/test_config_commands.py
new file mode 100644
index 0000000..ce0619f
--- /dev/null
+++ b/qa/tasks/cephfs/test_config_commands.py
@@ -0,0 +1,63 @@
+
+from unittest import case
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from tasks.cephfs.fuse_mount import FuseMount
+
+
+class TestConfigCommands(CephFSTestCase):
+    """
+    Test that daemons and clients respond to the otherwise rarely-used
+    runtime config modification operations.
+    """
+
+    CLIENTS_REQUIRED = 1
+    MDSS_REQUIRED = 1
+
+    def test_client_config(self):
+        """
+        That I can successfully issue asok "config set" commands
+
+        :return:
+        """
+
+        if not isinstance(self.mount_a, FuseMount):
+            raise case.SkipTest("Test only applies to FUSE clients")
+
+        test_key = "client_cache_size"
+        test_val = "123"
+        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
+        out = self.mount_a.admin_socket(['config', 'get', test_key])
+        self.assertEqual(out[test_key], test_val)
+
+        self.mount_a.write_n_mb("file.bin", 1)
+
+        # Implicitly asserting that shutdown does not hit a lockdep error
+        self.mount_a.umount_wait(require_clean=True)
+        self.fs.mds_stop()
+
+    def test_mds_config_asok(self):
+        test_key = "mds_max_purge_ops"
+        test_val = "123"
+        self.fs.mds_asok(['config', 'set', test_key, test_val])
+        out = self.fs.mds_asok(['config', 'get', test_key])
+        self.assertEqual(out[test_key], test_val)
+
+        # Implicitly asserting that shutdown does not hit a lockdep error
+        self.mount_a.umount_wait(require_clean=True)
+        self.fs.mds_stop()
+
+    def test_mds_config_tell(self):
+        test_key = "mds_max_purge_ops"
+        test_val = "123"
+
+        mds_id = self.fs.get_lone_mds_id()
+        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
+                                            "--{0}={1}".format(test_key, test_val))
+
+        # Read it back with asok because there is no `tell` equivalent
+        out = self.fs.mds_asok(['config', 'get', test_key])
+        self.assertEqual(out[test_key], test_val)
+
+        # Implicitly asserting that shutdown does not hit a lockdep error
+        self.mount_a.umount_wait(require_clean=True)
+        self.fs.mds_stop()
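+
+
+# Illustrative sketch (unused by the tests above): runtime options are applied
+# three ways above, via the client asok, the MDS asok, and `ceph tell ...
+# injectargs`.  The CLI equivalents look roughly like this; the daemon name
+# "mds.a" is an assumption of the sketch, not something the tests rely on.
+def set_mds_option_via_cli(key, value, daemon="mds.a"):
+    """Apply one runtime MDS option via the admin socket and via `tell`."""
+    import subprocess
+    subprocess.check_call(["ceph", "daemon", daemon, "config", "set", key, str(value)])
+    subprocess.check_call(["ceph", "tell", daemon, "injectargs",
+                           "--{0}={1}".format(key, value)])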
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
new file mode 100644
index 0000000..b6d5404
--- /dev/null
+++ b/qa/tasks/cephfs/test_damage.py
@@ -0,0 +1,534 @@
+import json
+import logging
+import errno
+import re
+from teuthology.contextutil import MaxWhileTries
+from teuthology.exceptions import CommandFailedError
+from teuthology.orchestra.run import wait
+from tasks.cephfs.cephfs_test_case import CephFSTestCase, long_running
+
+DAMAGED_ON_START = "damaged_on_start"
+DAMAGED_ON_LS = "damaged_on_ls"
+CRASHED = "server crashed"
+NO_DAMAGE = "no damage"
+FAILED_CLIENT = "client failed"
+FAILED_SERVER = "server failed"
+
+# An EIO in response to a stat from the client
+EIO_ON_LS = "eio"
+
+# An EIO, but nothing in the damage table (never something we expect)
+EIO_NO_DAMAGE = "eio without damage entry"
+
+
+log = logging.getLogger(__name__)
+
+
+class TestDamage(CephFSTestCase):
+    def _simple_workload_write(self):
+        self.mount_a.run_shell(["mkdir", "subdir"])
+        self.mount_a.write_n_mb("subdir/sixmegs", 6)
+        return self.mount_a.stat("subdir/sixmegs")
+
+    def is_marked_damaged(self, rank):
+        mds_map = self.fs.get_mds_map()
+        return rank in mds_map['damaged']
+
+    @long_running #459s
+    def test_object_deletion(self):
+        """
+        That the MDS has a clean 'damaged' response to loss of any single metadata object
+        """
+
+        self._simple_workload_write()
+
+        # Hmm, actually it would be nice to permute whether the metadata pool
+        # state contains sessions or not, but for the moment close this session
+        # to avoid waiting through reconnect on every MDS start.
+        self.mount_a.umount_wait()
+        for mds_name in self.fs.get_active_names():
+            self.fs.mds_asok(["flush", "journal"], mds_name)
+
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        self.fs.rados(['export', '/tmp/metadata.bin'])
+
+        def is_ignored(obj_id, dentry=None):
+            """
+            A filter to avoid redundantly mutating many similar objects (e.g.
+            stray dirfrags) or similar dentries (e.g. stray dir dentries)
+            """
+            if re.match(r"60.\.00000000", obj_id) and obj_id != "600.00000000":
+                return True
+
+            if dentry and obj_id == "100.00000000":
+                if re.match("stray.+_head", dentry) and dentry != "stray0_head":
+                    return True
+
+            return False
+
+        def get_path(obj_id, dentry=None):
+            """
+            What filesystem path does this object or dentry correspond to? i.e.
+            what should I poke to see EIO after damaging it?
+            """
+
+            if obj_id == "1.00000000" and dentry == "subdir_head":
+                return "./subdir"
+            elif obj_id == "10000000000.00000000" and dentry == "sixmegs_head":
+                return "./subdir/sixmegs"
+
+            # None means ls will do an "ls -R" in the hope of seeing some errors
+            return None
+
+        objects = self.fs.rados(["ls"]).split("\n")
+        objects = [o for o in objects if not is_ignored(o)]
+
+        # Find all objects with an OMAP header
+        omap_header_objs = []
+        for o in objects:
+            header = self.fs.rados(["getomapheader", o])
+            # The rados CLI wraps the header output in a hex-printed style
+            header_bytes = int(re.match(r"header \((.+) bytes\)", header).group(1))
+            if header_bytes > 0:
+                omap_header_objs.append(o)
+
+        # Find all OMAP key/vals
+        omap_keys = []
+        for o in objects:
+            keys_str = self.fs.rados(["listomapkeys", o])
+            if keys_str:
+                for key in keys_str.split("\n"):
+                    if not is_ignored(o, key):
+                        omap_keys.append((o, key))
+
+        # Find objects that have data in their bodies
+        data_objects = []
+        for obj_id in objects:
+            stat_out = self.fs.rados(["stat", obj_id])
+            size = int(re.match(".+, size (.+)$", stat_out).group(1))
+            if size > 0:
+                data_objects.append(obj_id)
+
+        # Define the various forms of damage we will inflict
+        class MetadataMutation(object):
+            def __init__(self, obj_id_, desc_, mutate_fn_, expectation_, ls_path=None):
+                self.obj_id = obj_id_
+                self.desc = desc_
+                self.mutate_fn = mutate_fn_
+                self.expectation = expectation_
+                if ls_path is None:
+                    self.ls_path = "."
+                else:
+                    self.ls_path = ls_path
+
+            def __eq__(self, other):
+                return self.desc == other.desc
+
+            def __hash__(self):
+                return hash(self.desc)
+
+        junk = "deadbeef" * 10
+        mutations = []
+
+        # Removals
+        for obj_id in objects:
+            if obj_id in [
+                # JournalPointers are auto-replaced if missing (same path as upgrade)
+                "400.00000000",
+                # Missing dirfrags for non-system dirs result in empty directory
+                "10000000000.00000000",
+            ]:
+                expectation = NO_DAMAGE
+            else:
+                expectation = DAMAGED_ON_START
+
+            log.info("Expectation on rm '{0}' will be '{1}'".format(
+                obj_id, expectation
+            ))
+
+            mutations.append(MetadataMutation(
+                obj_id,
+                "Delete {0}".format(obj_id),
+                lambda o=obj_id: self.fs.rados(["rm", o]),
+                expectation
+            ))
+
+        # Blatant corruptions
+        mutations.extend([
+            MetadataMutation(
+                o,
+                "Corrupt {0}".format(o),
+                lambda o=o: self.fs.rados(["put", o, "-"], stdin_data=junk),
+                DAMAGED_ON_START
+            ) for o in data_objects
+        ])
+
+        # Truncations
+        mutations.extend([
+            MetadataMutation(
+                o,
+                "Truncate {0}".format(o),
+                lambda o=o: self.fs.rados(["truncate", o, "0"]),
+                DAMAGED_ON_START
+            ) for o in data_objects
+        ])
+
+        # OMAP value corruptions
+        for o, k in omap_keys:
+            if o.startswith("100."):
+                # Anything in rank 0's 'mydir'
+                expectation = DAMAGED_ON_START
+            else:
+                expectation = EIO_ON_LS
+
+            mutations.append(
+                MetadataMutation(
+                    o,
+                    "Corrupt omap key {0}:{1}".format(o, k),
+                    lambda o=o,k=k: self.fs.rados(["setomapval", o, k, junk]),
+                    expectation,
+                    get_path(o, k)
+                )
+            )
+
+        # OMAP header corruptions
+        for obj_id in omap_header_objs:
+            if re.match(r"60.\.00000000", obj_id) \
+                    or obj_id in ["1.00000000", "100.00000000", "mds0_sessionmap"]:
+                expectation = DAMAGED_ON_START
+            else:
+                expectation = NO_DAMAGE
+
+            log.info("Expectation on corrupt header '{0}' will be '{1}'".format(
+                obj_id, expectation
+            ))
+
+            mutations.append(
+                MetadataMutation(
+                    obj_id,
+                    "Corrupt omap header on {0}".format(obj_id),
+                    lambda o=obj_id: self.fs.rados(["setomapheader", o, junk]),
+                    expectation
+                )
+            )
+
+        results = {}
+
+        for mutation in mutations:
+            log.info("Applying mutation '{0}'".format(mutation.desc))
+
+            # Reset MDS state
+            self.mount_a.umount_wait(force=True)
+            self.fs.mds_stop()
+            self.fs.mds_fail()
+            self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+
+            # Reset RADOS pool state
+            self.fs.rados(['import', '/tmp/metadata.bin'])
+
+            # Inject the mutation
+            mutation.mutate_fn()
+
+            # Try starting the MDS
+            self.fs.mds_restart()
+
+            # How long we'll wait between starting a daemon and expecting
+            # it to make it through startup, and potentially declare itself
+            # damaged to the mon cluster.
+            startup_timeout = 60
+
+            if mutation.expectation not in (EIO_ON_LS, DAMAGED_ON_LS, NO_DAMAGE):
+                if mutation.expectation == DAMAGED_ON_START:
+                    # The MDS may pass through active before making it to damaged
+                    try:
+                        self.wait_until_true(lambda: self.is_marked_damaged(0), startup_timeout)
+                    except RuntimeError:
+                        pass
+
+                # Wait for MDS to either come up or go into damaged state
+                try:
+                    self.wait_until_true(lambda: self.is_marked_damaged(0) or self.fs.are_daemons_healthy(), startup_timeout)
+                except RuntimeError:
+                    crashed = False
+                    # Didn't make it to healthy or damaged, did it crash?
+                    for daemon_id, daemon in self.fs.mds_daemons.items():
+                        if daemon.proc and daemon.proc.finished:
+                            crashed = True
+                            log.error("Daemon {0} crashed!".format(daemon_id))
+                            daemon.proc = None  # So that subsequent stop() doesn't raise error
+                    if not crashed:
+                        # Didn't go healthy, didn't go damaged, didn't crash, so what?
+                        raise
+                    else:
+                        log.info("Result: Mutation '{0}' led to crash".format(mutation.desc))
+                        results[mutation] = CRASHED
+                        continue
+                if self.is_marked_damaged(0):
+                    log.info("Result: Mutation '{0}' led to DAMAGED state".format(mutation.desc))
+                    results[mutation] = DAMAGED_ON_START
+                    continue
+                else:
+                    log.info("Mutation '{0}' did not prevent MDS startup, attempting ls...".format(mutation.desc))
+            else:
+                try:
+                    self.wait_until_true(self.fs.are_daemons_healthy, 60)
+                except RuntimeError:
+                    log.info("Result: Mutation '{0}' should have left us healthy, actually not.".format(mutation.desc))
+                    if self.is_marked_damaged(0):
+                        results[mutation] = DAMAGED_ON_START
+                    else:
+                        results[mutation] = FAILED_SERVER
+                    continue
+                log.info("Daemons came up after mutation '{0}', proceeding to ls".format(mutation.desc))
+
+            # MDS is up, should go damaged on ls or client mount
+            self.mount_a.mount()
+            self.mount_a.wait_until_mounted()
+            if mutation.ls_path == ".":
+                proc = self.mount_a.run_shell(["ls", "-R", mutation.ls_path], wait=False)
+            else:
+                proc = self.mount_a.stat(mutation.ls_path, wait=False)
+
+            if mutation.expectation == DAMAGED_ON_LS:
+                try:
+                    self.wait_until_true(lambda: self.is_marked_damaged(0), 60)
+                    log.info("Result: Mutation '{0}' led to DAMAGED state after ls".format(mutation.desc))
+                    results[mutation] = DAMAGED_ON_LS
+                except RuntimeError:
+                    if self.fs.are_daemons_healthy():
+                        log.error("Result: Failed to go damaged on mutation '{0}', actually went active".format(
+                            mutation.desc))
+                        results[mutation] = NO_DAMAGE
+                    else:
+                        log.error("Result: Failed to go damaged on mutation '{0}'".format(mutation.desc))
+                        results[mutation] = FAILED_SERVER
+
+            else:
+                try:
+                    wait([proc], 20)
+                    log.info("Result: Mutation '{0}' did not cause DAMAGED state".format(mutation.desc))
+                    results[mutation] = NO_DAMAGE
+                except MaxWhileTries:
+                    log.info("Result: Failed to complete client IO on mutation '{0}'".format(mutation.desc))
+                    results[mutation] = FAILED_CLIENT
+                except CommandFailedError as e:
+                    if e.exitstatus == errno.EIO:
+                        log.info("Result: EIO on client")
+                        results[mutation] = EIO_ON_LS
+                    else:
+                        log.info("Result: unexpected error {0} on client".format(e))
+                        results[mutation] = FAILED_CLIENT
+
+            if mutation.expectation == EIO_ON_LS:
+                # EIOs mean something handled by DamageTable: assert that it has
+                # been populated
+                damage = json.loads(
+                    self.fs.mon_manager.raw_cluster_cmd(
+                        'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), "damage", "ls", '--format=json-pretty'))
+                if len(damage) == 0:
+                    results[mutation] = EIO_NO_DAMAGE
+
+        failures = [(mutation, result) for (mutation, result) in results.items() if mutation.expectation != result]
+        if failures:
+            log.error("{0} mutations had unexpected outcomes:".format(len(failures)))
+            for mutation, result in failures:
+                log.error("  Expected '{0}' actually '{1}' from '{2}'".format(
+                    mutation.expectation, result, mutation.desc
+                ))
+            raise RuntimeError("{0} mutations had unexpected outcomes".format(len(failures)))
+        else:
+            log.info("All {0} mutations had expected outcomes".format(len(mutations)))
+
+    def test_damaged_dentry(self):
+        # Damage to dentries is interesting because it leaves the
+        # directory's `complete` flag in a subtle state where
+        # we have marked the dir complete in order that folks
+        # can access it, but in actual fact there is a dentry
+        # missing
+        self.mount_a.run_shell(["mkdir", "subdir/"])
+
+        self.mount_a.run_shell(["touch", "subdir/file_undamaged"])
+        self.mount_a.run_shell(["touch", "subdir/file_to_be_damaged"])
+
+        subdir_ino = self.mount_a.path_to_ino("subdir")
+
+        self.mount_a.umount_wait()
+        for mds_name in self.fs.get_active_names():
+            self.fs.mds_asok(["flush", "journal"], mds_name)
+
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # Corrupt a dentry
+        junk = "deadbeef" * 10
+        dirfrag_obj = "{0:x}.00000000".format(subdir_ino)
+        self.fs.rados(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
+
+        # Start up and try to list it
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        dentries = self.mount_a.ls("subdir/")
+
+        # The damaged guy should have disappeared
+        self.assertEqual(dentries, ["file_undamaged"])
+
+        # I should get ENOENT if I try and read it normally, because
+        # the dir is considered complete
+        try:
+            self.mount_a.stat("subdir/file_to_be_damaged", wait=True)
+        except CommandFailedError as e:
+            self.assertEqual(e.exitstatus, errno.ENOENT)
+        else:
+            raise AssertionError("Expected ENOENT")
+
+        # The fact that there is damage should have been recorded
+        damage = json.loads(
+            self.fs.mon_manager.raw_cluster_cmd(
+                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+                "damage", "ls", '--format=json-pretty'))
+        self.assertEqual(len(damage), 1)
+        damage_id = damage[0]['id']
+
+        # If I try to create a dentry with the same name as the damaged guy
+        # then that should be forbidden
+        try:
+            self.mount_a.touch("subdir/file_to_be_damaged")
+        except CommandFailedError as e:
+            self.assertEqual(e.exitstatus, errno.EIO)
+        else:
+            raise AssertionError("Expected EIO")
+
+        # Attempting that touch will clear the client's complete flag; now
+        # when I stat it I'll get EIO instead of ENOENT
+        try:
+            self.mount_a.stat("subdir/file_to_be_damaged", wait=True)
+        except CommandFailedError as e:
+            self.assertEqual(e.exitstatus, errno.EIO)
+        else:
+            raise AssertionError("Expected EIO")
+
+        nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files")
+        self.assertEqual(nfiles, "2")
+
+        self.mount_a.umount_wait()
+
+        # Now repair the stats
+        scrub_json = self.fs.mds_asok(["scrub_path", "/subdir", "repair"])
+        log.info(json.dumps(scrub_json, indent=2))
+
+        self.assertEqual(scrub_json["passed_validation"], False)
+        self.assertEqual(scrub_json["raw_stats"]["checked"], True)
+        self.assertEqual(scrub_json["raw_stats"]["passed"], False)
+
+        # Check that the file count is now correct
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files")
+        self.assertEqual(nfiles, "1")
+
+        # Clean up the omap object
+        self.fs.rados(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
+
+        # Clean up the damagetable entry
+        self.fs.mon_manager.raw_cluster_cmd(
+            'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+            "damage", "rm", "{did}".format(did=damage_id))
+
+        # Now I should be able to create a file with the same name as the
+        # damaged guy if I want.
+        self.mount_a.touch("subdir/file_to_be_damaged")
+
+    def test_open_ino_errors(self):
+        """
+        That errors encountered during opening inos are properly propagated
+        """
+
+        self.mount_a.run_shell(["mkdir", "dir1"])
+        self.mount_a.run_shell(["touch", "dir1/file1"])
+        self.mount_a.run_shell(["mkdir", "dir2"])
+        self.mount_a.run_shell(["touch", "dir2/file2"])
+        self.mount_a.run_shell(["mkdir", "testdir"])
+        self.mount_a.run_shell(["ln", "dir1/file1", "testdir/hardlink1"])
+        self.mount_a.run_shell(["ln", "dir2/file2", "testdir/hardlink2"])
+
+        file1_ino = self.mount_a.path_to_ino("dir1/file1")
+        file2_ino = self.mount_a.path_to_ino("dir2/file2")
+        dir2_ino = self.mount_a.path_to_ino("dir2")
+
+        # Ensure everything is written to backing store
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"])
+
+        # Drop everything from the MDS cache
+        self.mds_cluster.mds_stop()
+        self.fs.journal_tool(['journal', 'reset'])
+        self.mds_cluster.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        self.mount_a.mount()
+
+        # Case 1: un-decodeable backtrace
+
+        # Validate that the backtrace is present and decodable
+        self.fs.read_backtrace(file1_ino)
+        # Go corrupt the backtrace of dir1/file1 (used for resolving
+        # testdir/hardlink1).
+        self.fs._write_data_xattr(file1_ino, "parent", "rhubarb")
+
+        # Check that touching the hardlink gives EIO
+        ran = self.mount_a.run_shell(["stat", "testdir/hardlink1"], wait=False)
+        try:
+            ran.wait()
+        except CommandFailedError:
+            self.assertTrue("Input/output error" in ran.stderr.getvalue())
+
+        # Check that an entry is created in the damage table
+        damage = json.loads(
+            self.fs.mon_manager.raw_cluster_cmd(
+                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+                "damage", "ls", '--format=json-pretty'))
+        self.assertEqual(len(damage), 1)
+        self.assertEqual(damage[0]['damage_type'], "backtrace")
+        self.assertEqual(damage[0]['ino'], file1_ino)
+
+        self.fs.mon_manager.raw_cluster_cmd(
+            'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+            "damage", "rm", str(damage[0]['id']))
+
+
+        # Case 2: missing dirfrag for the target inode
+
+        self.fs.rados(["rm", "{0:x}.00000000".format(dir2_ino)])
+
+        # Check that touching the hardlink gives EIO
+        ran = self.mount_a.run_shell(["stat", "testdir/hardlink2"], wait=False)
+        try:
+            ran.wait()
+        except CommandFailedError:
+            self.assertTrue("Input/output error" in ran.stderr.getvalue())
+
+        # Check that an entry is created in the damage table
+        damage = json.loads(
+            self.fs.mon_manager.raw_cluster_cmd(
+                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+                "damage", "ls", '--format=json-pretty'))
+        self.assertEqual(len(damage), 2)
+        if damage[0]['damage_type'] == "backtrace":
+            self.assertEqual(damage[0]['ino'], file2_ino)
+            self.assertEqual(damage[1]['damage_type'], "dir_frag")
+            self.assertEqual(damage[1]['ino'], dir2_ino)
+        else:
+            self.assertEqual(damage[0]['damage_type'], "dir_frag")
+            self.assertEqual(damage[0]['ino'], dir2_ino)
+            self.assertEqual(damage[1]['damage_type'], "backtrace")
+            self.assertEqual(damage[1]['ino'], file2_ino)
+
+        for entry in damage:
+            self.fs.mon_manager.raw_cluster_cmd(
+                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+                "damage", "rm", str(entry['id']))
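+
+
+# Illustrative sketch (unused by the tests above): the expectations in
+# test_object_deletion are keyed off the naming conventions of the metadata
+# pool objects being mutated.  The classifier below simply restates the same
+# patterns the test already matches on; anything else is reported as "other".
+def classify_metadata_object(obj_id):
+    """Best-effort description of a metadata-pool object name."""
+    if obj_id == "1.00000000":
+        return "root directory dirfrag"
+    if obj_id == "100.00000000":
+        return "rank 0 'mydir' (holds the stray dir dentries)"
+    if re.match(r"60.\.00000000", obj_id):
+        return "stray directory dirfrag"
+    if obj_id == "400.00000000":
+        return "journal pointer (auto-recreated if missing)"
+    if obj_id == "mds0_sessionmap":
+        return "rank 0 session map"
+    return "other"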
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
new file mode 100644
index 0000000..31bca16
--- /dev/null
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -0,0 +1,511 @@
+
+"""
+Test our tools for recovering metadata from the data pool
+"""
+
+import logging
+import os
+from textwrap import dedent
+import traceback
+from collections import namedtuple
+
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase, long_running
+
+log = logging.getLogger(__name__)
+
+
+ValidationError = namedtuple("ValidationError", ["exception", "backtrace"])
+
+
+class Workload(object):
+    def __init__(self, filesystem, mount):
+        self._mount = mount
+        self._filesystem = filesystem
+        self._initial_state = None
+
+        # Accumulate backtraces for every failed validation, and return them.  Backtraces
+        # are rather verbose, but we only see them when something breaks, and they
+        # let us see which check failed without having to decorate each check with
+        # a string
+        self._errors = []
+
+    def assert_equal(self, a, b):
+        try:
+            if a != b:
+                raise AssertionError("{0} != {1}".format(a, b))
+        except AssertionError as e:
+            self._errors.append(
+                ValidationError(e, traceback.format_exc(3))
+            )
+
+    def write(self):
+        """
+        Write the workload files to the mount
+        """
+        raise NotImplementedError()
+
+    def validate(self):
+        """
+        Read from the mount and validate that the workload files are present (i.e. have
+        survived or been reconstructed from the test scenario)
+        """
+        raise NotImplementedError()
+
+    def damage(self):
+        """
+        Damage the filesystem pools in ways that will be interesting to recover from.  By
+        default just wipe everything in the metadata pool
+        """
+        # Delete every object in the metadata pool
+        objects = self._filesystem.rados(["ls"]).split("\n")
+        for o in objects:
+            self._filesystem.rados(["rm", o])
+
+    def flush(self):
+        """
+        Called after client unmount, after write: flush whatever you want
+        """
+        self._filesystem.mds_asok(["flush", "journal"])
+
+
+class SimpleWorkload(Workload):
+    """
+    Single file, single directory, check that it gets recovered and so does its size
+    """
+    def write(self):
+        self._mount.run_shell(["mkdir", "subdir"])
+        self._mount.write_n_mb("subdir/sixmegs", 6)
+        self._initial_state = self._mount.stat("subdir/sixmegs")
+
+    def validate(self):
+        self._mount.run_shell(["ls", "subdir"])
+        st = self._mount.stat("subdir/sixmegs")
+        self.assert_equal(st['st_size'], self._initial_state['st_size'])
+        return self._errors
+
+
+class MovedFile(Workload):
+    def write(self):
+        # Create a file whose backtrace disagrees with its eventual position
+        # in the metadata.  We will see that it gets reconstructed in its
+        # original position according to its backtrace.
+        self._mount.run_shell(["mkdir", "subdir_alpha"])
+        self._mount.run_shell(["mkdir", "subdir_bravo"])
+        self._mount.write_n_mb("subdir_alpha/sixmegs", 6)
+        self._filesystem.mds_asok(["flush", "journal"])
+        self._mount.run_shell(["mv", "subdir_alpha/sixmegs", "subdir_bravo/sixmegs"])
+        self._initial_state = self._mount.stat("subdir_bravo/sixmegs")
+
+    def flush(self):
+        pass
+
+    def validate(self):
+        self.assert_equal(self._mount.ls(), ["subdir_alpha"])
+        st = self._mount.stat("subdir_alpha/sixmegs")
+        self.assert_equal(st['st_size'], self._initial_state['st_size'])
+        return self._errors
+
+
+class BacktracelessFile(Workload):
+    def write(self):
+        self._mount.run_shell(["mkdir", "subdir"])
+        self._mount.write_n_mb("subdir/sixmegs", 6)
+        self._initial_state = self._mount.stat("subdir/sixmegs")
+
+    def flush(self):
+        # Never flush metadata, so backtrace won't be written
+        pass
+
+    def validate(self):
+        ino_name = "%x" % self._initial_state["st_ino"]
+
+        # The inode should be linked into lost+found because we had no path for it
+        self.assert_equal(self._mount.ls(), ["lost+found"])
+        self.assert_equal(self._mount.ls("lost+found"), [ino_name])
+        st = self._mount.stat("lost+found/{ino_name}".format(ino_name=ino_name))
+
+        # We might not have got the name or path, but we should still get the size
+        self.assert_equal(st['st_size'], self._initial_state['st_size'])
+
+        return self._errors
+
+
+class StripedStashedLayout(Workload):
+    def __init__(self, fs, m):
+        super(StripedStashedLayout, self).__init__(fs, m)
+
+        # Nice small stripes so we can quickly do our writes+validates
+        self.sc = 4
+        self.ss = 65536
+        self.os = 262144
+
+        self.interesting_sizes = [
+            # Exactly stripe_count objects will exist
+            self.os * self.sc,
+            # Fewer than stripe_count objects will exist
+            self.os * self.sc / 2,
+            self.os * (self.sc - 1) + self.os / 2,
+            self.os * (self.sc - 1) + self.os / 2 - 1,
+            self.os * (self.sc + 1) + self.os / 2,
+            self.os * (self.sc + 1) + self.os / 2 + 1,
+            # More than stripe_count objects will exist
+            self.os * self.sc + self.os * self.sc / 2
+        ]
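+        # For reference, with object_size=262144 and stripe_count=4 the list
+        # above evaluates to [1048576, 524288, 917504, 917503, 1441792,
+        # 1441793, 1572864] bytes; the first entry is exactly one full object
+        # set (object_size * stripe_count).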
+
+    def write(self):
+        # Create a dir with a striped layout set on it
+        self._mount.run_shell(["mkdir", "stripey"])
+
+        self._mount.run_shell([
+            "setfattr", "-n", "ceph.dir.layout", "-v",
+            "stripe_unit={ss} stripe_count={sc} object_size={os} pool={pool}".format(
+                ss=self.ss, os=self.os, sc=self.sc,
+                pool=self._filesystem.get_data_pool_name()
+            ),
+            "./stripey"])
+
+        # Write files, then flush metadata so that its layout gets written into an xattr
+        for i, n_bytes in enumerate(self.interesting_sizes):
+            self._mount.write_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes)
+            # This is really just validating the validator
+            self._mount.validate_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes)
+        self._filesystem.mds_asok(["flush", "journal"])
+
+        # Write another file in the same way, but this time don't flush the metadata,
+        # so that it won't have the layout xattr
+        self._mount.write_test_pattern("stripey/unflushed_file", 1024 * 512)
+        self._mount.validate_test_pattern("stripey/unflushed_file", 1024 * 512)
+
+        self._initial_state = {
+            "unflushed_ino": self._mount.path_to_ino("stripey/unflushed_file")
+        }
+
+    def flush(self):
+        # Pass because we already selectively flushed during write
+        pass
+
+    def validate(self):
+        # The flushed files should have been recovered into their original
+        # locations with the correct layout: read back correct data
+        for i, n_bytes in enumerate(self.interesting_sizes):
+            try:
+                self._mount.validate_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes)
+            except CommandFailedError as e:
+                self._errors.append(
+                    ValidationError("File {0} (size {1}): {2}".format(i, n_bytes, e), traceback.format_exc(3))
+                )
+
+        # The unflushed file should have been recovered into lost+found without
+        # the correct layout: read back junk
+        ino_name = "%x" % self._initial_state["unflushed_ino"]
+        self.assert_equal(self._mount.ls("lost+found"), [ino_name])
+        try:
+            self._mount.validate_test_pattern(os.path.join("lost+found", ino_name), 1024 * 512)
+        except CommandFailedError:
+            pass
+        else:
+            self._errors.append(
+                ValidationError("Unexpectedly valid data in unflushed striped file", "")
+            )
+
+        return self._errors
+
+
+class ManyFilesWorkload(Workload):
+    def __init__(self, filesystem, mount, file_count):
+        super(ManyFilesWorkload, self).__init__(filesystem, mount)
+        self.file_count = file_count
+
+    def write(self):
+        self._mount.run_shell(["mkdir", "subdir"])
+        for n in range(0, self.file_count):
+            self._mount.write_test_pattern("subdir/{0}".format(n), 6 * 1024 * 1024)
+
+    def validate(self):
+        for n in range(0, self.file_count):
+            try:
+                self._mount.validate_test_pattern("subdir/{0}".format(n), 6 * 1024 * 1024)
+            except CommandFailedError as e:
+                self._errors.append(
+                    ValidationError("File {0}: {1}".format(n, e), traceback.format_exc(3))
+                )
+
+        return self._errors
+
+
+class MovedDir(Workload):
+    def write(self):
+        # Create a nested dir that we will then move.  Write two files with two
+        # different backtraces referring to the moved dir, each claiming a
+        # different location for it.  We will see that only one backtrace wins
+        # and the dir ends up with a single linkage.
+        self._mount.run_shell(["mkdir", "-p", "grandmother/parent"])
+        self._mount.write_n_mb("grandmother/parent/orig_pos_file", 1)
+        self._filesystem.mds_asok(["flush", "journal"])
+        self._mount.run_shell(["mkdir", "grandfather"])
+        self._mount.run_shell(["mv", "grandmother/parent", "grandfather"])
+        self._mount.write_n_mb("grandfather/parent/new_pos_file", 2)
+        self._filesystem.mds_asok(["flush", "journal"])
+
+        self._initial_state = (
+            self._mount.stat("grandfather/parent/orig_pos_file"),
+            self._mount.stat("grandfather/parent/new_pos_file")
+        )
+
+    def validate(self):
+        root_files = self._mount.ls()
+        self.assert_equal(len(root_files), 1)
+        self.assert_equal(root_files[0] in ["grandfather", "grandmother"], True)
+        winner = root_files[0]
+        st_opf = self._mount.stat("{0}/parent/orig_pos_file".format(winner))
+        st_npf = self._mount.stat("{0}/parent/new_pos_file".format(winner))
+
+        self.assert_equal(st_opf['st_size'], self._initial_state[0]['st_size'])
+        self.assert_equal(st_npf['st_size'], self._initial_state[1]['st_size'])
+
+
+class MissingZerothObject(Workload):
+    def write(self):
+        self._mount.run_shell(["mkdir", "subdir"])
+        self._mount.write_n_mb("subdir/sixmegs", 6)
+        self._initial_state = self._mount.stat("subdir/sixmegs")
+
+    def damage(self):
+        super(MissingZerothObject, self).damage()
+        zeroth_id = "{0:x}.00000000".format(self._initial_state['st_ino'])
+        self._filesystem.rados(["rm", zeroth_id], pool=self._filesystem.get_data_pool_name())
+
+    def validate(self):
+        st = self._mount.stat("lost+found/{0:x}".format(self._initial_state['st_ino']))
+        self.assert_equal(st['st_size'], self._initial_state['st_size'])
+
+
+class NonDefaultLayout(Workload):
+    """
+    Check that the reconstruction copes with files that have a different
+    object size in their layout
+    """
+    def write(self):
+        self._mount.run_shell(["touch", "datafile"])
+        self._mount.run_shell(["setfattr", "-n", "ceph.file.layout.object_size", "-v", "8388608", "./datafile"])
+        self._mount.run_shell(["dd", "if=/dev/urandom", "of=./datafile", "bs=1M", "count=32"])
+        self._initial_state = self._mount.stat("datafile")
+
+    def validate(self):
+        p = self._mount.run_shell(["getfattr", "--only-values", "-n", "ceph.file.layout.object_size", "./datafile"])
+
+        # Check we got the layout reconstructed properly
+        object_size = int(p.stdout.getvalue().strip())
+        self.assert_equal(object_size, 8388608)
+
+        # Check we got the file size reconstructed properly
+        st = self._mount.stat("datafile")
+        self.assert_equal(st['st_size'], self._initial_state['st_size'])
+
+
+class TestDataScan(CephFSTestCase):
+    MDSS_REQUIRED = 2
+
+    def is_marked_damaged(self, rank):
+        mds_map = self.fs.get_mds_map()
+        return rank in mds_map['damaged']
+
+    def _rebuild_metadata(self, workload, workers=1):
+        """
+        That when all objects in the metadata pool are removed, we can rebuild the metadata pool
+        based on the contents of a data pool, and a client can see and read our files.
+        """
+
+        # First, inject some files
+        workload.write()
+
+        # Unmount the client and flush the journal: the tool should also cope with
+        # situations where there is dirty metadata, but we'll test that separately
+        self.mount_a.umount_wait()
+        workload.flush()
+
+        # Stop the MDS
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # After recovery, we need the MDS to not be strict about stats (these options are
+        # off by default in production, but QA enables them, so we explicitly disable them here)
+        self.fs.set_ceph_conf('mds', 'mds verify scatter', False)
+        self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False)
+
+        # Apply any data damage the workload wants
+        workload.damage()
+
+        # Reset the MDS map in case multiple ranks were in play: recovery procedure
+        # only understands how to rebuild metadata under rank 0
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
+                '--yes-i-really-mean-it')
+
+        # Attempt to start an MDS, see that it goes into damaged state
+        self.fs.mds_restart()
+
+        def get_state(mds_id):
+            info = self.mds_cluster.get_mds_info(mds_id)
+            return info['state'] if info is not None else None
+
+        self.wait_until_true(lambda: self.is_marked_damaged(0), 60)
+        for mds_id in self.fs.mds_ids:
+            self.wait_until_equal(
+                    lambda: get_state(mds_id),
+                    "up:standby",
+                    timeout=60)
+
+        # Run the recovery procedure
+        self.fs.table_tool(["0", "reset", "session"])
+        self.fs.table_tool(["0", "reset", "snap"])
+        self.fs.table_tool(["0", "reset", "inode"])
+        if False:
+            with self.assertRaises(CommandFailedError):
+                # Normal reset should fail when no objects are present, we'll use --force instead
+                self.fs.journal_tool(["journal", "reset"])
+        self.fs.journal_tool(["journal", "reset", "--force"])
+        self.fs.data_scan(["init"])
+        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()], worker_count=workers)
+        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()], worker_count=workers)
+
+        # Mark the MDS repaired
+        self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+
+        # Start the MDS
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+        import json
+        log.info(json.dumps(self.mds_cluster.get_fs_map(), indent=2))
+
+        # Mount a client
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # See that the files are present and correct
+        errors = workload.validate()
+        if errors:
+            log.error("Validation errors found: {0}".format(len(errors)))
+            for e in errors:
+                log.error(e.exception)
+                log.error(e.backtrace)
+            raise AssertionError("Validation failed, first error: {0}\n{1}".format(
+                errors[0].exception, errors[0].backtrace
+            ))
+
+    def test_rebuild_simple(self):
+        self._rebuild_metadata(SimpleWorkload(self.fs, self.mount_a))
+
+    def test_rebuild_moved_file(self):
+        self._rebuild_metadata(MovedFile(self.fs, self.mount_a))
+
+    def test_rebuild_backtraceless(self):
+        self._rebuild_metadata(BacktracelessFile(self.fs, self.mount_a))
+
+    def test_rebuild_moved_dir(self):
+        self._rebuild_metadata(MovedDir(self.fs, self.mount_a))
+
+    def test_rebuild_missing_zeroth(self):
+        self._rebuild_metadata(MissingZerothObject(self.fs, self.mount_a))
+
+    def test_rebuild_nondefault_layout(self):
+        self._rebuild_metadata(NonDefaultLayout(self.fs, self.mount_a))
+
+    def test_stashed_layout(self):
+        self._rebuild_metadata(StripedStashedLayout(self.fs, self.mount_a))
+
+    def _dirfrag_keys(self, object_id):
+        keys_str = self.fs.rados(["listomapkeys", object_id])
+        if keys_str:
+            return keys_str.split("\n")
+        else:
+            return []
+
+    def test_fragmented_injection(self):
+        """
+        That when injecting a dentry into a fragmented directory, we put it in the right fragment.
+        """
+
+        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true",
+                                            "--yes-i-really-mean-it")
+
+        file_count = 100
+        file_names = ["%s" % n for n in range(0, file_count)]
+
+        # Create a directory of `file_count` files, each named after its
+        # decimal number and containing the string of its decimal number
+        self.mount_a.run_python(dedent("""
+        import os
+        path = os.path.join("{path}", "subdir")
+        os.mkdir(path)
+        for n in range(0, {file_count}):
+            open(os.path.join(path, "%s" % n), 'w').write("%s" % n)
+        """.format(
+            path=self.mount_a.mountpoint,
+            file_count=file_count
+        )))
+
+        dir_ino = self.mount_a.path_to_ino("subdir")
+
+        # Only one MDS should be active!
+        self.assertEqual(len(self.fs.get_active_names()), 1)
+
+        # Ensure that one directory is fragmented
+        mds_id = self.fs.get_active_names()[0]
+        self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id)
+
+        # Flush journal and stop MDS
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # Pick a dentry and wipe out its key
+        # Because I did a 1 bit split, I know one frag will be named <inode>.01000000
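+        # (Dirfrag objects in the metadata pool are named "<ino in hex>.<frag>":
+        #  the unfragmented directory lives in <ino>.00000000, and after the one
+        #  bit split above one of the halves is <ino>.01000000, which is where
+        #  this test picks its victim dentry from.)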
+        frag_obj_id = "{0:x}.01000000".format(dir_ino)
+        keys = self._dirfrag_keys(frag_obj_id)
+        victim_key = keys[7]  # arbitrary choice
+        log.info("victim_key={0}".format(victim_key))
+        victim_dentry = victim_key.split("_head")[0]
+        self.fs.rados(["rmomapkey", frag_obj_id, victim_key])
+
+        # Start filesystem back up, observe that the file appears to be gone in an `ls`
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        files = self.mount_a.run_shell(["ls", "subdir/"]).stdout.getvalue().strip().split("\n")
+        self.assertListEqual(sorted(files), sorted(list(set(file_names) - set([victim_dentry]))))
+
+        # Stop the filesystem
+        self.mount_a.umount_wait()
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # Run data-scan, observe that it inserts our dentry back into the correct fragment
+        # by checking the omap now has the dentry's key again
+        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
+        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
+        self.assertIn(victim_key, self._dirfrag_keys(frag_obj_id))
+
+        # Start the filesystem and check that the dentry we deleted is now once again visible
+        # and points to the correct file data.
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip()
+        self.assertEqual(out, victim_dentry)
+
+        # Finally, close the loop by checking our injected dentry survives a merge
+        mds_id = self.fs.get_active_names()[0]
+        self.mount_a.ls("subdir")  # Do an ls to ensure both frags are in cache so the merge will work
+        self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id)
+        frag_obj_id = "{0:x}.00000000".format(dir_ino)
+        keys = self._dirfrag_keys(frag_obj_id)
+        self.assertListEqual(sorted(keys), sorted(["%s_head" % f for f in file_names]))
+
+    @long_running
+    def test_parallel_execution(self):
+        self._rebuild_metadata(ManyFilesWorkload(self.fs, self.mount_a, 25), workers=7)
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
new file mode 100644
index 0000000..5183beb
--- /dev/null
+++ b/qa/tasks/cephfs/test_failover.py
@@ -0,0 +1,465 @@
+import json
+import logging
+from unittest import case
+from cephfs_test_case import CephFSTestCase
+from teuthology.exceptions import CommandFailedError
+
+log = logging.getLogger(__name__)
+
+
+class TestFailover(CephFSTestCase):
+    CLIENTS_REQUIRED = 1
+    MDSS_REQUIRED = 2
+
+    def test_simple(self):
+        """
+        That when the active MDS is killed, a standby MDS is promoted into
+        its rank after the grace period.
+
+        This is just a simple unit test, the harder cases are covered
+        in thrashing tests.
+        """
+
+        # Need all my standbys up as well as the active daemons
+        self.wait_for_daemon_start()
+
+        (original_active, ) = self.fs.get_active_names()
+        original_standbys = self.mds_cluster.get_standby_daemons()
+
+        # Kill the rank 0 daemon's physical process
+        self.fs.mds_stop(original_active)
+
+        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
+
+        # Wait until the monitor promotes its replacement
+        def promoted():
+            active = self.fs.get_active_names()
+            return active and active[0] in original_standbys
+
+        log.info("Waiting for promotion of one of the original standbys {0}".format(
+            original_standbys))
+        self.wait_until_true(
+            promoted,
+            timeout=grace*2)
+
+        # Start the original rank 0 daemon up again, see that it becomes a standby
+        self.fs.mds_restart(original_active)
+        self.wait_until_true(
+            lambda: original_active in self.mds_cluster.get_standby_daemons(),
+            timeout=60  # Approximately long enough for MDS to start and mon to notice
+        )
+
+    def test_client_abort(self):
+        """
+        That a client will respect fuse_require_active_mds and error out
+        when the cluster appears to be unavailable.
+        """
+
+        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
+        if not require_active:
+            raise case.SkipTest("fuse_require_active_mds is not set")
+
+        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
+
+        # Check it's not laggy to begin with
+        (original_active, ) = self.fs.get_active_names()
+        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))
+
+        self.mounts[0].umount_wait()
+
+        # Control: that we can mount and unmount usually, while the cluster is healthy
+        self.mounts[0].mount()
+        self.mounts[0].wait_until_mounted()
+        self.mounts[0].umount_wait()
+
+        # Stop the daemon processes
+        self.fs.mds_stop()
+
+        # Wait for everyone to go laggy
+        def laggy():
+            mdsmap = self.fs.get_mds_map()
+            for info in mdsmap['info'].values():
+                if "laggy_since" not in info:
+                    return False
+
+            return True
+
+        self.wait_until_true(laggy, grace * 2)
+        with self.assertRaises(CommandFailedError):
+            self.mounts[0].mount()
+
+
+class TestStandbyReplay(CephFSTestCase):
+    MDSS_REQUIRED = 2
+    REQUIRE_FILESYSTEM = False
+
+    def test_standby_failure(self):
+        """
+        That the failure of a standby-replay daemon happens cleanly
+        and doesn't interrupt anything else.
+        """
+        # Pick out exactly 2 daemons to be run during test
+        use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
+        mds_a, mds_b = use_daemons
+        log.info("Using MDS daemons: {0}".format(use_daemons))
+
+        def set_standby_for(leader, follower, replay):
+            self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
+            if replay:
+                self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")
+
+        # Configure a pair of MDSs that are standby for each other
+        set_standby_for(mds_a, mds_b, True)
+        set_standby_for(mds_b, mds_a, False)
+
+        # Create FS alpha and get mds_a to come up as active
+        fs_a = self.mds_cluster.get_filesystem("alpha")
+        fs_a.create()
+        self.mds_cluster.mds_restart(mds_a)
+        fs_a.wait_for_daemons()
+        self.assertEqual(fs_a.get_active_names(), [mds_a])
+
+        # Start the standbys
+        self.mds_cluster.mds_restart(mds_b)
+        self.wait_for_daemon_start([mds_b])
+
+        def get_info_by_name(fs, mds_name):
+            mds_map = fs.get_mds_map()
+            for gid_str, info in mds_map['info'].items():
+                if info['name'] == mds_name:
+                    return info
+
+            log.warn(json.dumps(mds_map, indent=2))
+            raise RuntimeError("MDS '{0}' not found in filesystem MDSMap".format(mds_name))
+
+        # See the standby come up as the correct rank
+        info_b = get_info_by_name(fs_a, mds_b)
+        self.assertEqual(info_b['state'], "up:standby-replay")
+        self.assertEqual(info_b['standby_for_name'], mds_a)
+        self.assertEqual(info_b['rank'], 0)
+
+        # Kill the standby
+        self.mds_cluster.mds_stop(mds_b)
+        self.mds_cluster.mds_fail(mds_b)
+
+        # See that the standby is gone and the active remains
+        self.assertEqual(fs_a.get_active_names(), [mds_a])
+        mds_map = fs_a.get_mds_map()
+        self.assertEqual(len(mds_map['info']), 1)
+        self.assertEqual(mds_map['failed'], [])
+        self.assertEqual(mds_map['damaged'], [])
+        self.assertEqual(mds_map['stopped'], [])
+
+class TestMultiFilesystems(CephFSTestCase):
+    CLIENTS_REQUIRED = 2
+    MDSS_REQUIRED = 4
+
+    # We'll create our own filesystems and start our own daemons
+    REQUIRE_FILESYSTEM = False
+
+    def setUp(self):
+        super(TestMultiFilesystems, self).setUp()
+        self.fs.mon_manager.raw_cluster_cmd("fs", "flag", "set",
+                                            "enable_multiple", "true",
+                                            "--yes-i-really-mean-it")
+
+    def _setup_two(self):
+        fs_a = self.mds_cluster.get_filesystem("alpha")
+        fs_b = self.mds_cluster.get_filesystem("bravo")
+        fs_a.create()
+        fs_b.create()
+
+        self.mds_cluster.mds_restart()
+
+        # Wait for both filesystems to go healthy
+        fs_a.wait_for_daemons()
+        fs_b.wait_for_daemons()
+
+        # Reconfigure client auth caps
+        for mount in self.mounts:
+            self.fs.mon_manager.raw_cluster_cmd_result(
+                'auth', 'caps', "client.{0}".format(mount.client_id),
+                'mds', 'allow',
+                'mon', 'allow r',
+                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
+                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))
+
+        return fs_a, fs_b
+
+    def test_clients(self):
+        fs_a, fs_b = self._setup_two()
+
+        # Mount a client on fs_a
+        fs_a.set_ceph_conf(
+            "client.{0}".format(self.mount_a.client_id), "client_mds_namespace",
+            fs_a.get_namespace_id().__str__()
+        )
+        self.mount_a.mount()
+        self.mount_a.write_n_mb("pad.bin", 1)
+        self.mount_a.write_n_mb("test.bin", 2)
+        a_created_ino = self.mount_a.path_to_ino("test.bin")
+        self.mount_a.create_files()
+        self.mount_a.umount_wait()
+
+        # Mount a client on fs_b
+        fs_b.set_ceph_conf(
+            "client.{0}".format(self.mount_b.client_id), "client_mds_namespace",
+            fs_b.get_namespace_id().__str__()
+        )
+        self.mount_b.mount()
+        self.mount_b.write_n_mb("test.bin", 1)
+        b_created_ino = self.mount_b.path_to_ino("test.bin")
+        self.mount_b.create_files()
+        self.mount_b.umount_wait()
+
+        # See that the client's files went into the correct pool
+        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
+        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))
+
+    def test_standby(self):
+        fs_a, fs_b = self._setup_two()
+
+        # Assert that the remaining two MDS daemons are now standbys
+        a_daemons = fs_a.get_active_names()
+        b_daemons = fs_b.get_active_names()
+        self.assertEqual(len(a_daemons), 1)
+        self.assertEqual(len(b_daemons), 1)
+        original_a = a_daemons[0]
+        original_b = b_daemons[0]
+        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))
+
+        # Need all my standbys up as well as the active daemons
+        self.wait_for_daemon_start()
+        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())
+
+        # Kill fs_a's active MDS, see a standby take over
+        self.mds_cluster.mds_stop(original_a)
+        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
+        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
+                              reject_fn=lambda v: v > 1)
+        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
+        self.assertNotEqual(fs_a.get_active_names()[0], original_a)
+
+        # Kill fs_b's active MDS, see a standby take over
+        self.mds_cluster.mds_stop(original_b)
+        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
+        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
+                              reject_fn=lambda v: v > 1)
+        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
+        self.assertNotEqual(fs_b.get_active_names()[0], original_b)
+
+        # Both of the original active daemons should be gone, and all standbys used up
+        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())
+
+        # Restart the ones I killed, see them reappear as standbys
+        self.mds_cluster.mds_restart(original_a)
+        self.mds_cluster.mds_restart(original_b)
+        self.wait_until_true(
+            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
+            timeout=30
+        )
+
+    def test_grow_shrink(self):
+        # Usual setup...
+        fs_a, fs_b = self._setup_two()
+        fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
+                                         "allow_multimds", "true",
+                                         "--yes-i-really-mean-it")
+
+        fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
+                                         "allow_multimds", "true",
+                                         "--yes-i-really-mean-it")
+
+        # Increase max_mds on fs_b, see a standby take up the role
+        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "2")
+        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+
+        # Increase max_mds on fs_a, see a standby take up the role
+        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2")
+        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+
+        # Shrink fs_b back to 1, see a daemon go back to standby
+        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "1")
+        fs_b.mon_manager.raw_cluster_cmd('mds', 'deactivate', "{0}:1".format(fs_b.name))
+        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+
+        # Grow fs_a up to 3, see the former fs_b daemon join it.
+        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "3")
+        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
+                              reject_fn=lambda v: v > 3 or v < 2)
+
+    def test_standby_for_name(self):
+        # Pick out exactly 4 daemons to be run during test
+        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
+        mds_a, mds_b, mds_c, mds_d = use_daemons
+        log.info("Using MDS daemons: {0}".format(use_daemons))
+
+        def set_standby_for(leader, follower, replay):
+            self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
+            if replay:
+                self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")
+
+        # Configure two pairs of MDSs that are standby for each other
+        set_standby_for(mds_a, mds_b, True)
+        set_standby_for(mds_b, mds_a, False)
+        set_standby_for(mds_c, mds_d, True)
+        set_standby_for(mds_d, mds_c, False)
+
+        # Create FS alpha and get mds_a to come up as active
+        fs_a = self.mds_cluster.get_filesystem("alpha")
+        fs_a.create()
+        self.mds_cluster.mds_restart(mds_a)
+        fs_a.wait_for_daemons()
+        self.assertEqual(fs_a.get_active_names(), [mds_a])
+
+        # Create FS bravo and get mds_c to come up as active
+        fs_b = self.mds_cluster.get_filesystem("bravo")
+        fs_b.create()
+        self.mds_cluster.mds_restart(mds_c)
+        fs_b.wait_for_daemons()
+        self.assertEqual(fs_b.get_active_names(), [mds_c])
+
+        # Start the standbys
+        self.mds_cluster.mds_restart(mds_b)
+        self.mds_cluster.mds_restart(mds_d)
+        self.wait_for_daemon_start([mds_b, mds_d])
+
+        def get_info_by_name(fs, mds_name):
+            mds_map = fs.get_mds_map()
+            for gid_str, info in mds_map['info'].items():
+                if info['name'] == mds_name:
+                    return info
+
+            log.warn(json.dumps(mds_map, indent=2))
+            raise RuntimeError("MDS '{0}' not found in filesystem MDSMap".format(mds_name))
+
+        # See both standbys come up as standby replay for the correct ranks
+        # mds_b should be in filesystem alpha following mds_a
+        info_b = get_info_by_name(fs_a, mds_b)
+        self.assertEqual(info_b['state'], "up:standby-replay")
+        self.assertEqual(info_b['standby_for_name'], mds_a)
+        self.assertEqual(info_b['rank'], 0)
+        # mds_d should be in filesystem bravo following mds_c
+        info_d = get_info_by_name(fs_b, mds_d)
+        self.assertEqual(info_d['state'], "up:standby-replay")
+        self.assertEqual(info_d['standby_for_name'], mds_c)
+        self.assertEqual(info_d['rank'], 0)
+
+        # Kill both active daemons
+        self.mds_cluster.mds_stop(mds_a)
+        self.mds_cluster.mds_fail(mds_a)
+        self.mds_cluster.mds_stop(mds_c)
+        self.mds_cluster.mds_fail(mds_c)
+
+        # Wait for standbys to take over
+        fs_a.wait_for_daemons()
+        self.assertEqual(fs_a.get_active_names(), [mds_b])
+        fs_b.wait_for_daemons()
+        self.assertEqual(fs_b.get_active_names(), [mds_d])
+
+        # Start the original active daemons up again
+        self.mds_cluster.mds_restart(mds_a)
+        self.mds_cluster.mds_restart(mds_c)
+        self.wait_for_daemon_start([mds_a, mds_c])
+
+        self.assertEqual(set(self.mds_cluster.get_standby_daemons()),
+                         {mds_a, mds_c})
+
+    def test_standby_for_rank(self):
+        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
+        mds_a, mds_b, mds_c, mds_d = use_daemons
+        log.info("Using MDS daemons: {0}".format(use_daemons))
+
+        def set_standby_for(leader_rank, leader_fs, follower_id):
+            self.set_conf("mds.{0}".format(follower_id),
+                          "mds_standby_for_rank", leader_rank)
+
+            fscid = leader_fs.get_namespace_id()
+            self.set_conf("mds.{0}".format(follower_id),
+                          "mds_standby_for_fscid", fscid)
+
+        fs_a = self.mds_cluster.get_filesystem("alpha")
+        fs_a.create()
+        fs_b = self.mds_cluster.get_filesystem("bravo")
+        fs_b.create()
+        set_standby_for(0, fs_a, mds_a)
+        set_standby_for(0, fs_a, mds_b)
+        set_standby_for(0, fs_b, mds_c)
+        set_standby_for(0, fs_b, mds_d)
+
+        self.mds_cluster.mds_restart(mds_a)
+        fs_a.wait_for_daemons()
+        self.assertEqual(fs_a.get_active_names(), [mds_a])
+
+        self.mds_cluster.mds_restart(mds_c)
+        fs_b.wait_for_daemons()
+        self.assertEqual(fs_b.get_active_names(), [mds_c])
+
+        self.mds_cluster.mds_restart(mds_b)
+        self.mds_cluster.mds_restart(mds_d)
+        self.wait_for_daemon_start([mds_b, mds_d])
+
+        self.mds_cluster.mds_stop(mds_a)
+        self.mds_cluster.mds_fail(mds_a)
+        self.mds_cluster.mds_stop(mds_c)
+        self.mds_cluster.mds_fail(mds_c)
+
+        fs_a.wait_for_daemons()
+        self.assertEqual(fs_a.get_active_names(), [mds_b])
+        fs_b.wait_for_daemons()
+        self.assertEqual(fs_b.get_active_names(), [mds_d])
+
+    def test_standby_for_fscid(self):
+        """
+        That I can set a standby FSCID with no rank, and the result is
+        that daemons join any rank for that filesystem.
+        """
+        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
+        mds_a, mds_b, mds_c, mds_d = use_daemons
+
+        log.info("Using MDS daemons: {0}".format(use_daemons))
+
+        def set_standby_for(leader_fs, follower_id):
+            fscid = leader_fs.get_namespace_id()
+            self.set_conf("mds.{0}".format(follower_id),
+                          "mds_standby_for_fscid", fscid)
+
+        # Create two filesystems which should have two ranks each
+        fs_a = self.mds_cluster.get_filesystem("alpha")
+        fs_a.create()
+        fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
+                                         "allow_multimds", "true",
+                                         "--yes-i-really-mean-it")
+
+        fs_b = self.mds_cluster.get_filesystem("bravo")
+        fs_b.create()
+        fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
+                                         "allow_multimds", "true",
+                                         "--yes-i-really-mean-it")
+
+        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
+                                         'max_mds', "2")
+        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name,
+                                         'max_mds', "2")
+
+        # Set all the daemons to have a FSCID assignment but no other
+        # standby preferences.
+        set_standby_for(fs_a, mds_a)
+        set_standby_for(fs_a, mds_b)
+        set_standby_for(fs_b, mds_c)
+        set_standby_for(fs_b, mds_d)
+
+        # Now when we start all daemons at once, they should fall into
+        # ranks in the right filesystem
+        self.mds_cluster.mds_restart(mds_a)
+        self.mds_cluster.mds_restart(mds_b)
+        self.mds_cluster.mds_restart(mds_c)
+        self.mds_cluster.mds_restart(mds_d)
+        self.wait_for_daemon_start([mds_a, mds_b, mds_c, mds_d])
+        fs_a.wait_for_daemons()
+        fs_b.wait_for_daemons()
+        self.assertEqual(set(fs_a.get_active_names()), {mds_a, mds_b})
+        self.assertEqual(set(fs_b.get_active_names()), {mds_c, mds_d})
diff --git a/qa/tasks/cephfs/test_flush.py b/qa/tasks/cephfs/test_flush.py
new file mode 100644
index 0000000..c83db08
--- /dev/null
+++ b/qa/tasks/cephfs/test_flush.py
@@ -0,0 +1,113 @@
+
+from textwrap import dedent
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO
+
+
+class TestFlush(CephFSTestCase):
+    def test_flush(self):
+        self.mount_a.run_shell(["mkdir", "mydir"])
+        self.mount_a.run_shell(["touch", "mydir/alpha"])
+        dir_ino = self.mount_a.path_to_ino("mydir")
+        file_ino = self.mount_a.path_to_ino("mydir/alpha")
+
+        # Unmount the client so that it isn't still holding caps
+        self.mount_a.umount_wait()
+
+        # Before flush, the dirfrag object does not exist
+        with self.assertRaises(ObjectNotFound):
+            self.fs.list_dirfrag(dir_ino)
+
+        # Before flush, the file's backtrace has not been written
+        with self.assertRaises(ObjectNotFound):
+            self.fs.read_backtrace(file_ino)
+
+        # Before flush, there are no dentries in the root
+        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
+
+        # Execute flush
+        flush_data = self.fs.mds_asok(["flush", "journal"])
+        self.assertEqual(flush_data['return_code'], 0)
+
+        # After flush, the dirfrag object has been created
+        dir_list = self.fs.list_dirfrag(dir_ino)
+        self.assertEqual(dir_list, ["alpha_head"])
+
+        # And the 'mydir' dentry is in the root
+        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head'])
+
+        # ...and the data object has its backtrace
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']])
+        self.assertEqual([dir_ino, 1], [a['dirino'] for a in backtrace['ancestors']])
+        self.assertEqual(file_ino, backtrace['ino'])
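+        # For reference, the decoded backtrace is expected to look roughly like:
+        #   {'ino': <alpha's ino>,
+        #    'ancestors': [{'dname': 'alpha', 'dirino': <mydir's ino>},
+        #                  {'dname': 'mydir', 'dirino': 1}]}
+        # which is what the three assertions above check.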
+
+        # ...and the journal is truncated to just a single subtreemap from the
+        # newly created segment
+        summary_output = self.fs.journal_tool(["event", "get", "summary"])
+        try:
+            self.assertEqual(summary_output,
+                             dedent(
+                                 """
+                                 Events by type:
+                                   SUBTREEMAP: 1
+                                 Errors: 0
+                                 """
+                             ).strip())
+        except AssertionError:
+            # In some states, flushing the journal will leave you
+            # an extra event from locks a client held.   This is
+            # correct behaviour: the MDS is flushing the journal,
+            # it's just that new events are getting added too.
+            # In this case, we should nevertheless see a fully
+            # empty journal after a second flush.
+            self.assertEqual(summary_output,
+                             dedent(
+                                 """
+                                 Events by type:
+                                   SUBTREEMAP: 1
+                                   UPDATE: 1
+                                 Errors: 0
+                                 """
+                             ).strip())
+            flush_data = self.fs.mds_asok(["flush", "journal"])
+            self.assertEqual(flush_data['return_code'], 0)
+            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]),
+                             dedent(
+                                 """
+                                 Events by type:
+                                   SUBTREEMAP: 1
+                                 Errors: 0
+                                 """
+                             ).strip())
+
+        # Now for deletion!
+        # We will count the RADOS deletions and MDS file purges, to verify that
+        # the expected behaviour is happening as a result of the purge
+        initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete']
+        initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_purged']
+
+        # Use a client to delete the directory and the file inside it
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        self.mount_a.run_shell(["rm", "-rf", "mydir"])
+
+        # Flush the journal so that the directory inode can be purged
+        flush_data = self.fs.mds_asok(["flush", "journal"])
+        self.assertEqual(flush_data['return_code'], 0)
+
+        # We expect to see both the file and its directory purged
+        self.wait_until_true(
+            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_purged'] - initial_purges >= 2,
+            60)
+
+        # We expect two deletions, one of the dirfrag and one of the backtrace
+        self.wait_until_true(
+            lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2,
+            60)  # timeout is fairly long to allow for tick+rados latencies
+
+        with self.assertRaises(ObjectNotFound):
+            self.fs.list_dirfrag(dir_ino)
+        with self.assertRaises(ObjectNotFound):
+            self.fs.read_backtrace(file_ino)
+        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
new file mode 100644
index 0000000..6a179a6
--- /dev/null
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -0,0 +1,196 @@
+
+"""
+Test that the forward scrub functionality can traverse metadata and apply
+requested tags, on well formed metadata.
+
+This is *not* the real testing for forward scrub, which will need to test
+how the functionality responds to damaged metadata.
+
+"""
+import json
+
+import logging
+from collections import namedtuple
+from textwrap import dedent
+
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+import struct
+
+log = logging.getLogger(__name__)
+
+
+ValidationError = namedtuple("ValidationError", ["exception", "backtrace"])
+
+
+class TestForwardScrub(CephFSTestCase):
+    MDSS_REQUIRED = 1
+
+    def _read_str_xattr(self, pool, obj, attr):
+        """
+        Read a ceph-encoded string from a rados xattr
+        """
+        output = self.fs.rados(["getxattr", obj, attr], pool=pool)
+        strlen = struct.unpack('i', output[0:4])[0]
+        return output[4:(4 + strlen)]
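+        # Illustrative example: a stored tag "mytag" comes back from rados as a
+        # 4-byte length prefix (5) followed by the raw bytes, so this helper
+        # would return "mytag".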
+
+    def _get_paths_to_ino(self):
+        inos = {}
+        p = self.mount_a.run_shell(["find", "./"])
+        paths = p.stdout.getvalue().strip().split()
+        for path in paths:
+            inos[path] = self.mount_a.path_to_ino(path)
+
+        return inos
+
+    def test_apply_tag(self):
+        self.mount_a.run_shell(["mkdir", "parentdir"])
+        self.mount_a.run_shell(["mkdir", "parentdir/childdir"])
+        self.mount_a.run_shell(["touch", "rfile"])
+        self.mount_a.run_shell(["touch", "parentdir/pfile"])
+        self.mount_a.run_shell(["touch", "parentdir/childdir/cfile"])
+
+        # Build a structure mapping path to inode, as we will later want
+        # to check object by object and objects are named after ino number
+        inos = self._get_paths_to_ino()
+
+        # Flush metadata: this is a friendly test of forward scrub so we're skipping
+        # the part where it's meant to cope with dirty metadata
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"])
+
+        tag = "mytag"
+
+        # Execute tagging forward scrub
+        self.fs.mds_asok(["tag", "path", "/parentdir", tag])
+        # Wait for completion
+        import time
+        time.sleep(10)
+        # FIXME watching clog isn't a nice mechanism for this, once we have a ScrubMap we'll
+        # watch that instead
+
+        # Check that dirs were tagged
+        for dirpath in ["./parentdir", "./parentdir/childdir"]:
+            self.assertTagged(inos[dirpath], tag, self.fs.get_metadata_pool_name())
+
+        # Check that files were tagged
+        for filepath in ["./parentdir/pfile", "./parentdir/childdir/cfile"]:
+            self.assertTagged(inos[filepath], tag, self.fs.get_data_pool_name())
+
+        # This file wasn't in the tag path, so it shouldn't have been tagged
+        self.assertUntagged(inos["./rfile"])
+
+    def assertUntagged(self, ino):
+        file_obj_name = "{0:x}.00000000".format(ino)
+        with self.assertRaises(CommandFailedError):
+            self._read_str_xattr(
+                self.fs.get_data_pool_name(),
+                file_obj_name,
+                "scrub_tag"
+            )
+
+    def assertTagged(self, ino, tag, pool):
+        file_obj_name = "{0:x}.00000000".format(ino)
+        wrote = self._read_str_xattr(
+            pool,
+            file_obj_name,
+            "scrub_tag"
+        )
+        self.assertEqual(wrote, tag)
+
+    def _validate_linkage(self, expected):
+        inos = self._get_paths_to_ino()
+        try:
+            self.assertDictEqual(inos, expected)
+        except AssertionError:
+            log.error("Expected: {0}".format(json.dumps(expected, indent=2)))
+            log.error("Actual: {0}".format(json.dumps(inos, indent=2)))
+            raise
+
+    def test_orphan_scan(self):
+        # Create some files whose metadata we will flush
+        self.mount_a.run_python(dedent("""
+            import os
+            mount_point = "{mount_point}"
+            parent = os.path.join(mount_point, "parent")
+            os.mkdir(parent)
+            flushed = os.path.join(parent, "flushed")
+            os.mkdir(flushed)
+            for f in ["alpha", "bravo", "charlie"]:
+                open(os.path.join(flushed, f), 'w').write(f)
+        """.format(mount_point=self.mount_a.mountpoint)))
+
+        inos = self._get_paths_to_ino()
+
+        # Flush journal
+        # Umount before flush to avoid cap releases putting
+        # things we don't want in the journal later.
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"])
+
+        # Create a new inode that's just in the log, i.e. would
+        # look orphaned to backward scan if backward scan were not
+        # respecting the scrub_tag xattr.
+        self.mount_a.mount()
+        self.mount_a.run_shell(["mkdir", "parent/unflushed"])
+        self.mount_a.run_shell(["dd", "if=/dev/urandom",
+                                "of=./parent/unflushed/jfile",
+                                "bs=1M", "count=8"])
+        inos["./parent/unflushed"] = self.mount_a.path_to_ino("./parent/unflushed")
+        inos["./parent/unflushed/jfile"] = self.mount_a.path_to_ino("./parent/unflushed/jfile")
+        self.mount_a.umount_wait()
+
+        # Orphan an inode by deleting its dentry
+        # Our victim will be.... bravo.
+        self.mount_a.umount_wait()
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+        self.fs.set_ceph_conf('mds', 'mds verify scatter', False)
+        self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False)
+        frag_obj_id = "{0:x}.00000000".format(inos["./parent/flushed"])
+        self.fs.rados(["rmomapkey", frag_obj_id, "bravo_head"])
+
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+
+        # See that the orphaned file is indeed missing from a client's POV
+        self.mount_a.mount()
+        damaged_state = self._get_paths_to_ino()
+        self.assertNotIn("./parent/flushed/bravo", damaged_state)
+        self.mount_a.umount_wait()
+
+        # Run a tagging forward scrub
+        tag = "mytag123"
+        self.fs.mds_asok(["tag", "path", "/parent", tag])
+
+        # See that the orphan was not tagged
+        self.assertUntagged(inos['./parent/flushed/bravo'])
+
+        # See that the flushed-metadata-and-still-present files are tagged
+        self.assertTagged(inos['./parent/flushed/alpha'], tag, self.fs.get_data_pool_name())
+        self.assertTagged(inos['./parent/flushed/charlie'], tag, self.fs.get_data_pool_name())
+
+        # See that journalled-but-not-flushed file *was* tagged
+        self.assertTagged(inos['./parent/unflushed/jfile'], tag, self.fs.get_data_pool_name())
+
+        # Run cephfs-data-scan targeting only orphans
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
+        self.fs.data_scan([
+            "scan_inodes",
+            "--filter-tag", tag,
+            self.fs.get_data_pool_name()
+        ])
+
+        # After in-place injection stats should be kosher again
+        self.fs.set_ceph_conf('mds', 'mds verify scatter', True)
+        self.fs.set_ceph_conf('mds', 'mds debug scatterstat', True)
+
+        # And we should have all the same linkage we started with,
+        # and no lost+found, and no extra inodes!
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+        self.mount_a.mount()
+        self._validate_linkage(inos)
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
new file mode 100644
index 0000000..8013f91
--- /dev/null
+++ b/qa/tasks/cephfs/test_full.py
@@ -0,0 +1,410 @@
+
+
+import json
+import logging
+import os
+from textwrap import dedent
+import time
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+
+log = logging.getLogger(__name__)
+
+
+class FullnessTestCase(CephFSTestCase):
+    CLIENTS_REQUIRED = 2
+
+    # Subclasses define whether they're filling whole cluster or just data pool
+    data_only = False
+
+    # Subclasses define how many bytes should be written to achieve fullness
+    pool_capacity = None
+    fill_mb = None
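+    # A hypothetical subclass (illustrative only, not one of the real
+    # subclasses) might override these roughly as follows:
+    #
+    #   class ExampleFull(FullnessTestCase):
+    #       pool_capacity = 32 * 1024 * 1024          # made-up byte count
+    #       fill_mb = pool_capacity / (1024 * 1024)   # i.e. 32
+    #
+    #       def is_full(self):
+    #           # e.g. consult the cluster or pool full flags
+    #           raise NotImplementedError()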
+
+    # Subclasses define what fullness means to them
+    def is_full(self):
+        raise NotImplementedError()
+
+    def setUp(self):
+        CephFSTestCase.setUp(self)
+
+        # These tests just use a single active MDS throughout, so remember its ID
+        # for use in mds_asok calls
+        self.active_mds_id = self.fs.get_active_names()[0]
+
+        # Capture the initial OSD map epoch for later use
+        self.initial_osd_epoch = json.loads(
+            self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip()
+        )['epoch']
+
+        # Check the initial barrier epoch on the MDS: this should be
+        # set to the latest map at MDS startup.  We do this check in
+        # setUp to get in there before subclasses might touch things
+        # in their own setUp functions.
+        self.assertGreaterEqual(self.fs.mds_asok(["status"], mds_id=self.active_mds_id)['osdmap_epoch_barrier'],
+                                self.initial_osd_epoch)
+
+    def test_barrier(self):
+        """
+        That when an OSD epoch barrier is set on an MDS, subsequently
+        issued capabilities cause clients to update their OSD map to that
+        epoch.
+        """
+
+        # Sync up clients with initial MDS OSD map barrier
+        self.mount_a.open_no_data("foo")
+        self.mount_b.open_no_data("bar")
+
+        # Grab mounts' initial OSD epochs: later we will check that
+        # it hasn't advanced beyond this point.
+        mount_a_initial_epoch = self.mount_a.get_osd_epoch()[0]
+        mount_b_initial_epoch = self.mount_b.get_osd_epoch()[0]
+
+        # Freshly mounted at start of test, should be up to date with OSD map
+        self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
+        self.assertGreaterEqual(mount_b_initial_epoch, self.initial_osd_epoch)
+
+        # Set and unset a flag to cause OSD epoch to increment
+        self.fs.mon_manager.raw_cluster_cmd("osd", "set", "pause")
+        self.fs.mon_manager.raw_cluster_cmd("osd", "unset", "pause")
+
+        out = self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip()
+        new_epoch = json.loads(out)['epoch']
+        self.assertNotEqual(self.initial_osd_epoch, new_epoch)
+
+        # Do a metadata operation on clients, witness that they end up with
+        # the old OSD map from startup time (nothing has prompted client
+        # to update its map)
+        self.mount_a.open_no_data("alpha")
+        self.mount_b.open_no_data("bravo1")
+
+        # Sleep long enough that if the OSD map was propagating it would
+        # have done so (this is arbitrary because we are 'waiting' for something
+        # to *not* happen).
+        time.sleep(30)
+
+        mount_a_epoch, mount_a_barrier = self.mount_a.get_osd_epoch()
+        self.assertEqual(mount_a_epoch, mount_a_initial_epoch)
+        mount_b_epoch, mount_b_barrier = self.mount_b.get_osd_epoch()
+        self.assertEqual(mount_b_epoch, mount_b_initial_epoch)
+
+        # Set a barrier on the MDS
+        self.fs.mds_asok(["osdmap", "barrier", new_epoch.__str__()], mds_id=self.active_mds_id)
+
+        # Do an operation on client B, witness that it ends up with
+        # the latest OSD map from the barrier.  This shouldn't generate any
+        # cap revokes to A because B was already the last one to touch
+        # a file in root.
+        self.mount_b.run_shell(["touch", "bravo2"])
+        self.mount_b.open_no_data("bravo2")
+
+        # Some time passes here because the metadata part of the operation
+        # completes immediately, while the resulting OSD map update happens
+        # asynchronously (it's an Objecter::_maybe_request_map) as a result
+        # of seeing the new epoch barrier.
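+        # (get_osd_epoch() returns an (epoch, barrier) tuple, so waiting for
+        #  (new_epoch, new_epoch) means both the client's map and its barrier
+        #  have caught up.)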
+        self.wait_until_equal(
+            lambda: self.mount_b.get_osd_epoch(),
+            (new_epoch, new_epoch),
+            30,
+            lambda x: x[0] > new_epoch or x[1] > new_epoch)
+
+        # ...and none of this should have affected the oblivious mount a,
+        # because it wasn't doing any data or metadata IO
+        mount_a_epoch, mount_a_barrier = self.mount_a.get_osd_epoch()
+        self.assertEqual(mount_a_epoch, mount_a_initial_epoch)
+
+    def _data_pool_name(self):
+        data_pool_names = self.fs.get_data_pool_names()
+        if len(data_pool_names) > 1:
+            raise RuntimeError("This test can't handle multiple data pools")
+        else:
+            return data_pool_names[0]
+
+    def _test_full(self, easy_case):
+        """
+        - That a client trying to write data to a file is prevented
+        from doing so with an -EFULL result
+        - That they are also prevented from creating new files by the MDS.
+        - That they may delete another file to get the system healthy again
+
+        :param easy_case: if true, delete a successfully written file to
+                          free up space.  else, delete the file that experienced
+                          the failed write.
+        """
+
+        osd_mon_report_interval_max = int(self.fs.get_config("osd_mon_report_interval_max", service_type='osd'))
+
+        log.info("Writing {0}MB should fill this cluster".format(self.fill_mb))
+
+        # Fill up the cluster.  This dd may or may not fail, as it depends on
+        # how soon the cluster recognises its own fullness
+        self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+        try:
+            self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+        except CommandFailedError:
+            log.info("Writing file B failed (full status happened already)")
+            assert self.is_full()
+        else:
+            log.info("Writing file B succeeded (full status will happen soon)")
+            self.wait_until_true(lambda: self.is_full(),
+                                 timeout=osd_mon_report_interval_max * 5)
+
+        # Attempting to write more data should give me ENOSPC
+        with self.assertRaises(CommandFailedError) as ar:
+            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+        self.assertEqual(ar.exception.exitstatus, 1)  # dd returns 1 on "No space"
+
+        # Wait for the MDS to see the latest OSD map so that it will reliably
+        # be applying the policy of rejecting non-deletion metadata operations
+        # while in the full state.
+        osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+        self.wait_until_true(
+            lambda: self.fs.mds_asok(['status'], mds_id=self.active_mds_id)['osdmap_epoch'] >= osd_epoch,
+            timeout=10)
+
+        if not self.data_only:
+            with self.assertRaises(CommandFailedError):
+                self.mount_a.write_n_mb("small_file_1", 0)
+
+        # Clear out some space
+        if easy_case:
+            self.mount_a.run_shell(['rm', '-f', 'large_file_a'])
+            self.mount_a.run_shell(['rm', '-f', 'large_file_b'])
+        else:
+            # In the hard case it is the file that filled the system.
+            # Before the new #7317 (ENOSPC, epoch barrier) changes, this
+            # would fail because the last objects written would be
+            # stuck in the client cache as objecter operations.
+            self.mount_a.run_shell(['rm', '-f', 'large_file_b'])
+            self.mount_a.run_shell(['rm', '-f', 'large_file_a'])
+
+        # Here we are waiting for two things to happen:
+        # * The MDS to purge the stray folder and execute object deletions
+        # * The OSDs to inform the mon that they are no longer full
+        self.wait_until_true(lambda: not self.is_full(),
+                             timeout=osd_mon_report_interval_max * 5)
+
+        # Wait for the MDS to see the latest OSD map so that it will reliably
+        # be applying the free space policy
+        osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+        self.wait_until_true(
+            lambda: self.fs.mds_asok(['status'], mds_id=self.active_mds_id)['osdmap_epoch'] >= osd_epoch,
+            timeout=10)
+
+        # Now I should be able to write again
+        self.mount_a.write_n_mb("large_file", 50, seek=0)
+
+        # Ensure that the MDS keeps its OSD epoch barrier across a restart
+
+    def test_full_different_file(self):
+        self._test_full(True)
+
+    def test_full_same_file(self):
+        self._test_full(False)
+
+    def _remote_write_test(self, template):
+        """
+        Run some remote python in a way that's useful for
+        testing free space behaviour (see test_* methods using this)
+        """
+        file_path = os.path.join(self.mount_a.mountpoint, "full_test_file")
+
+        # Enough to trip the full flag
+        osd_mon_report_interval_max = int(self.fs.get_config("osd_mon_report_interval_max", service_type='osd'))
+        mon_tick_interval = int(self.fs.get_config("mon_tick_interval", service_type="mon"))
+
+        # Sufficient data to cause RADOS cluster to go 'full'
+        log.info("pool capacity {0}, {1}MB should be enough to fill it".format(self.pool_capacity, self.fill_mb))
+
+        # Long enough for RADOS cluster to notice it is full and set flag on mons
+        # (report_interval for mon to learn PG stats, tick interval for it to update OSD map,
+        #  factor of 1.5 for I/O + network latency in committing OSD map and distributing it
+        #  to the OSDs)
+        full_wait = (osd_mon_report_interval_max + mon_tick_interval) * 1.5
+
+        # Configs for this test should bring this setting down in order to
+        # run reasonably quickly
+        if osd_mon_report_interval_max > 10:
+            log.warn("This test may run rather slowly unless you decrease "
+                     "osd_mon_report_interval_max (5 is a good setting)!")
+
+        self.mount_a.run_python(template.format(
+            fill_mb=self.fill_mb,
+            file_path=file_path,
+            full_wait=full_wait
+        ))
+
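To put numbers on full_wait: with the suggested osd_mon_report_interval_max of 5 and a mon_tick_interval of 5 (both assumptions about the test configuration), full_wait = (5 + 5) * 1.5 = 15 seconds, which is the pause the fsync script below inserts once it gets close to the expected full point.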
+    def test_full_fclose(self):
+        # A remote script which opens a file handle, fills up the filesystem, and then
+        # checks that ENOSPC errors on buffered writes appear correctly as errors on close
+        remote_script = dedent("""
+            import time
+            import datetime
+            import subprocess
+            import os
+
+            # Write some buffered data through before going full, all should be well
+            print "writing some data through which we expect to succeed"
+            bytes = 0
+            f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
+            bytes += os.write(f, 'a' * 4096)
+            os.fsync(f)
+            print "fsync'ed data successfully, will now attempt to fill fs"
+
+            # Okay, now we're going to fill up the filesystem, and then keep
+            # writing until we see an error from fsync.  As long as we're doing
+            # buffered IO, the error should always only appear from fsync and not
+            # from write
+            full = False
+
+            for n in range(0, {fill_mb}):
+                bytes += os.write(f, 'x' * 1024 * 1024)
+                print "wrote bytes via buffered write, may repeat"
+            print "done writing bytes"
+
+            # OK, now we should sneak in under the full condition
+            # due to the time it takes the OSDs to report to the
+            # mons, and get a successful fsync on our full-making data
+            os.fsync(f)
+            print "successfully fsync'ed prior to getting full state reported"
+
+            # Now wait for the full flag to get set so that our
+            # next flush IO will fail
+            time.sleep(30)
+
+            # A buffered IO, should succeed
+            print "starting buffered write we expect to succeed"
+            os.write(f, 'x' * 4096)
+            print "wrote, now waiting 30s and then doing a close we expect to fail"
+
+            # Wait long enough for a background flush that should fail
+            time.sleep(30)
+
+            # ...and check that the failed background flush is reflected in fclose
+            try:
+                os.close(f)
+            except OSError:
+                print "close() returned an error as expected"
+            else:
+                raise RuntimeError("close() failed to raise error")
+
+            os.unlink("{file_path}")
+            """)
+        self._remote_write_test(remote_script)
+
+    def test_full_fsync(self):
+        """
+        That when the full flag is encountered during asynchronous
+        flushes, an fwrite() succeeds but a subsequent fsync()/fclose()
+        returns the ENOSPC error.
+        """
+
+        # A remote script which opens a file handle, fills up the filesystem, and then
+        # checks that ENOSPC errors on buffered writes appear correctly as errors in fsync
+        remote_script = dedent("""
+            import time
+            import datetime
+            import subprocess
+            import os
+
+            # Write some buffered data through before going full, all should be well
+            print "writing some data through which we expect to succeed"
+            bytes = 0
+            f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
+            bytes += os.write(f, 'a' * 4096)
+            os.fsync(f)
+            print "fsync'ed data successfully, will now attempt to fill fs"
+
+            # Okay, now we're going to fill up the filesystem, and then keep
+            # writing until we see an error from fsync.  As long as we're doing
+            # buffered IO, the error should always only appear from fsync and not
+            # from write
+            full = False
+
+            for n in range(0, {fill_mb} + 1):
+                try:
+                    bytes += os.write(f, 'x' * 1024 * 1024)
+                    print "wrote bytes via buffered write, moving on to fsync"
+                except OSError as e:
+                    print "Unexpected error %s from write() instead of fsync()" % e
+                    raise
+
+                try:
+                    os.fsync(f)
+                    print "fsync'ed successfully"
+                except OSError as e:
+                    print "Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    full = True
+                    break
+                else:
+                    print "Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))
+
+                if n > {fill_mb} * 0.8:
+                    # Be cautious in the last region where we expect to hit
+                    # the full condition, so that we don't overshoot too dramatically
+                    print "sleeping a bit as we've exceeded 80% of our expected full ratio"
+                    time.sleep({full_wait})
+
+            if not full:
+                raise RuntimeError("Failed to reach fullness after writing %d bytes" % bytes)
+
+            # The error sticks to the inode until we dispose of it
+            try:
+                os.close(f)
+            except OSError:
+                print "Saw error from close() as expected"
+            else:
+                raise RuntimeError("Did not see expected error from close()")
+
+            os.unlink("{file_path}")
+            """)
+
+        self._remote_write_test(remote_script)
+
+
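Both remote scripts follow the same underlying pattern: buffered writes keep succeeding while ENOSPC only surfaces from fsync() or close(). A condensed, self-contained sketch of that pattern (illustrative only; the path, chunk size, and iteration cap are arbitrary, not part of the test) looks like:

    import errno
    import os

    def write_until_fsync_fails(path, max_mb=64):
        # Buffered writes are expected to succeed; ENOSPC should surface from
        # fsync() once the cluster (or the pool quota) is full.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT)
        written = 0
        try:
            for _ in range(max_mb):
                written += os.write(fd, b'x' * 1024 * 1024)
                try:
                    os.fsync(fd)
                except OSError as e:
                    if e.errno != errno.ENOSPC:
                        raise
                    return written
        finally:
            try:
                os.close(fd)
            except OSError:
                pass  # the write error can stick to the inode until it is disposed of
        raise RuntimeError("never hit ENOSPC after writing %d bytes" % written)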
+class TestQuotaFull(FullnessTestCase):
+    """
+    Test per-pool fullness, which indicates quota limits exceeded
+    """
+    pool_capacity = 1024 * 1024 * 32   # arbitrary low-ish limit
+    fill_mb = pool_capacity / (1024 * 1024)
+
+    # We are only testing quota handling on the data pool, not the metadata
+    # pool.
+    data_only = True
+
+    def setUp(self):
+        super(TestQuotaFull, self).setUp()
+
+        pool_name = self.fs.get_data_pool_name()
+        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", pool_name,
+                                            "max_bytes", "{0}".format(self.pool_capacity))
+
+    def is_full(self):
+        return self.fs.is_pool_full(self.fs.get_data_pool_name())
+
+
+class TestClusterFull(FullnessTestCase):
+    """
+    Test cluster-wide fullness, which indicates that an OSD has become too full
+    """
+    pool_capacity = None
+    REQUIRE_MEMSTORE = True
+
+    def setUp(self):
+        super(TestClusterFull, self).setUp()
+
+        if self.pool_capacity is None:
+            # This is a hack to overcome weird fluctuations in the reported
+            # `max_avail` attribute of pools that sometimes occurs in between
+            # tests (reason as yet unclear, but this dodges the issue)
+            TestClusterFull.pool_capacity = self.fs.get_pool_df(self._data_pool_name())['max_avail']
+            mon_osd_full_ratio = float(self.fs.get_config("mon_osd_full_ratio"))
+            TestClusterFull.fill_mb = int(1.05 * mon_osd_full_ratio * (self.pool_capacity / (1024.0 * 1024.0)))
+
+    def is_full(self):
+        return self.fs.is_full()
+
+# Hide the parent class so that unittest.loader doesn't try to run it.
+del globals()['FullnessTestCase']
diff --git a/qa/tasks/cephfs/test_journal_migration.py b/qa/tasks/cephfs/test_journal_migration.py
new file mode 100644
index 0000000..873603f
--- /dev/null
+++ b/qa/tasks/cephfs/test_journal_migration.py
@@ -0,0 +1,89 @@
+
+from StringIO import StringIO
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from tasks.workunit import task as workunit
+
+JOURNAL_FORMAT_LEGACY = 0
+JOURNAL_FORMAT_RESILIENT = 1
+
+
+class TestJournalMigration(CephFSTestCase):
+    CLIENTS_REQUIRED = 1
+
+    def test_journal_migration(self):
+        old_journal_version = JOURNAL_FORMAT_LEGACY
+        new_journal_version = JOURNAL_FORMAT_RESILIENT
+
+        self.fs.set_ceph_conf('mds', 'mds journal format', old_journal_version)
+
+        # Create a filesystem using the older journal format.
+        self.mount_a.umount_wait()
+        self.fs.mds_stop()
+        self.fs.recreate()
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+
+        # Do some client work so that the log is populated with something.
+        with self.mount_a.mounted():
+            self.mount_a.create_files()
+            self.mount_a.check_files()  # sanity, this should always pass
+
+            # Run a more substantial workunit so that the length of the log to be
+            # converted spans at least a few segments
+            workunit(self.ctx, {
+                'clients': {
+                    "client.{0}".format(self.mount_a.client_id): ["suites/fsstress.sh"],
+                },
+                "timeout": "3h"
+            })
+
+        # Modify the ceph.conf to ask the MDS to use the new journal format.
+        self.fs.set_ceph_conf('mds', 'mds journal format', new_journal_version)
+
+        # Restart the MDS.
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        # This ensures that all daemons come up into a valid state
+        self.fs.wait_for_daemons()
+
+        # Check that files created in the initial client workload are still visible
+        # in a client mount.
+        with self.mount_a.mounted():
+            self.mount_a.check_files()
+
+        # Verify that the journal really has been rewritten.
+        journal_version = self.fs.get_journal_version()
+        if journal_version != new_journal_version:
+            raise RuntimeError("Journal was not upgraded, version should be {0} but is {1}".format(
+                new_journal_version, journal_version
+            ))
+
+        # Verify that cephfs-journal-tool can now read the rewritten journal
+        inspect_out = self.fs.journal_tool(["journal", "inspect"])
+        if not inspect_out.endswith(": OK"):
+            raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
+                inspect_out
+            ))
+
+        self.fs.journal_tool(["event", "get", "json", "--path", "/tmp/journal.json"])
+        p = self.fs.tool_remote.run(
+            args=[
+                "python",
+                "-c",
+                "import json; print len(json.load(open('/tmp/journal.json')))"
+            ],
+            stdout=StringIO())
+        event_count = int(p.stdout.getvalue().strip())
+        if event_count < 1000:
+            # Approximate value of "lots", expected from having run fsstress
+            raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))
+
+        # Do some client work so that the log is populated with something.
+        with self.mount_a.mounted():
+            workunit(self.ctx, {
+                'clients': {
+                    "client.{0}".format(self.mount_a.client_id): ["fs/misc/trivial_sync.sh"],
+                },
+                "timeout": "3h"
+            })
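The event count check above shells out to a remote python one-liner; the equivalent local logic, assuming the same dump path as used above, is simply:

    import json

    def count_journal_events(dump_path="/tmp/journal.json"):
        # "cephfs-journal-tool event get json --path ..." writes a JSON document
        # whose top-level length is the number of recovered journal events.
        with open(dump_path) as f:
            return len(json.load(f))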
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
new file mode 100644
index 0000000..9ee4924
--- /dev/null
+++ b/qa/tasks/cephfs/test_journal_repair.py
@@ -0,0 +1,439 @@
+
+"""
+Test our tools for recovering the content of damaged journals
+"""
+
+import json
+import logging
+from textwrap import dedent
+import time
+
+from teuthology.orchestra.run import CommandFailedError
+from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO
+from tasks.cephfs.cephfs_test_case import CephFSTestCase, long_running
+from tasks.workunit import task as workunit
+
+log = logging.getLogger(__name__)
+
+
+class TestJournalRepair(CephFSTestCase):
+    MDSS_REQUIRED = 2
+
+    def test_inject_to_empty(self):
+        """
+        That when some dentries are in the journal but nothing is in
+        the backing store, we correctly populate the backing store
+        from the journalled dentries.
+        """
+
+        # Inject metadata operations
+        self.mount_a.run_shell(["touch", "rootfile"])
+        self.mount_a.run_shell(["mkdir", "subdir"])
+        self.mount_a.run_shell(["touch", "subdir/subdirfile"])
+        # There are several different paths for handling hardlinks, depending
+        # on whether an existing dentry (being overwritten) is also a hardlink
+        self.mount_a.run_shell(["mkdir", "linkdir"])
+
+        # Test inode -> remote transition for a dentry
+        self.mount_a.run_shell(["touch", "linkdir/link0"])
+        self.mount_a.run_shell(["rm", "-f", "linkdir/link0"])
+        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link0"])
+
+        # Test nothing -> remote transition
+        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link1"])
+
+        # Test remote -> inode transition
+        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link2"])
+        self.mount_a.run_shell(["rm", "-f", "linkdir/link2"])
+        self.mount_a.run_shell(["touch", "linkdir/link2"])
+
+        # Test remote -> diff remote transition
+        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link3"])
+        self.mount_a.run_shell(["rm", "-f", "linkdir/link3"])
+        self.mount_a.run_shell(["ln", "rootfile", "linkdir/link3"])
+
+        # Test an empty directory
+        self.mount_a.run_shell(["mkdir", "subdir/subsubdir"])
+        self.mount_a.run_shell(["sync"])
+
+        # Before we unmount, make a note of the inode numbers, later we will
+        # check that they match what we recover from the journal
+        rootfile_ino = self.mount_a.path_to_ino("rootfile")
+        subdir_ino = self.mount_a.path_to_ino("subdir")
+        linkdir_ino = self.mount_a.path_to_ino("linkdir")
+        subdirfile_ino = self.mount_a.path_to_ino("subdir/subdirfile")
+        subsubdir_ino = self.mount_a.path_to_ino("subdir/subsubdir")
+
+        self.mount_a.umount_wait()
+
+        # Stop the MDS
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # Now, the journal should contain the operations, but the backing
+        # store shouldn't
+        with self.assertRaises(ObjectNotFound):
+            self.fs.list_dirfrag(subdir_ino)
+        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
+
+        # Execute the dentry recovery, this should populate the backing store
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'])
+
+        # Dentries in ROOT_INO are present
+        self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
+        self.assertEqual(self.fs.list_dirfrag(subdir_ino), ['subdirfile_head', 'subsubdir_head'])
+        self.assertEqual(sorted(self.fs.list_dirfrag(linkdir_ino)),
+                         sorted(['link0_head', 'link1_head', 'link2_head', 'link3_head']))
+
+        # Now check the MDS can read what we wrote: truncate the journal
+        # and start the mds.
+        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        # List files
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # First ls -R to populate MDCache, such that hardlinks will
+        # resolve properly (recover_dentries does not create backtraces,
+        # so ordinarily hardlinks to inodes that happen not to have backtraces
+        # will be invisible in readdir).
+        # FIXME: hook in forward scrub here to regenerate backtraces
+        proc = self.mount_a.run_shell(['ls', '-R'])
+        self.mount_a.umount_wait()  # remount to clear client cache before our second ls
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        proc = self.mount_a.run_shell(['ls', '-R'])
+        self.assertEqual(proc.stdout.getvalue().strip(),
+                         dedent("""
+                         .:
+                         linkdir
+                         rootfile
+                         subdir
+
+                         ./linkdir:
+                         link0
+                         link1
+                         link2
+                         link3
+
+                         ./subdir:
+                         subdirfile
+                         subsubdir
+
+                         ./subdir/subsubdir:
+                         """).strip())
+
+        # Check the correct inos were preserved by path
+        self.assertEqual(rootfile_ino, self.mount_a.path_to_ino("rootfile"))
+        self.assertEqual(subdir_ino, self.mount_a.path_to_ino("subdir"))
+        self.assertEqual(subdirfile_ino, self.mount_a.path_to_ino("subdir/subdirfile"))
+        self.assertEqual(subsubdir_ino, self.mount_a.path_to_ino("subdir/subsubdir"))
+
+        # Check that the hard link handling came out correctly
+        self.assertEqual(self.mount_a.path_to_ino("linkdir/link0"), subdirfile_ino)
+        self.assertEqual(self.mount_a.path_to_ino("linkdir/link1"), subdirfile_ino)
+        self.assertNotEqual(self.mount_a.path_to_ino("linkdir/link2"), subdirfile_ino)
+        self.assertEqual(self.mount_a.path_to_ino("linkdir/link3"), rootfile_ino)
+
+        # Create a new file, ensure it is not issued the same ino as one of the
+        # recovered ones
+        self.mount_a.run_shell(["touch", "afterwards"])
+        new_ino = self.mount_a.path_to_ino("afterwards")
+        self.assertNotIn(new_ino, [rootfile_ino, subdir_ino, subdirfile_ino])
+
+        # Check that we can do metadata ops in the recovered directory
+        self.mount_a.run_shell(["touch", "subdir/subsubdir/subsubdirfile"])
+
+    @long_running # 308s
+    def test_reset(self):
+        """
+        That after forcibly modifying the backing store, we can get back into
+        a good state by resetting the MDSMap.
+
+        The scenario is that we have two active MDSs, and we lose the journals.  Once
+        we have completely lost confidence in the integrity of the metadata, we want to
+        return the system to a single-MDS state to go into a scrub to recover what we
+        can.
+        """
+
+        # Set max_mds to 2
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
+                                                   "true", "--yes-i-really-mean-it")
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+
+        # See that we have two active MDSs
+        self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+        active_mds_names = self.fs.get_active_names()
+
+        # Do a bunch of I/O such that at least some will hit the second MDS: create
+        # lots of directories so that the balancer should find it easy to make a decision
+        # to allocate some of them to the second mds.
+        spammers = []
+        for n in range(0, 16):
+            dir_name = "spam_{0}".format(n)
+            spammers.append(self.mount_a.spam_dir_background(dir_name))
+
+        def subtrees_assigned():
+            got_subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=active_mds_names[0])
+            rank_1_count = len([s for s in got_subtrees if s['auth_first'] == 1])
+
+            # Greater than 1, because there is typically 1 for ~mds1, and once it
+            # has been assigned something in addition to that it means it has been
+            # assigned a "real" subtree.
+            return rank_1_count > 1
+
+        # We are waiting for the MDS to respond to hot directories, which
+        # is not guaranteed to happen at a particular time, so a lengthy timeout here.
+        self.wait_until_true(subtrees_assigned, 600)
+
+        # Flush the journals so that we have some backing store data
+        # belonging to one MDS, and some to the other MDS.
+        for mds_name in active_mds_names:
+            self.fs.mds_asok(["flush", "journal"], mds_name)
+
+        # Stop (hard) the second MDS daemon
+        self.fs.mds_stop(active_mds_names[1])
+
+        # Wipe out the tables for MDS rank 1 so that it is broken and can't start
+        # (this is the simulated failure that we will demonstrate that the disaster
+        #  recovery tools can get us back from)
+        self.fs.erase_metadata_objects(prefix="mds1_")
+
+        # Try to access files from the client
+        blocked_ls = self.mount_a.run_shell(["ls", "-R"], wait=False)
+
+        # Check that this "ls -R" blocked rather than completing: indicates
+        # it got stuck trying to access subtrees which were on the now-dead MDS.
+        log.info("Sleeping to check ls is blocked...")
+        time.sleep(60)
+        self.assertFalse(blocked_ls.finished)
+
+        # This mount is now useless because it will depend on MDS rank 1, and MDS rank 1
+        # is not coming back.  Kill it.
+        log.info("Killing mount, it's blocked on the MDS we killed")
+        self.mount_a.kill()
+        self.mount_a.kill_cleanup()
+        try:
+            # Now that the mount is dead, the ls -R should error out.
+            blocked_ls.wait()
+        except CommandFailedError:
+            pass
+
+        log.info("Terminating spammer processes...")
+        for spammer_proc in spammers:
+            spammer_proc.stdin.close()
+            try:
+                spammer_proc.wait()
+            except CommandFailedError:
+                pass
+
+        # See that the second MDS will crash when it starts and tries to
+        # acquire rank 1
+        damaged_id = active_mds_names[1]
+        self.fs.mds_restart(damaged_id)
+
+        # The daemon taking the damaged rank should begin starting up, then
+        # drop back into standby after asking the mon to mark the rank
+        # damaged.
+        def is_marked_damaged():
+            mds_map = self.fs.get_mds_map()
+            return 1 in mds_map['damaged']
+
+        self.wait_until_true(is_marked_damaged, 60)
+
+        def get_state():
+            info = self.mds_cluster.get_mds_info(damaged_id)
+            return info['state'] if info is not None else None
+
+        self.wait_until_equal(
+                get_state,
+                "up:standby",
+                timeout=60)
+
+        self.fs.mds_stop(damaged_id)
+        self.fs.mds_fail(damaged_id)
+
+        # Now give up and go through a disaster recovery procedure
+        self.fs.mds_stop(active_mds_names[0])
+        self.fs.mds_fail(active_mds_names[0])
+        # Invoke recover_dentries quietly, because otherwise log spews millions of lines
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+        self.fs.table_tool(["0", "reset", "session"])
+        self.fs.journal_tool(["journal", "reset"], rank=0)
+        self.fs.erase_mds_objects(1)
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
+                '--yes-i-really-mean-it')
+
+        # Bring an MDS back online, mount a client, and see that we can walk the full
+        # filesystem tree again
+        self.fs.mds_fail_restart(active_mds_names[0])
+        self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
+                              reject_fn=lambda v: len(v) > 1)
+        self.mount_a.mount()
+        self.mount_a.run_shell(["ls", "-R"], wait=True)
+
+    def test_table_tool(self):
+        active_mdss = self.fs.get_active_names()
+        self.assertEqual(len(active_mdss), 1)
+        mds_name = active_mdss[0]
+
+        self.mount_a.run_shell(["touch", "foo"])
+        self.fs.mds_asok(["flush", "journal"], mds_name)
+
+        log.info(self.fs.table_tool(["all", "show", "inode"]))
+        log.info(self.fs.table_tool(["all", "show", "snap"]))
+        log.info(self.fs.table_tool(["all", "show", "session"]))
+
+        # Inode table should always be the same because initial state
+        # and choice of inode are deterministic.
+        # Should see one inode consumed
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "show", "inode"])),
+            {"0": {
+                "data": {
+                    "version": 2,
+                    "inotable": {
+                        "projected_free": [
+                            {"start": 1099511628777,
+                             "len": 1099511626775}],
+                        "free": [
+                            {"start": 1099511628777,
+                             "len": 1099511626775}]}},
+                "result": 0}}
+
+        )
+
+        # Should see one session
+        session_data = json.loads(self.fs.table_tool(
+            ["all", "show", "session"]))
+        self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 1)
+        self.assertEqual(session_data["0"]["result"], 0)
+
+        # Should see no snaps
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "show", "snap"])),
+            {"version": 0,
+             "snapserver": {"last_snap": 1,
+                            "pending_noop": [],
+                            "snaps": [],
+                            "need_to_purge": {},
+                            "pending_update": [],
+                            "pending_destroy": []},
+             "result": 0}
+        )
+
+        # Reset everything
+        for table in ["session", "inode", "snap"]:
+            self.fs.table_tool(["all", "reset", table])
+
+        log.info(self.fs.table_tool(["all", "show", "inode"]))
+        log.info(self.fs.table_tool(["all", "show", "snap"]))
+        log.info(self.fs.table_tool(["all", "show", "session"]))
+
+        # Should see 0 sessions
+        session_data = json.loads(self.fs.table_tool(
+            ["all", "show", "session"]))
+        self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 0)
+        self.assertEqual(session_data["0"]["result"], 0)
+
+        # Should see entire inode range now marked free
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "show", "inode"])),
+            {"0": {"data": {"version": 1,
+                            "inotable": {"projected_free": [
+                                {"start": 1099511627776,
+                                 "len": 1099511627776}],
+                                 "free": [
+                                    {"start": 1099511627776,
+                                    "len": 1099511627776}]}},
+                   "result": 0}}
+        )
+
+        # Should see no snaps
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "show", "snap"])),
+            {"version": 1,
+             "snapserver": {"last_snap": 1,
+                            "pending_noop": [],
+                            "snaps": [],
+                            "need_to_purge": {},
+                            "pending_update": [],
+                            "pending_destroy": []},
+             "result": 0}
+        )
+
+    def test_table_tool_take_inos(self):
+        initial_range_start = 1099511627776
+        initial_range_len = 1099511627776
+        # Initially a completely clear range
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "show", "inode"])),
+            {"0": {"data": {"version": 0,
+                            "inotable": {"projected_free": [
+                                {"start": initial_range_start,
+                                 "len": initial_range_len}],
+                                "free": [
+                                    {"start": initial_range_start,
+                                     "len": initial_range_len}]}},
+                   "result": 0}}
+        )
+
+        # Remove some
+        self.assertEqual(
+            json.loads(self.fs.table_tool(["all", "take_inos", "{0}".format(initial_range_start + 100)])),
+            {"0": {"data": {"version": 1,
+                            "inotable": {"projected_free": [
+                                {"start": initial_range_start + 101,
+                                 "len": initial_range_len - 101}],
+                                "free": [
+                                    {"start": initial_range_start + 101,
+                                     "len": initial_range_len - 101}]}},
+                   "result": 0}}
+        )
+
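The arithmetic behind the expected ranges: the inode table starts with a single free range of length 1099511627776 beginning at 1099511627776. take_inos up to initial_range_start + 100 consumes the 101 inos from the range start through start + 100 inclusive, so the free range afterwards begins at start + 101 and its length shrinks by 101, exactly as the expected JSON above encodes.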
+    @long_running  # Hack: "long running" because .sh doesn't work outside teuth
+    def test_journal_smoke(self):
+        workunit(self.ctx, {
+            'clients': {
+                "client.{0}".format(self.mount_a.client_id): [
+                    "fs/misc/trivial_sync.sh"],
+            },
+            "timeout": "1h"
+        })
+
+        for mount in self.mounts:
+            mount.umount_wait()
+
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        # journal tool smoke
+        workunit(self.ctx, {
+            'clients': {
+                "client.{0}".format(self.mount_a.client_id): [
+                    "suites/cephfs_journal_tool_smoke.sh"],
+            },
+            "timeout": "1h"
+        })
+
+        self.fs.mds_restart()
+        self.fs.wait_for_daemons()
+
+        self.mount_a.mount()
+
+        # trivial sync on mount a
+        workunit(self.ctx, {
+            'clients': {
+                "client.{0}".format(self.mount_a.client_id): [
+                    "fs/misc/trivial_sync.sh"],
+            },
+            "timeout": "1h"
+        })
+
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
new file mode 100644
index 0000000..bd8ba64
--- /dev/null
+++ b/qa/tasks/cephfs/test_misc.py
@@ -0,0 +1,33 @@
+
+from unittest import SkipTest
+from tasks.cephfs.fuse_mount import FuseMount
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+class TestMisc(CephFSTestCase):
+    CLIENTS_REQUIRED = 2
+    def test_getattr_caps(self):
+        """
+        Check that the MDS recognizes the 'mask' parameter of the open request.
+        The parameter allows a client to request caps when opening a file.
+        """
+
+        if not isinstance(self.mount_a, FuseMount):
+            raise SkipTest("Require FUSE client")
+
+        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
+        # on lookup/open
+        self.mount_b.umount_wait()
+        self.set_conf('client', 'client debug getattr caps', 'true')
+        self.mount_b.mount()
+        self.mount_b.wait_until_mounted()
+
+        # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
+        # to mount_a
+        p = self.mount_a.open_background("testfile")
+        self.mount_b.wait_for_visible("testfile")
+
+        # this triggers a lookup request and an open request. The debug
+        # code will check whether the lookup/open reply contains xattrs
+        self.mount_b.run_shell(["cat", "testfile"])
+
+        self.mount_a.kill_background(p)
diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
new file mode 100644
index 0000000..b558cb1
--- /dev/null
+++ b/qa/tasks/cephfs/test_pool_perm.py
@@ -0,0 +1,117 @@
+from textwrap import dedent
+from teuthology.exceptions import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+import os
+
+
+class TestPoolPerm(CephFSTestCase):
+    def test_pool_perm(self):
+        self.mount_a.run_shell(["touch", "test_file"])
+
+        file_path = os.path.join(self.mount_a.mountpoint, "test_file")
+
+        remote_script = dedent("""
+            import os
+            import errno
+
+            fd = os.open("{path}", os.O_RDWR)
+            try:
+                if {check_read}:
+                    ret = os.read(fd, 1024)
+                else:
+                    os.write(fd, 'content')
+            except OSError as e:
+                if e.errno != errno.EPERM:
+                    raise
+            else:
+                raise RuntimeError("client does not check permission of data pool")
+            """)
+
+        client_name = "client.{0}".format(self.mount_a.client_id)
+
+        # set data pool read only
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+
+        self.mount_a.umount_wait()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # write should fail
+        self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
+
+        # set data pool write only
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+
+        self.mount_a.umount_wait()
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # read should fail
+        self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
+
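For reference, the raw_cluster_cmd_result calls above are the programmatic form of the `ceph auth caps` command; the read-only variant corresponds roughly to `ceph auth caps client.0 mds 'allow' mon 'allow r' osd 'allow r pool=cephfs_data'`, where the client id and pool name here are illustrative rather than taken from the test run.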
+    def test_forbidden_modification(self):
+        """
+        That a client who does not have the capability for setting
+        layout pools is prevented from doing so.
+        """
+
+        # Set up
+        client_name = "client.{0}".format(self.mount_a.client_id)
+        new_pool_name = "data_new"
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool_name,
+                                            self.fs.get_pgs_per_fs_pool().__str__())
+        self.fs.mon_manager.raw_cluster_cmd('mds', 'add_data_pool', new_pool_name)
+
+        self.mount_a.run_shell(["touch", "layoutfile"])
+        self.mount_a.run_shell(["mkdir", "layoutdir"])
+
+        # Set MDS 'rw' perms: missing 'p' means no setting pool layouts
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r',
+            'osd',
+            'allow rw pool={0},allow rw pool={1}'.format(
+                self.fs.get_data_pool_names()[0],
+                self.fs.get_data_pool_names()[1],
+            ))
+
+        self.mount_a.umount_wait()
+        self.mount_a.mount()
+
+        with self.assertRaises(CommandFailedError):
+            self.mount_a.run_shell(["setfattr",
+                                    "-n", "ceph.file.layout.pool",
+                                    "-v", new_pool_name, "layoutfile"])
+        with self.assertRaises(CommandFailedError):
+            self.mount_a.run_shell(["setfattr",
+                                    "-n", "ceph.dir.layout.pool",
+                                    "-v", new_pool_name, "layoutdir"])
+        self.mount_a.umount_wait()
+
+        # Set MDS 'rwp' perms: should now be able to set layouts
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r',
+            'osd',
+            'allow rw pool={0},allow rw pool={1}'.format(
+                self.fs.get_data_pool_names()[0],
+                self.fs.get_data_pool_names()[1],
+            ))
+        self.mount_a.mount()
+        self.mount_a.run_shell(["setfattr",
+                                "-n", "ceph.file.layout.pool",
+                                "-v", new_pool_name, "layoutfile"])
+        self.mount_a.run_shell(["setfattr",
+                                "-n", "ceph.dir.layout.pool",
+                                "-v", new_pool_name, "layoutdir"])
+        self.mount_a.umount_wait()
+
+    def tearDown(self):
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
+            'mds', 'allow', 'mon', 'allow r', 'osd',
+            'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
+        super(TestPoolPerm, self).tearDown()
+
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
new file mode 100644
index 0000000..a2de527
--- /dev/null
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -0,0 +1,245 @@
+"""
+MDS admin socket scrubbing-related tests.
+"""
+import json
+import logging
+import errno
+import time
+from teuthology.exceptions import CommandFailedError
+import os
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+log = logging.getLogger(__name__)
+
+
+class TestScrubChecks(CephFSTestCase):
+    """
+    Run flush and scrub commands on the specified files in the filesystem. This
+    task will run through a sequence of operations, but it is not comprehensive
+    on its own -- it doesn't manipulate the mds cache state to test on both
+    in- and out-of-memory parts of the hierarchy. So it's designed to be run
+    multiple times within a single test run, so that the test can manipulate
+    memory state.
+
+    Usage:
+    mds_scrub_checks:
+      mds_rank: 0
+      path: path/to/test/dir
+      client: 0
+      run_seq: [0-9]+
+
+    Increment the run_seq on subsequent invocations within a single test run;
+    it uses that value to generate unique folder and file names.
+    """
+
+    MDSS_REQUIRED = 1
+    CLIENTS_REQUIRED = 1
+
+    def test_scrub_checks(self):
+        self._checks(0)
+        self._checks(1)
+
+    def _checks(self, run_seq):
+        mds_rank = 0
+        test_dir = "scrub_test_path"
+
+        abs_test_path = "/{0}".format(test_dir)
+
+        log.info("mountpoint: {0}".format(self.mount_a.mountpoint))
+        client_path = os.path.join(self.mount_a.mountpoint, test_dir)
+        log.info("client_path: {0}".format(client_path))
+
+        log.info("Cloning repo into place")
+        repo_path = self.clone_repo(self.mount_a, client_path)
+
+        log.info("Initiating mds_scrub_checks on mds.{id_}, "
+                 "test_path {path}, run_seq {seq}".format(
+                     id_=mds_rank, path=abs_test_path, seq=run_seq))
+
+        success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0)
+
+        nep = "{test_path}/i/dont/exist".format(test_path=abs_test_path)
+        self.asok_command(mds_rank, "flush_path {nep}".format(nep=nep),
+                          lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT))
+        self.asok_command(mds_rank, "scrub_path {nep}".format(nep=nep),
+                          lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT))
+
+        test_repo_path = "{test_path}/ceph-qa-suite".format(test_path=abs_test_path)
+        dirpath = "{repo_path}/suites".format(repo_path=test_repo_path)
+
+        if run_seq == 0:
+            log.info("First run: flushing {dirpath}".format(dirpath=dirpath))
+            command = "flush_path {dirpath}".format(dirpath=dirpath)
+            self.asok_command(mds_rank, command, success_validator)
+        command = "scrub_path {dirpath}".format(dirpath=dirpath)
+        self.asok_command(mds_rank, command, success_validator)
+
+        filepath = "{repo_path}/suites/fs/verify/validater/valgrind.yaml".format(
+            repo_path=test_repo_path)
+        if run_seq == 0:
+            log.info("First run: flushing {filepath}".format(filepath=filepath))
+            command = "flush_path {filepath}".format(filepath=filepath)
+            self.asok_command(mds_rank, command, success_validator)
+        command = "scrub_path {filepath}".format(filepath=filepath)
+        self.asok_command(mds_rank, command, success_validator)
+
+        filepath = "{repo_path}/suites/fs/basic/clusters/fixed-3-cephfs.yaml". \
+            format(repo_path=test_repo_path)
+        command = "scrub_path {filepath}".format(filepath=filepath)
+        self.asok_command(mds_rank, command,
+                          lambda j, r: self.json_validator(j, r, "performed_validation",
+                                                           False))
+
+        if run_seq == 0:
+            log.info("First run: flushing base dir /")
+            command = "flush_path /"
+            self.asok_command(mds_rank, command, success_validator)
+        command = "scrub_path /"
+        self.asok_command(mds_rank, command, success_validator)
+
+        new_dir = "{repo_path}/new_dir_{i}".format(repo_path=repo_path, i=run_seq)
+        test_new_dir = "{repo_path}/new_dir_{i}".format(repo_path=test_repo_path,
+                                                        i=run_seq)
+        self.mount_a.run_shell(["mkdir", new_dir])
+        command = "flush_path {dir}".format(dir=test_new_dir)
+        self.asok_command(mds_rank, command, success_validator)
+
+        new_file = "{repo_path}/new_file_{i}".format(repo_path=repo_path,
+                                                     i=run_seq)
+        test_new_file = "{repo_path}/new_file_{i}".format(repo_path=test_repo_path,
+                                                          i=run_seq)
+        self.mount_a.write_n_mb(new_file, 1)
+
+        command = "flush_path {file}".format(file=test_new_file)
+        self.asok_command(mds_rank, command, success_validator)
+
+        # check that scrub fails on errors
+        ino = self.mount_a.path_to_ino(new_file)
+        rados_obj_name = "{ino:x}.00000000".format(ino=ino)
+        command = "scrub_path {file}".format(file=test_new_file)
+
+        # Missing parent xattr -> ENODATA
+        self.fs.rados(["rmxattr", rados_obj_name, "parent"], pool=self.fs.get_data_pool_name())
+        self.asok_command(mds_rank, command,
+                          lambda j, r: self.json_validator(j, r, "return_code", -errno.ENODATA))
+
+        # Missing object -> ENOENT
+        self.fs.rados(["rm", rados_obj_name], pool=self.fs.get_data_pool_name())
+        self.asok_command(mds_rank, command,
+                          lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT))
+
+        command = "flush_path /"
+        self.asok_command(mds_rank, command, success_validator)
+
+    def test_scrub_repair(self):
+        mds_rank = 0
+        test_dir = "scrub_repair_path"
+
+        self.mount_a.run_shell(["sudo", "mkdir", test_dir])
+        self.mount_a.run_shell(["sudo", "touch", "{0}/file".format(test_dir)])
+        dir_objname = "{:x}.00000000".format(self.mount_a.path_to_ino(test_dir))
+
+        self.mount_a.umount_wait()
+
+        # flush journal entries to dirfrag objects, and expire journal
+        self.fs.mds_asok(['flush', 'journal'])
+        self.fs.mds_stop()
+
+        # remove the dentry from dirfrag, cause incorrect fragstat/rstat
+        self.fs.rados(["rmomapkey", dir_objname, "file_head"],
+                      pool=self.fs.get_metadata_pool_name())
+
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+
+        # fragstat indicates the directory is not empty, rmdir should fail
+        with self.assertRaises(CommandFailedError) as ar:
+            self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+        self.assertEqual(ar.exception.exitstatus, 1)
+
+        self.asok_command(mds_rank, "scrub_path /{0} repair".format(test_dir),
+                          lambda j, r: self.json_validator(j, r, "return_code", 0))
+
+        # wait a few seconds for the background repair
+        time.sleep(10)
+
+        # fragstat should be fixed
+        self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+
+    @staticmethod
+    def json_validator(json_out, rc, element, expected_value):
+        if rc != 0:
+            return False, "asok command returned error {rc}".format(rc=rc)
+        element_value = json_out.get(element)
+        if element_value != expected_value:
+            return False, "unexpectedly got {jv} instead of {ev}!".format(
+                jv=element_value, ev=expected_value)
+        return True, "Succeeded"
+
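As a quick illustration of the validator contract above: json_validator({"return_code": 0}, 0, "return_code", 0) returns (True, "Succeeded"), while any nonzero rc short-circuits to a (False, "asok command returned error ...") tuple before the JSON is inspected at all.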
+    def asok_command(self, mds_rank, command, validator):
+        log.info("Running command '{command}'".format(command=command))
+
+        command_list = command.split()
+
+        # we just assume there's an active mds for every rank
+        mds_id = self.fs.get_active_names()[mds_rank]
+        proc = self.fs.mon_manager.admin_socket('mds', mds_id,
+                                                command_list, check_status=False)
+        rout = proc.exitstatus
+        sout = proc.stdout.getvalue()
+
+        if sout.strip():
+            jout = json.loads(sout)
+        else:
+            jout = None
+
+        log.info("command '{command}' got response code "
+                 "'{rout}' and stdout '{sout}'".format(
+                     command=command, rout=rout, sout=sout))
+
+        success, errstring = validator(jout, rout)
+
+        if not success:
+            raise AsokCommandFailedError(command, rout, jout, errstring)
+
+        return jout
+
+    def clone_repo(self, client_mount, path):
+        repo = "ceph-qa-suite"
+        repo_path = os.path.join(path, repo)
+        client_mount.run_shell(["mkdir", "-p", path])
+
+        try:
+            client_mount.stat(repo_path)
+        except CommandFailedError:
+            client_mount.run_shell([
+                "git", "clone", '--branch', 'giant',
+                "http://github.com/ceph/{repo}".format(repo=repo),
+                "{path}/{repo}".format(path=path, repo=repo)
+            ])
+
+        return repo_path
+
+
+class AsokCommandFailedError(Exception):
+    """
+    Exception thrown when we get an unexpected response
+    on an admin socket command
+    """
+
+    def __init__(self, command, rc, json_out, errstring):
+        self.command = command
+        self.rc = rc
+        self.json = json_out
+        self.errstring = errstring
+
+    def __str__(self):
+        return "Admin socket: {command} failed with rc={rc}, " \
+               "json output={json}, because '{es}'".format(
+                   command=self.command, rc=self.rc,
+                   json=self.json, es=self.errstring)
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
new file mode 100644
index 0000000..eb46764
--- /dev/null
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -0,0 +1,235 @@
+from StringIO import StringIO
+import json
+import logging
+from tasks.cephfs.fuse_mount import FuseMount
+from teuthology.exceptions import CommandFailedError
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+log = logging.getLogger(__name__)
+
+
+class TestSessionMap(CephFSTestCase):
+    CLIENTS_REQUIRED = 2
+    MDSS_REQUIRED = 2
+
+    def test_tell_session_drop(self):
+        """
+        That when a `tell` command is sent using the python CLI,
+        its MDS session is gone after it terminates
+        """
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        mds_id = self.fs.get_lone_mds_id()
+        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "session", "ls")
+
+        ls_data = self.fs.mds_asok(['session', 'ls'])
+        self.assertEqual(len(ls_data), 0)
+
+    def _get_thread_count(self, mds_id):
+        remote = self.fs.mds_daemons[mds_id].remote
+
+        ps_txt = remote.run(
+            args=["ps", "-ww", "axo", "nlwp,cmd"],
+            stdout=StringIO()
+        ).stdout.getvalue().strip()
+        lines = ps_txt.split("\n")[1:]
+
+        for line in lines:
+            if "ceph-mds" in line and "daemon-helper" not in line:
+                if line.find("-i {0}".format(mds_id)) != -1:
+                    log.info("Found ps line for daemon: {0}".format(line))
+                    return int(line.split()[0])
+
+        raise RuntimeError("No process found in ps output for MDS {0}: {1}".format(
+            mds_id, ps_txt
+        ))
+
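For reference, a line that _get_thread_count matches from `ps -ww axo nlwp,cmd` looks something like the following (hypothetical output); the leading NLWP field is the thread count that gets returned:

    23 /usr/bin/ceph-mds -f --cluster ceph -i a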
+    def test_tell_conn_close(self):
+        """
+        That when a `tell` command is sent using the python CLI,
+        the thread count goes back to where it started (i.e. we aren't
+        leaving connections open)
+        """
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        mds_id = self.fs.get_lone_mds_id()
+
+        initial_thread_count = self._get_thread_count(mds_id)
+        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "session", "ls")
+        final_thread_count = self._get_thread_count(mds_id)
+
+        self.assertEqual(initial_thread_count, final_thread_count)
+
+    def test_mount_conn_close(self):
+        """
+        That when a client unmounts, the thread count on the MDS goes back
+        to what it was before the client mounted
+        """
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        mds_id = self.fs.get_lone_mds_id()
+
+        initial_thread_count = self._get_thread_count(mds_id)
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
+        self.assertGreater(self._get_thread_count(mds_id), initial_thread_count)
+        self.mount_a.umount_wait()
+        final_thread_count = self._get_thread_count(mds_id)
+
+        self.assertEqual(initial_thread_count, final_thread_count)
+
+    def test_version_splitting(self):
+        """
+        That when many sessions are updated, they are correctly
+        split into multiple versions to obey mds_sessionmap_keys_per_op
+        """
+
+        # Start umounted
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        # Configure MDS to write one OMAP key at once
+        self.set_conf('mds', 'mds_sessionmap_keys_per_op', 1)
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        # I would like two MDSs, so that I can do an export dir later
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
+                                                   "true", "--yes-i-really-mean-it")
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.wait_for_daemons()
+
+        active_mds_names = self.fs.get_active_names()
+        rank_0_id = active_mds_names[0]
+        rank_1_id = active_mds_names[1]
+        log.info("Ranks 0 and 1 are {0} and {1}".format(
+            rank_0_id, rank_1_id))
+
+        # Bring the clients back
+        self.mount_a.mount()
+        self.mount_b.mount()
+        self.mount_a.create_files()  # Kick the client into opening sessions
+        self.mount_b.create_files()
+
+        # See that they've got sessions
+        self.assert_session_count(2, mds_id=rank_0_id)
+
+        # See that we persist their sessions
+        self.fs.mds_asok(["flush", "journal"], rank_0_id)
+        table_json = json.loads(self.fs.table_tool(["0", "show", "session"]))
+        log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2)))
+        self.assertEqual(table_json['0']['result'], 0)
+        self.assertEqual(len(table_json['0']['data']['Sessions']), 2)
+
+        # Now, induce a "force_open_sessions" event by exporting a dir
+        self.mount_a.run_shell(["mkdir", "bravo"])
+        self.mount_a.run_shell(["touch", "bravo/file"])
+        self.mount_b.run_shell(["ls", "-l", "bravo/file"])
+
+        def get_omap_wrs():
+            return self.fs.mds_asok(['perf', 'dump', 'objecter'], rank_1_id)['objecter']['omap_wr']
+
+        # Flush so that there are no dirty sessions on rank 1
+        self.fs.mds_asok(["flush", "journal"], rank_1_id)
+
+        # Export so that we get a force_open to rank 1 for the two sessions from rank 0
+        initial_omap_wrs = get_omap_wrs()
+        self.fs.mds_asok(['export', 'dir', '/bravo', '1'], rank_0_id)
+
+        # This is the critical (if rather subtle) check: that in the process of doing an export dir,
+        # we hit force_open_sessions, and as a result we end up writing out the sessionmap.  There
+        # will be two sessions dirtied here, and because we have set keys_per_op to 1, we should see
+        # a single session get written out (the first of the two, triggered by the second getting marked
+        # dirty)
+        # The number of writes is two per session, because the header (sessionmap version) update and
+        # KV write both count.
+        self.wait_until_true(
+            lambda: get_omap_wrs() - initial_omap_wrs == 2,
+            timeout=10  # Long enough for an export to get acked
+        )
+
+        # Now end our sessions and check the backing sessionmap is updated correctly
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        # In-memory sessionmap check
+        self.assert_session_count(0, mds_id=rank_0_id)
+
+        # On-disk sessionmap check
+        self.fs.mds_asok(["flush", "journal"], rank_0_id)
+        table_json = json.loads(self.fs.table_tool(["0", "show", "session"]))
+        log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2)))
+        self.assertEqual(table_json['0']['result'], 0)
+        self.assertEqual(len(table_json['0']['data']['Sessions']), 0)
+
+    def _sudo_write_file(self, remote, path, data):
+        """
+        Write data to a remote file as super user
+
+        :param remote: Remote site.
+        :param path: Path on the remote being written to.
+        :param data: Data to be written.
+        """
+        remote.run(
+            args=[
+                'sudo',
+                'python',
+                '-c',
+                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
+                path,
+            ],
+            stdin=data,
+        )
+
+    def _configure_auth(self, mount, id_name, mds_caps, osd_caps=None, mon_caps=None):
+        """
+        Set up auth credentials for a client mount, and write out the keyring
+        for the client to use.
+        """
+
+        # This keyring stuff won't work for kclient
+        assert(isinstance(mount, FuseMount))
+
+        if osd_caps is None:
+            osd_caps = "allow rw"
+
+        if mon_caps is None:
+            mon_caps = "allow r"
+
+        out = self.fs.mon_manager.raw_cluster_cmd(
+            "auth", "get-or-create", "client.{name}".format(name=id_name),
+            "mds", mds_caps,
+            "osd", osd_caps,
+            "mon", mon_caps
+        )
+        mount.client_id = id_name
+        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
+        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
+
+    def test_session_reject(self):
+        self.mount_a.run_shell(["mkdir", "foo"])
+        self.mount_a.run_shell(["mkdir", "foo/bar"])
+        self.mount_a.umount_wait()
+
+        # Mount B will be my rejected client
+        self.mount_b.umount_wait()
+
+        # Configure a client that is limited to /foo/bar
+        self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
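+        # Under the hood this is roughly equivalent to (a sketch; the test goes
+        # through raw_cluster_cmd rather than the shell):
+        #   ceph auth get-or-create client.badguy \
+        #       mds "allow rw path=/foo/bar" osd "allow rw" mon "allow r"
+        # followed by writing the returned keyring out for the FUSE client.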
+        # Check that it can mount that dir and do IO
+        self.mount_b.mount(mount_path="/foo/bar")
+        self.mount_b.wait_until_mounted()
+        self.mount_b.create_destroy()
+        self.mount_b.umount_wait()
+
+        # Configure the client to claim that its mount point metadata is /baz
+        self.set_conf("client.badguy", "client_metadata", "root=/baz")
+        # Try to mount the client, see that it fails
+        with self.assert_cluster_log("client session with invalid root '/baz' denied"):
+            with self.assertRaises(CommandFailedError):
+                self.mount_b.mount(mount_path="/foo/bar")
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
new file mode 100644
index 0000000..e01e0a1
--- /dev/null
+++ b/qa/tasks/cephfs/test_strays.py
@@ -0,0 +1,684 @@
+import json
+import logging
+from textwrap import dedent
+import time
+import gevent
+from tasks.cephfs.cephfs_test_case import CephFSTestCase, long_running
+
+log = logging.getLogger(__name__)
+
+
+class TestStrays(CephFSTestCase):
+    MDSS_REQUIRED = 2
+
+    OPS_THROTTLE = 1
+    FILES_THROTTLE = 2
+
+    # Range of different file sizes used in throttle test's workload
+    throttle_workload_size_range = 16
+
+    @long_running
+    def test_ops_throttle(self):
+        self._test_throttling(self.OPS_THROTTLE)
+
+    @long_running
+    def test_files_throttle(self):
+        self._test_throttling(self.FILES_THROTTLE)
+
+    def test_dir_deletion(self):
+        """
+        That when deleting a bunch of dentries and the containing
+        directory, everything gets purged.
+        Catches cases where the client might e.g. fail to trim
+        the unlinked dir from its cache.
+        """
+        file_count = 1000
+        create_script = dedent("""
+            import os
+
+            mount_path = "{mount_path}"
+            subdir = "delete_me"
+            size = {size}
+            file_count = {file_count}
+            os.mkdir(os.path.join(mount_path, subdir))
+            for i in xrange(0, file_count):
+                filename = "{{0}}_{{1}}.bin".format(i, size)
+                f = open(os.path.join(mount_path, subdir, filename), 'w')
+                f.write(size * 'x')
+                f.close()
+        """.format(
+            mount_path=self.mount_a.mountpoint,
+            size=1024,
+            file_count=file_count
+        ))
+
+        self.mount_a.run_python(create_script)
+        self.mount_a.run_shell(["rm", "-rf", "delete_me"])
+        self.fs.mds_asok(["flush", "journal"])
+        strays = self.get_mdc_stat("strays_created")
+        self.assertEqual(strays, file_count + 1)
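+        # (file_count unlinked files plus the unlinked "delete_me" directory
+        # itself account for the + 1)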
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged"),
+            strays,
+            timeout=600
+        )
+
+    def _test_throttling(self, throttle_type):
+        """
+        That the mds_max_purge_ops setting is respected
+        """
+
+        def set_throttles(files, ops):
+            """
+            Helper for updating ops/files limits, and calculating effective
+            ops_per_pg setting to give the same ops limit.
+            """
+            self.set_conf('mds', 'mds_max_purge_files', "%d" % files)
+            self.set_conf('mds', 'mds_max_purge_ops', "%d" % ops)
+
+            pgs = self.fs.mon_manager.get_pool_property(
+                self.fs.get_data_pool_name(),
+                "pg_num"
+            )
+            ops_per_pg = float(ops) / pgs
+            self.set_conf('mds', 'mds_max_purge_ops_per_pg', "%s" % ops_per_pg)
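+            # Worked example with hypothetical numbers: ops=16 on a data pool
+            # with 8 PGs gives ops_per_pg = 16 / 8 = 2.0, so the per-PG and
+            # absolute limits describe the same effective throttle.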
+
+        # Test conditions depend on what we're going to be exercising.
+        # * Lift the threshold on whatever throttle we are *not* testing, so
+        #   that the throttle of interest is the one that will be the bottleneck
+        # * Create either many small files (test file count throttling) or fewer
+        #   large files (test op throttling)
+        if throttle_type == self.OPS_THROTTLE:
+            set_throttles(files=100000000, ops=16)
+            size_unit = 1024 * 1024  # big files, generate lots of ops
+            file_multiplier = 100
+        elif throttle_type == self.FILES_THROTTLE:
+            # The default value of the file limit is pretty permissive, so to avoid
+            # the test running too fast, create lots of files and set the limit
+            # pretty low.
+            set_throttles(ops=100000000, files=6)
+            size_unit = 1024  # small, numerous files
+            file_multiplier = 200
+        else:
+            raise NotImplementedError(throttle_type)
+
+        # Pick up config changes
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+
+        create_script = dedent("""
+            import os
+
+            mount_path = "{mount_path}"
+            subdir = "delete_me"
+            size_unit = {size_unit}
+            file_multiplier = {file_multiplier}
+            os.mkdir(os.path.join(mount_path, subdir))
+            for i in xrange(0, file_multiplier):
+                for size in xrange(0, {size_range}*size_unit, size_unit):
+                    filename = "{{0}}_{{1}}.bin".format(i, size / size_unit)
+                    f = open(os.path.join(mount_path, subdir, filename), 'w')
+                    f.write(size * 'x')
+                    f.close()
+        """.format(
+            mount_path=self.mount_a.mountpoint,
+            size_unit=size_unit,
+            file_multiplier=file_multiplier,
+            size_range=self.throttle_workload_size_range
+        ))
+
+        self.mount_a.run_python(create_script)
+
+        # We will run the deletion in the background, to reduce the risk of it completing before
+        # we have started monitoring the stray statistics.
+        def background():
+            self.mount_a.run_shell(["rm", "-rf", "delete_me"])
+            self.fs.mds_asok(["flush", "journal"])
+
+        background_thread = gevent.spawn(background)
+
+        total_inodes = file_multiplier * self.throttle_workload_size_range + 1
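+        # e.g. for the OPS_THROTTLE case above: 100 files per size step * 16
+        # size steps + 1 for the "delete_me" directory itself = 1601 inodes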
+        mds_max_purge_ops = int(self.fs.get_config("mds_max_purge_ops", 'mds'))
+        mds_max_purge_files = int(self.fs.get_config("mds_max_purge_files", 'mds'))
+
+        # During this phase we look for the concurrent ops to exceed half
+        # the limit (a heuristic) and not exceed the limit (a correctness
+        # condition).
+        purge_timeout = 600
+        elapsed = 0
+        files_high_water = 0
+        ops_high_water = 0
+        while True:
+            mdc_stats = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']
+            if elapsed >= purge_timeout:
+                raise RuntimeError("Timeout waiting for {0} inodes to purge, stats:{1}".format(total_inodes, mdc_stats))
+
+            num_strays = mdc_stats['num_strays']
+            num_strays_purging = mdc_stats['num_strays_purging']
+            num_purge_ops = mdc_stats['num_purge_ops']
+
+            files_high_water = max(files_high_water, num_strays_purging)
+            ops_high_water = max(ops_high_water, num_purge_ops)
+
+            total_strays_created = mdc_stats['strays_created']
+            total_strays_purged = mdc_stats['strays_purged']
+
+            if total_strays_purged == total_inodes:
+                log.info("Complete purge in {0} seconds".format(elapsed))
+                break
+            elif total_strays_purged > total_inodes:
+                raise RuntimeError("Saw more strays than expected, mdc stats: {0}".format(mdc_stats))
+            else:
+                if throttle_type == self.OPS_THROTTLE:
+                    if num_purge_ops > mds_max_purge_ops:
+                        raise RuntimeError("num_purge_ops violates threshold {0}/{1}".format(
+                            num_purge_ops, mds_max_purge_ops
+                        ))
+                elif throttle_type == self.FILES_THROTTLE:
+                    if num_strays_purging > mds_max_purge_files:
+                        raise RuntimeError("num_strays_purging violates threshold {0}/{1}".format(
+                            num_strays_purging, mds_max_purge_files
+                        ))
+                else:
+                    raise NotImplementedError(throttle_type)
+
+                log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format(
+                    num_strays_purging, num_strays,
+                    total_strays_purged, total_strays_created
+                ))
+                time.sleep(1)
+                elapsed += 1
+
+        background_thread.join()
+
+        # Check that we got up to a respectable rate during the purge.  This is totally
+        # racy, but should be safeish unless the cluster is pathologically slow, or
+        # insanely fast such that the deletions all pass before we have polled the
+        # statistics.
+        if throttle_type == self.OPS_THROTTLE:
+            if ops_high_water < mds_max_purge_ops / 2:
+                raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format(
+                    ops_high_water, mds_max_purge_ops
+                ))
+        elif throttle_type == self.FILES_THROTTLE:
+            if files_high_water < mds_max_purge_files / 2:
+                raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format(
+                    files_high_water, mds_max_purge_files
+                ))
+
+        # Sanity check all MDC stray stats
+        mdc_stats = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']
+        self.assertEqual(mdc_stats['num_strays'], 0)
+        self.assertEqual(mdc_stats['num_strays_purging'], 0)
+        self.assertEqual(mdc_stats['num_strays_delayed'], 0)
+        self.assertEqual(mdc_stats['num_purge_ops'], 0)
+        self.assertEqual(mdc_stats['strays_created'], total_inodes)
+        self.assertEqual(mdc_stats['strays_purged'], total_inodes)
+
+    def get_mdc_stat(self, name, mds_id=None):
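+        # Roughly equivalent to running, against the MDS admin socket (sketch):
+        #   ceph daemon mds.<id> perf dump mds_cache
+        # and reading the named counter out of the "mds_cache" section.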
+        return self.fs.mds_asok(['perf', 'dump', "mds_cache", name],
+                                mds_id=mds_id)['mds_cache'][name]
+
+    def test_open_inode(self):
+        """
+        That the case of a dentry unlinked while a client holds an
+        inode open is handled correctly.
+
+        The inode should be moved into a stray dentry, while the original
+        dentry and directory should be purged.
+
+        The inode's data should be purged when the client eventually closes
+        it.
+        """
+        mount_a_client_id = self.mount_a.get_global_id()
+
+        # Write some bytes to a file
+        size_mb = 8
+        self.mount_a.write_n_mb("open_file", size_mb)
+        open_file_ino = self.mount_a.path_to_ino("open_file")
+
+        # Hold the file open
+        p = self.mount_a.open_background("open_file")
+
+        self.assertEqual(self.get_session(mount_a_client_id)['num_caps'], 2)
+
+        # Unlink the dentry
+        self.mount_a.run_shell(["rm", "-f", "open_file"])
+
+        # Wait to see the stray count increment
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("num_strays"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1)
+
+        # See that while the stray count has incremented, the purge count
+        # has not
+        self.assertEqual(self.get_mdc_stat("strays_created"), 1)
+        self.assertEqual(self.get_mdc_stat("strays_purged"), 0)
+
+        # See that the client still holds 2 caps
+        self.assertEqual(self.get_session(mount_a_client_id)['num_caps'], 2)
+
+        # See that the data objects remain in the data pool
+        self.assertTrue(self.fs.data_objects_present(open_file_ino, size_mb * 1024 * 1024))
+
+        # Now close the file
+        self.mount_a.kill_background(p)
+
+        # Wait to see the client cap count decrement
+        self.wait_until_equal(
+            lambda: self.get_session(mount_a_client_id)['num_caps'],
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 2 or x < 1
+        )
+        # Wait to see the purge counter increment, stray count go to zero
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1
+        )
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("num_strays"),
+            expect_val=0, timeout=6, reject_fn=lambda x: x > 1
+        )
+
+        # See that the data objects no longer exist
+        self.assertTrue(self.fs.data_objects_absent(open_file_ino, size_mb * 1024 * 1024))
+
+        self.await_data_pool_empty()
+
+    def test_hardlink_reintegration(self):
+        """
+        That removal of the primary dentry of a hardlinked inode results
+        in reintegration of the inode into the previously-remote dentry,
+        rather than it lingering as a stray indefinitely.
+        """
+        # Write some bytes to file_a
+        size_mb = 8
+        self.mount_a.write_n_mb("file_a", size_mb)
+        ino = self.mount_a.path_to_ino("file_a")
+
+        # Create a hardlink named file_b
+        self.mount_a.run_shell(["ln", "file_a", "file_b"])
+        self.assertEqual(self.mount_a.path_to_ino("file_b"), ino)
+
+        # Flush journal
+        self.fs.mds_asok(['flush', 'journal'])
+
+        # See that backtrace for the file points to the file_a path
+        pre_unlink_bt = self.fs.read_backtrace(ino)
+        self.assertEqual(pre_unlink_bt['ancestors'][0]['dname'], "file_a")
+
+        # Unlink file_a
+        self.mount_a.run_shell(["rm", "-f", "file_a"])
+
+        # See that a stray was created
+        self.assertEqual(self.get_mdc_stat("num_strays"), 1)
+        self.assertEqual(self.get_mdc_stat("strays_created"), 1)
+
+        # Wait, and see that the data objects are still present (i.e. that
+        # the stray did not advance to purging despite being given time)
+        time.sleep(30)
+        self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024))
+        self.assertEqual(self.get_mdc_stat("strays_purged"), 0)
+
+        # See that before reintegration, the inode's backtrace points to a stray dir
+        self.fs.mds_asok(['flush', 'journal'])
+        self.assertTrue(self.get_backtrace_path(ino).startswith("stray"))
+
+        # Do a metadata operation on the remaining link (mv is heavy handed, but
+        # others like touch may be satisfied from caps without poking MDS)
+        self.mount_a.run_shell(["mv", "file_b", "file_c"])
+
+        # See the reintegration counter increment
+        # This should happen as a result of the eval_remote call on
+        # responding to a client request.
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_reintegrated"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1
+        )
+
+        # Flush the journal
+        self.fs.mds_asok(['flush', 'journal'])
+
+        # See that the backtrace for the file points to the remaining link's path
+        post_reint_bt = self.fs.read_backtrace(ino)
+        self.assertEqual(post_reint_bt['ancestors'][0]['dname'], "file_c")
+
+        # See that the number of strays in existence is zero
+        self.assertEqual(self.get_mdc_stat("num_strays"), 0)
+
+        # Now really delete it
+        self.mount_a.run_shell(["rm", "-f", "file_c"])
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1
+        )
+        self.assert_purge_idle()
+        self.assertTrue(self.fs.data_objects_absent(ino, size_mb * 1024 * 1024))
+
+        # We caused the inode to go stray twice
+        self.assertEqual(self.get_mdc_stat("strays_created"), 2)
+        # One time we reintegrated it
+        self.assertEqual(self.get_mdc_stat("strays_reintegrated"), 1)
+        # Then the second time we purged it
+        self.assertEqual(self.get_mdc_stat("strays_purged"), 1)
+
+    def test_mv_hardlink_cleanup(self):
+        """
+        That when doing a rename from A to B, where B has hardlinks,
+        we make a stray for B, which is then reintegrated
+        into one of its hardlinks.
+        """
+        # Create file_a, file_b, and a hardlink to file_b
+        size_mb = 8
+        self.mount_a.write_n_mb("file_a", size_mb)
+        file_a_ino = self.mount_a.path_to_ino("file_a")
+
+        self.mount_a.write_n_mb("file_b", size_mb)
+        file_b_ino = self.mount_a.path_to_ino("file_b")
+
+        self.mount_a.run_shell(["ln", "file_b", "linkto_b"])
+        self.assertEqual(self.mount_a.path_to_ino("linkto_b"), file_b_ino)
+
+        # mv file_a file_b
+        self.mount_a.run_shell(["mv", "file_a", "file_b"])
+
+        self.fs.mds_asok(['flush', 'journal'])
+
+        # Initially, linkto_b will still be a remote inode pointing to a newly created
+        # stray from when file_b was unlinked due to the 'mv'.  No data objects should
+        # have been deleted, as both files still have linkage.
+        self.assertEqual(self.get_mdc_stat("num_strays"), 1)
+        self.assertEqual(self.get_mdc_stat("strays_created"), 1)
+        self.assertTrue(self.get_backtrace_path(file_b_ino).startswith("stray"))
+        self.assertTrue(self.fs.data_objects_present(file_a_ino, size_mb * 1024 * 1024))
+        self.assertTrue(self.fs.data_objects_present(file_b_ino, size_mb * 1024 * 1024))
+
+        # Trigger reintegration and wait for it to happen
+        self.assertEqual(self.get_mdc_stat("strays_reintegrated"), 0)
+        self.mount_a.run_shell(["mv", "linkto_b", "file_c"])
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_reintegrated"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1
+        )
+
+        self.fs.mds_asok(['flush', 'journal'])
+
+        post_reint_bt = self.fs.read_backtrace(file_b_ino)
+        self.assertEqual(post_reint_bt['ancestors'][0]['dname'], "file_c")
+        self.assertEqual(self.get_mdc_stat("num_strays"), 0)
+
+    def test_migration_on_shutdown(self):
+        """
+        That when an MDS rank is shut down, any not-yet-purging strays
+        are migrated to another MDS's stray dir.
+        """
+
+        # Set up two MDSs
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
+                                                   "true", "--yes-i-really-mean-it")
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+
+        # See that we have two active MDSs
+        self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+
+        active_mds_names = self.fs.get_active_names()
+        rank_0_id = active_mds_names[0]
+        rank_1_id = active_mds_names[1]
+        log.info("Ranks 0 and 1 are {0} and {1}".format(
+            rank_0_id, rank_1_id))
+
+        # Get rid of other MDS daemons so that it's easier to know which
+        # daemons to expect in which ranks after restarts
+        for unneeded_mds in set(self.mds_cluster.mds_ids) - {rank_0_id, rank_1_id}:
+            self.mds_cluster.mds_stop(unneeded_mds)
+            self.mds_cluster.mds_fail(unneeded_mds)
+
+        # Set the purge file throttle to 0 on MDS rank 1
+        self.set_conf("mds.{0}".format(rank_1_id), 'mds_max_purge_files', "0")
+        self.fs.mds_fail_restart(rank_1_id)
+        self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
+                              reject_fn=lambda v: v > 2 or v < 1)
+
+        # Create a file
+        # Export dir on an empty dir doesn't work, so we create the file before
+        # calling export dir in order to kick a dirfrag into existence
+        size_mb = 8
+        self.mount_a.run_shell(["mkdir", "ALPHA"])
+        self.mount_a.write_n_mb("ALPHA/alpha_file", size_mb)
+        ino = self.mount_a.path_to_ino("ALPHA/alpha_file")
+
+        result = self.fs.mds_asok(["export", "dir", "/ALPHA", "1"], rank_0_id)
+        self.assertEqual(result["return_code"], 0)
+
+        # Poll the MDS cache dump to watch for the export completing
+        migrated = False
+        migrate_timeout = 60
+        migrate_elapsed = 0
+        while not migrated:
+            data = self.fs.mds_asok(["dump", "cache"], rank_1_id)
+            for inode_data in data:
+                if inode_data['ino'] == ino:
+                    log.debug("Found ino in cache: {0}".format(json.dumps(inode_data, indent=2)))
+                    if inode_data['is_auth'] is True:
+                        migrated = True
+                    break
+
+            if not migrated:
+                if migrate_elapsed > migrate_timeout:
+                    raise RuntimeError("Migration hasn't happened after {0}s!".format(migrate_elapsed))
+                else:
+                    migrate_elapsed += 1
+                    time.sleep(1)
+
+        # Delete the file on rank 1
+        self.mount_a.run_shell(["rm", "-f", "ALPHA/alpha_file"])
+
+        # See the stray counter increment while the purge counter does not,
+        # and see that the file objects are still on disk
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("num_strays", rank_1_id),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1)
+        self.assertEqual(self.get_mdc_stat("strays_created", rank_1_id), 1)
+        time.sleep(60)  # window in which we check that it does not get purged
+        self.assertEqual(self.get_mdc_stat("strays_purged", rank_1_id), 0)
+        self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024))
+
+        # Shut down rank 1
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
+        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
+
+        # Wait until we get to a single active MDS mdsmap state
+        def is_stopped():
+            mds_map = self.fs.get_mds_map()
+            return 1 not in [i['rank'] for i in mds_map['info'].values()]
+
+        self.wait_until_true(is_stopped, timeout=120)
+
+        # See that the stray counter on rank 0 has incremented
+        self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1)
+
+        # Wait until the purge counter on rank 0 increments
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged", rank_0_id),
+            1, timeout=60, reject_fn=lambda x: x > 1)
+
+        # See that the file objects no longer exist
+        self.assertTrue(self.fs.data_objects_absent(ino, size_mb * 1024 * 1024))
+
+        self.await_data_pool_empty()
+
+    def assert_backtrace(self, ino, expected_path):
+        """
+        Assert that the backtrace in the data pool for an inode matches
+        an expected /foo/bar path.
+        """
+        expected_elements = expected_path.strip("/").split("/")
+        bt = self.fs.read_backtrace(ino)
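+        # read_backtrace() returns JSON shaped roughly like (illustrative; only
+        # the fields used here are shown):
+        #   {"ancestors": [{"dname": "bar"}, {"dname": "foo"}], ...}
+        # Ancestors are listed child-first, hence the reversed() below.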
+        actual_elements = list(reversed([dn['dname'] for dn in bt['ancestors']]))
+        self.assertListEqual(expected_elements, actual_elements)
+
+    def get_backtrace_path(self, ino):
+        bt = self.fs.read_backtrace(ino)
+        elements = reversed([dn['dname'] for dn in bt['ancestors']])
+        return "/".join(elements)
+
+    def assert_purge_idle(self):
+        """
+        Assert that the MDS perf counters indicate no strays exist and
+        no ongoing purge activity.  Sanity check for when PurgeQueue should
+        be idle.
+        """
+        stats = self.fs.mds_asok(['perf', 'dump', "mds_cache"])['mds_cache']
+        self.assertEqual(stats["num_strays"], 0)
+        self.assertEqual(stats["num_strays_purging"], 0)
+        self.assertEqual(stats["num_strays_delayed"], 0)
+        self.assertEqual(stats["num_purge_ops"], 0)
+
+    def test_mv_cleanup(self):
+        """
+        That when doing a rename from A to B, where B has no hardlinks,
+        we make a stray for B and purge it.
+        """
+        # Create file_a and file_b, write some to both
+        size_mb = 8
+        self.mount_a.write_n_mb("file_a", size_mb)
+        file_a_ino = self.mount_a.path_to_ino("file_a")
+        self.mount_a.write_n_mb("file_b", size_mb)
+        file_b_ino = self.mount_a.path_to_ino("file_b")
+
+        self.fs.mds_asok(['flush', 'journal'])
+        self.assert_backtrace(file_a_ino, "file_a")
+        self.assert_backtrace(file_b_ino, "file_b")
+
+        # mv file_a file_b
+        self.mount_a.run_shell(['mv', 'file_a', 'file_b'])
+
+        # See that stray counter increments
+        self.assertEqual(self.get_mdc_stat("strays_created"), 1)
+        # Wait for purge counter to increment
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged"),
+            expect_val=1, timeout=60, reject_fn=lambda x: x > 1
+        )
+        self.assert_purge_idle()
+
+        # file_b should have been purged
+        self.assertTrue(self.fs.data_objects_absent(file_b_ino, size_mb * 1024 * 1024))
+
+        # Backtrace should have updated from file_a to file_b
+        self.fs.mds_asok(['flush', 'journal'])
+        self.assert_backtrace(file_a_ino, "file_b")
+
+        # file_a's data should still exist
+        self.assertTrue(self.fs.data_objects_present(file_a_ino, size_mb * 1024 * 1024))
+
+    def _pool_df(self, pool_name):
+        """
+        Return a dict like
+            {
+                "kb_used": 0,
+                "bytes_used": 0,
+                "max_avail": 19630292406,
+                "objects": 0
+            }
+
+        :param pool_name: Which pool (must exist)
+        """
+        out = self.fs.mon_manager.raw_cluster_cmd("df", "--format=json-pretty")
+        for p in json.loads(out)['pools']:
+            if p['name'] == pool_name:
+                return p['stats']
+
+        raise RuntimeError("Pool '{0}' not found".format(pool_name))
+
+    def await_data_pool_empty(self):
+        self.wait_until_true(
+            lambda: self._pool_df(
+                self.fs.get_data_pool_name()
+            )['objects'] == 0,
+            timeout=60)
+
+    def test_snapshot_remove(self):
+        """
+        That removal of a snapshot that references a now-unlinked file results
+        in purging of the stray for the file.
+        """
+        # Enable snapshots
+        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_new_snaps", "true",
+                                            "--yes-i-really-mean-it")
+
+        # Create a dir with a file in it
+        size_mb = 8
+        self.mount_a.run_shell(["mkdir", "snapdir"])
+        self.mount_a.run_shell(["mkdir", "snapdir/subdir"])
+        self.mount_a.write_test_pattern("snapdir/subdir/file_a", size_mb * 1024 * 1024)
+        file_a_ino = self.mount_a.path_to_ino("snapdir/subdir/file_a")
+
+        # Snapshot the dir
+        self.mount_a.run_shell(["mkdir", "snapdir/.snap/snap1"])
+
+        # Cause the head revision to deviate from the snapshot
+        self.mount_a.write_n_mb("snapdir/subdir/file_a", size_mb)
+
+        # Flush the journal so that backtraces, dirfrag objects will actually be written
+        self.fs.mds_asok(["flush", "journal"])
+
+        # Unlink the file
+        self.mount_a.run_shell(["rm", "-f", "snapdir/subdir/file_a"])
+        self.mount_a.run_shell(["rmdir", "snapdir/subdir"])
+
+        # Unmount the client so that when I come back to check that the data is
+        # still in the file, I'm not just seeing what's in the page cache.
+        self.mount_a.umount_wait()
+
+        self.assertEqual(self.get_mdc_stat("strays_created"), 2)
+
+        # FIXME: at this stage we see a purge and the stray count drops to
+        # zero, but there's actually still a stray, so at the very
+        # least the StrayManager stats code is slightly off
+
+        self.mount_a.mount()
+
+        # See that the data from the snapshotted revision of the file is still present
+        # and correct
+        self.mount_a.validate_test_pattern("snapdir/.snap/snap1/subdir/file_a", size_mb * 1024 * 1024)
+
+        # Remove the snapshot
+        self.mount_a.run_shell(["rmdir", "snapdir/.snap/snap1"])
+        self.mount_a.umount_wait()
+
+        # Purging file_a doesn't happen until after we've flushed the journal, because
+        # it is referenced by the snapshotted subdir, and the snapshot isn't really
+        # gone until the journal references to it are gone
+        self.fs.mds_asok(["flush", "journal"])
+
+        # See that a purge happens now
+        self.wait_until_equal(
+            lambda: self.get_mdc_stat("strays_purged"),
+            expect_val=2, timeout=60, reject_fn=lambda x: x > 2
+        )
+
+        self.assertTrue(self.fs.data_objects_absent(file_a_ino, size_mb * 1024 * 1024))
+        self.await_data_pool_empty()
+
+    def test_fancy_layout(self):
+        """
+        That a stray file with a fancy layout gets purged.
+        """
+
+        file_name = "fancy_layout_file"
+        self.mount_a.run_shell(["touch", file_name])
+
+        file_layout = "stripe_unit=1048576 stripe_count=4 object_size=8388608"
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout", "-v", file_layout, file_name])
+
+        # 35MB requires 7 objects
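+        # Why 7: with stripe_count=4 and object_size=8MB, one object set of 4
+        # objects holds 32MB; the remaining 3MB is striped in 1MB units across
+        # the first 3 objects of the next set, giving 4 + 3 = 7 objects.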
+        size_mb = 35
+        self.mount_a.write_n_mb(file_name, size_mb)
+
+        self.mount_a.run_shell(["rm", "-f", file_name])
+        self.fs.mds_asok(["flush", "journal"])
+
+        # can't use self.fs.data_objects_absent here, it does not support fancy layout
+        self.await_data_pool_empty()
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
new file mode 100644
index 0000000..d2925e5
--- /dev/null
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -0,0 +1,896 @@
+import json
+import logging
+import time
+import os
+from textwrap import dedent
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.exceptions import CommandFailedError
+
+log = logging.getLogger(__name__)
+
+
+class TestVolumeClient(CephFSTestCase):
+    #
+    # TODO: Test that VolumeClient can recover from partial auth updates.
+    #
+
+    # One for looking at the global filesystem, one for being
+    # the VolumeClient, two for mounting the created shares
+    CLIENTS_REQUIRED = 4
+
+    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
+        # Can't dedent this *and* the script we pass in, because they might have different
+        # levels of indentation to begin with, so leave this string zero-indented
+        if vol_prefix:
+            vol_prefix = "\"" + vol_prefix + "\""
+        if ns_prefix:
+            ns_prefix = "\"" + ns_prefix + "\""
+        return client.run_python("""
+from ceph_volume_client import CephFSVolumeClient, VolumePath
+import logging
+log = logging.getLogger("ceph_volume_client")
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.DEBUG)
+vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
+vc.connect()
+{payload}
+vc.disconnect()
+        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))
+
+    def _sudo_write_file(self, remote, path, data):
+        """
+        Write data to a remote file as super user
+
+        :param remote: Remote site.
+        :param path: Path on the remote being written to.
+        :param data: Data to be written.
+        """
+        remote.run(
+            args=[
+                'sudo',
+                'python',
+                '-c',
+                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
+                path,
+            ],
+            stdin=data,
+        )
+
+    def _configure_vc_auth(self, mount, id_name):
+        """
+        Set up auth credentials for the VolumeClient user
+        """
+        out = self.fs.mon_manager.raw_cluster_cmd(
+            "auth", "get-or-create", "client.{name}".format(name=id_name),
+            "mds", "allow *",
+            "osd", "allow rw",
+            "mon", "allow *"
+        )
+        mount.client_id = id_name
+        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
+        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
+
+    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
+                              guest_entity, mount_path,
+                              namespace_prefix=None, readonly=False,
+                              tenant_id=None):
+        """
+        Set up auth credentials for the guest client to mount a volume.
+
+        :param volumeclient_mount: mount used as the handle for driving
+                                   volumeclient.
+        :param guest_mount: mount used by the guest client.
+        :param guest_entity: auth ID used by the guest client.
+        :param mount_path: path of the volume.
+        :param namespace_prefix: name prefix of the RADOS namespace, which
+                                 is used for the volume's layout.
+        :param readonly: defaults to False. If set to 'True' only read-only
+                         mount access is granted to the guest.
+        :param tenant_id: (OpenStack) tenant ID of the guest client.
+        """
+
+        head, volume_id = os.path.split(mount_path)
+        head, group_id = os.path.split(head)
+        head, volume_prefix = os.path.split(head)
+        volume_prefix = "/" + volume_prefix
+
+        # Authorize the guest client's auth ID to mount the volume.
+        key = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
+                                       tenant_id="{tenant_id}")
+            print auth_result['auth_key']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity=guest_entity,
+            readonly=readonly,
+            tenant_id=tenant_id)), volume_prefix, namespace_prefix
+        )
+
+        # CephFSVolumeClient's authorize() does not return the secret
+        # key to a caller who isn't multi-tenant aware. Explicitly
+        # query the key for such a client.
+        if not tenant_id:
+            key = self.fs.mon_manager.raw_cluster_cmd(
+            "auth", "get-key", "client.{name}".format(name=guest_entity),
+            )
+
+        # The guest auth ID should exist.
+        existing_ids = [a['entity'] for a in self.auth_list()]
+        self.assertIn("client.{0}".format(guest_entity), existing_ids)
+
+        # Create keyring file for the guest client.
+        keyring_txt = dedent("""
+        [client.{guest_entity}]
+            key = {key}
+
+        """.format(
+            guest_entity=guest_entity,
+            key=key
+        ))
+        guest_mount.client_id = guest_entity
+        self._sudo_write_file(guest_mount.client_remote,
+                              guest_mount.get_keyring_path(),
+                              keyring_txt)
+
+        # Add a guest client section to the ceph config file.
+        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
+        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
+        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
+        self.set_conf("client.{0}".format(guest_entity),
+                      "keyring", guest_mount.get_keyring_path())
+
+    def test_default_prefix(self):
+        group_id = "grpid"
+        volume_id = "volid"
+        DEFAULT_VOL_PREFIX = "volumes"
+        DEFAULT_NS_PREFIX = "fsvolumens_"
+
+        self.mount_b.umount_wait()
+        self._configure_vc_auth(self.mount_b, "manila")
+
+        # Create a volume with the default prefix
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 10, data_isolated=True)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+
+        # The dir should be created
+        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))
+
+        # The namespace should be set
+        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
+        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
+        self.assertEqual(namespace, ns_in_attr)
+
+
+    def test_lifecycle(self):
+        """
+        General smoke test for create, extend, destroy
+        """
+
+        # I'm going to use mount_c later as a guest for mounting the created
+        # shares
+        self.mounts[2].umount()
+
+        # I'm going to leave mount_b unmounted and just use it as a handle for
+        # driving volumeclient.  It's a little hacky but we don't have a more
+        # general concept for librados/libcephfs clients as opposed to full
+        # blown mounting clients.
+        self.mount_b.umount_wait()
+        self._configure_vc_auth(self.mount_b, "manila")
+
+        guest_entity = "guest"
+        group_id = "grpid"
+        volume_id = "volid"
+
+        volume_prefix = "/myprefix"
+        namespace_prefix = "mynsprefix_"
+
+        # Create a 100MB volume
+        volume_size = 100
+        mount_path = self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            volume_size=volume_size
+        )), volume_prefix, namespace_prefix)
+
+        # The dir should be created
+        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))
+
+        # Authorize and configure credentials for the guest to mount
+        # the volume.
+        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
+                                   mount_path, namespace_prefix)
+        self.mounts[2].mount(mount_path=mount_path)
+
+        # df should see volume size, same as the quota set on volume's dir
+        self.assertEqual(self.mounts[2].df()['total'],
+                         volume_size * 1024 * 1024)
+        self.assertEqual(
+                self.mount_a.getfattr(
+                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
+                    "ceph.quota.max_bytes"),
+                "%s" % (volume_size * 1024 * 1024))
+
+        # df granularity is a 4MB block, so we have to write at least that much
+        data_bin_mb = 4
+        self.mounts[2].write_n_mb("data.bin", data_bin_mb)
+
+        # Write something outside volume to check this space usage is
+        # not reported in the volume's DF.
+        other_bin_mb = 6
+        self.mount_a.write_n_mb("other.bin", other_bin_mb)
+
+        # global: df should see all the writes (data + other).  This is a >
+        # rather than a == because the global space used includes all pools
+        self.assertGreater(self.mount_a.df()['used'],
+                           (data_bin_mb + other_bin_mb) * 1024 * 1024)
+
+        # Hack: do a metadata IO to kick rstats
+        self.mounts[2].run_shell(["touch", "foo"])
+
+        # volume: df should see the data_bin_mb consumed from quota, same
+        # as the rbytes for the volume's dir
+        self.wait_until_equal(
+                lambda: self.mounts[2].df()['used'],
+                data_bin_mb * 1024 * 1024, timeout=60)
+        self.wait_until_equal(
+                lambda: self.mount_a.getfattr(
+                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
+                    "ceph.dir.rbytes"),
+                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)
+
+        # Sync so that file data is persisted to RADOS
+        self.mounts[2].run_shell(["sync"])
+
+        # Our data should stay in its particular RADOS namespace
+        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
+        namespace = "{0}{1}".format(namespace_prefix, volume_id)
+        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
+        self.assertEqual(namespace, ns_in_attr)
+
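+        # Roughly equivalent to "rados -p <pool> -N <namespace> ls" (a sketch of
+        # the underlying call): the set should be non-empty, i.e. the file data
+        # landed in the volume's own RADOS namespace.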
+        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
+        self.assertNotEqual(objects_in_ns, set())
+
+        # De-authorize the guest
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.evict("{guest_entity}")
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity=guest_entity
+        )), volume_prefix, namespace_prefix)
+
+        # Once deauthorized, the client should be unable to do any more metadata ops.
+        # The way the client currently behaves here is to block (it acts as if it
+        # has lost the network, because there is nothing to tell it that its
+        # messages are being dropped now that its identity is gone)
+        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
+        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
+        self.assertFalse(background.finished)
+
+        # After deauthorisation, the client ID should be gone (this was the only
+        # volume it was authorised for)
+        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])
+
+        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
+        self.mounts[2].kill()
+        self.mounts[2].kill_cleanup()
+        try:
+            background.wait()
+        except CommandFailedError:
+            # We killed the mount out from under you
+            pass
+
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )), volume_prefix, namespace_prefix)
+
+    def test_idempotency(self):
+        """
+        That the volumeclient interface works when calling everything twice
+        """
+        self.mount_b.umount_wait()
+        self._configure_vc_auth(self.mount_b, "manila")
+
+        guest_entity = "guest"
+        group_id = "grpid"
+        volume_id = "volid"
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 10)
+            vc.create_volume(vp, 10)
+            vc.authorize(vp, "{guest_entity}")
+            vc.authorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.delete_volume(vp)
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+            vc.purge_volume(vp)
+
+            vc.create_volume(vp, 10, data_isolated=True)
+            vc.create_volume(vp, 10, data_isolated=True)
+            vc.authorize(vp, "{guest_entity}")
+            vc.authorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.delete_volume(vp, data_isolated=True)
+            vc.delete_volume(vp, data_isolated=True)
+            vc.purge_volume(vp, data_isolated=True)
+            vc.purge_volume(vp, data_isolated=True)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity=guest_entity
+        )))
+
+    def test_data_isolated(self):
+        """
+        That data-isolated shares get their own pool
+        """
+
+        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
+        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
+        # sane before using volume_client, to avoid creating pools with absurdly large
+        # numbers of PGs.
+        self.set_conf("global", "mon pg warn max per osd", "300")
+        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
+            mon_daemon_state.restart()
+
+        self.mount_b.umount_wait()
+        self._configure_vc_auth(self.mount_b, "manila")
+
+        # Calculate how many PGs we'll expect the new volume pool to have
+        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
+        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
+        osd_count = len(osd_map['osds'])
+        max_overall = osd_count * max_per_osd
+
+        existing_pg_count = 0
+        for p in osd_map['pools']:
+            existing_pg_count += p['pg_num']
+
+        expected_pg_num = (max_overall - existing_pg_count) / 10
+        log.info("max_per_osd {0}".format(max_per_osd))
+        log.info("osd_count {0}".format(osd_count))
+        log.info("max_overall {0}".format(max_overall))
+        log.info("existing_pg_count {0}".format(existing_pg_count))
+        log.info("expected_pg_num {0}".format(expected_pg_num))
+
+        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
+
+        group_id = "grpid"
+        volume_id = "volid"
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 10, data_isolated=True)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+
+        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
+
+        # Should have created one new pool
+        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
+        self.assertEqual(len(new_pools), 1)
+
+        # It should have followed the heuristic for PG count
+        # (this is an overly strict test condition, so we may want to remove
+        #  it at some point as/when the logic gets fancier)
+        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
+        self.assertEqual(expected_pg_num, created_pg_num)
+
+    def test_15303(self):
+        """
+        Reproducer for #15303 "Client holds incorrect complete flag on dir
+        after losing caps" (http://tracker.ceph.com/issues/15303)
+        """
+        for m in self.mounts:
+            m.umount_wait()
+
+        # Create a dir on mount A
+        self.mount_a.mount()
+        self.mount_a.run_shell(["mkdir", "parent1"])
+        self.mount_a.run_shell(["mkdir", "parent2"])
+        self.mount_a.run_shell(["mkdir", "parent1/mydir"])
+
+        # Put some files in it from mount B
+        self.mount_b.mount()
+        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
+        self.mount_b.umount_wait()
+
+        # List the dir's contents on mount A
+        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
+                             ["afile"])
+
+    def test_evict_client(self):
+        """
+        That a volume client can be evicted based on its auth ID and the volume
+        path it has mounted.
+        """
+
+        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
+        # and mounts[3] would be used as guests to mount the volumes/shares.
+
+        for i in range(1, 4):
+            self.mounts[i].umount_wait()
+
+        volumeclient_mount = self.mounts[1]
+        self._configure_vc_auth(volumeclient_mount, "manila")
+        guest_mounts = (self.mounts[2], self.mounts[3])
+
+        guest_entity = "guest"
+        group_id = "grpid"
+        mount_paths = []
+        volume_ids = []
+
+        # Create two volumes. Authorize 'guest' auth ID to mount the two
+        # volumes. Mount the two volumes. Write data to the volumes.
+        for i in range(2):
+            # Create volume.
+            volume_ids.append("volid_{0}".format(str(i)))
+            mount_paths.append(
+                self._volume_client_python(volumeclient_mount, dedent("""
+                    vp = VolumePath("{group_id}", "{volume_id}")
+                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
+                    print create_result['mount_path']
+                """.format(
+                    group_id=group_id,
+                    volume_id=volume_ids[i]
+            ))))
+
+            # Authorize 'guest' auth ID to mount the volume.
+            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
+                                       guest_entity, mount_paths[i])
+
+            # Mount the volume.
+            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
+                id=guest_entity, suffix=str(i))
+            guest_mounts[i].mount(mount_path=mount_paths[i])
+            guest_mounts[i].write_n_mb("data.bin", 1)
+
+
+        # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
+        # one volume.
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.evict("{guest_entity}", volume_path=vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_ids[0],
+            guest_entity=guest_entity
+        )))
+
+        # Evicted guest client, guest_mounts[0], should not be able to do
+        # any more metadata ops. It behaves as if it has lost network
+        # connection.
+        background = guest_mounts[0].write_n_mb("rogue.bin", 1, wait=False)
+        # Approximate check for 'stuck' as 'still running after 10s'.
+        time.sleep(10)
+        self.assertFalse(background.finished)
+
+        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
+        # has mounted the other volume, should be able to use its volume
+        # unaffected.
+        guest_mounts[1].write_n_mb("data.bin.1", 1)
+
+        # Cleanup.
+        for i in range(2):
+            self._volume_client_python(volumeclient_mount, dedent("""
+                vp = VolumePath("{group_id}", "{volume_id}")
+                vc.deauthorize(vp, "{guest_entity}")
+                vc.delete_volume(vp)
+                vc.purge_volume(vp)
+            """.format(
+                group_id=group_id,
+                volume_id=volume_ids[i],
+                guest_entity=guest_entity
+            )))
+
+        # We must hard-umount the one that we evicted
+        guest_mounts[0].umount_wait(force=True)
+
+    def test_purge(self):
+        """
+        Reproducer for #15266, exception trying to purge volumes that
+        contain non-ascii filenames.
+
+        Additionally test any other purge corner cases here.
+        """
+        # I'm going to leave mount_b unmounted and just use it as a handle for
+        # driving volumeclient.  It's a little hacky but we don't have a more
+        # general concept for librados/libcephfs clients as opposed to full
+        # blown mounting clients.
+        self.mount_b.umount_wait()
+        self._configure_vc_auth(self.mount_b, "manila")
+
+        group_id = "grpid"
+        # Use a unicode volume ID (like Manila), to reproduce #15266
+        volume_id = u"volid"
+
+        # Create
+        mount_path = self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", u"{volume_id}")
+            create_result = vc.create_volume(vp, 10)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )))
+
+        # Strip leading "/"
+        mount_path = mount_path[1:]
+
+        # A file with non-ascii characters
+        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])
+
+        # A file with no permissions to do anything
+        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
+        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])
+
+        self._volume_client_python(self.mount_b, dedent("""
+            vp = VolumePath("{group_id}", u"{volume_id}")
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )))
+
+        # Check it's really gone
+        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
+        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])
+
+    def test_readonly_authorization(self):
+        """
+        That guest clients can be restricted to read-only mounts of volumes.
+        """
+
+        volumeclient_mount = self.mounts[1]
+        guest_mount = self.mounts[2]
+        volumeclient_mount.umount_wait()
+        guest_mount.umount_wait()
+
+        # Configure volumeclient_mount as the handle for driving volumeclient.
+        self._configure_vc_auth(volumeclient_mount, "manila")
+
+        guest_entity = "guest"
+        group_id = "grpid"
+        volume_id = "volid"
+
+        # Create a volume.
+        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*10)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+
+        # Authorize and configure credentials for the guest to mount the
+        # volume with read-write access.
+        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
+                                   mount_path, readonly=False)
+
+        # Mount the volume, and write to it.
+        guest_mount.mount(mount_path=mount_path)
+        guest_mount.write_n_mb("data.bin", 1)
+
+        # Change the guest auth ID's authorization to read-only mount access.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.deauthorize(vp, "{guest_entity}")
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity=guest_entity
+        )))
+        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
+                                   mount_path, readonly=True)
+
+        # The effect of the change in access level to read-only is not
+        # immediate. The guest sees the change only after a remount of
+        # the volume.
+        guest_mount.umount_wait()
+        guest_mount.mount(mount_path=mount_path)
+
+        # Read existing content of the volume.
+        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
+        # Cannot write into read-only volume.
+        with self.assertRaises(CommandFailedError):
+            guest_mount.write_n_mb("rogue.bin", 1)
+
+    def test_get_authorized_ids(self):
+        """
+        That for a volume, the authorized IDs and their access levels
+        can be obtained using CephFSVolumeClient's get_authorized_ids().
+        """
+        volumeclient_mount = self.mounts[1]
+        volumeclient_mount.umount_wait()
+
+        # Configure volumeclient_mount as the handle for driving volumeclient.
+        self._configure_vc_auth(volumeclient_mount, "manila")
+
+        group_id = "grpid"
+        volume_id = "volid"
+        guest_entity_1 = "guest1"
+        guest_entity_2 = "guest2"
+
+        log.info("print group ID: {0}".format(group_id))
+
+        # Create a volume and fetch its authorized IDs (initially none).
+        auths = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 1024*1024*10)
+            auths = vc.get_authorized_ids(vp)
+            print auths
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+        # Check the list of authorized IDs for the volume.
+        expected_result = None
+        self.assertEqual(str(expected_result), auths)
+
+        # Allow two auth IDs access to the volume.
+        auths = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.authorize(vp, "{guest_entity_1}", readonly=False)
+            vc.authorize(vp, "{guest_entity_2}", readonly=True)
+            auths = vc.get_authorized_ids(vp)
+            print auths
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity_1=guest_entity_1,
+            guest_entity_2=guest_entity_2,
+        )))
+        # Check the list of authorized IDs and their access levels.
+        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
+        self.assertItemsEqual(str(expected_result), auths)
+
+        # Disallow both the auth IDs' access to the volume.
+        auths = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.deauthorize(vp, "{guest_entity_1}")
+            vc.deauthorize(vp, "{guest_entity_2}")
+            auths = vc.get_authorized_ids(vp)
+            print auths
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity_1=guest_entity_1,
+            guest_entity_2=guest_entity_2,
+        )))
+        # Check the list of authorized IDs for the volume.
+        expected_result = None
+        self.assertItemsEqual(str(expected_result), auths)
+
+    def test_multitenant_volumes(self):
+        """
+        That volume access can be restricted to a tenant.
+
+        That metadata used to enforce tenant isolation of
+        volumes is stored as a two-way mapping between auth
+        IDs and volumes that they're authorized to access.
+        """
+        volumeclient_mount = self.mounts[1]
+        volumeclient_mount.umount_wait()
+
+        # Configure volumeclient_mount as the handle for driving volumeclient.
+        self._configure_vc_auth(volumeclient_mount, "manila")
+
+        group_id = "groupid"
+        volume_id = "volumeid"
+
+        # Guest clients belonging to different tenants, but using the same
+        # auth ID.
+        auth_id = "guest"
+        guestclient_1 = {
+            "auth_id": auth_id,
+            "tenant_id": "tenant1",
+        }
+        guestclient_2 = {
+            "auth_id": auth_id,
+            "tenant_id": "tenant2",
+        }
+
+        # Create a volume.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 1024*1024*10)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+
+        # Check that volume metadata file is created on volume creation.
+        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
+        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
+
+        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
+        # 'tenant1', with 'rw' access to the volume.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            auth_id=guestclient_1["auth_id"],
+            tenant_id=guestclient_1["tenant_id"]
+        )))
+
+        # Check that the auth metadata file for auth ID 'guest' is
+        # created on authorizing 'guest' access to the volume.
+        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
+        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
+
+        # Verify that the auth metadata file stores the tenant ID that the
+        # auth ID belongs to, the auth ID's authorized access levels
+        # for different volumes, versioning details, etc.
+        expected_auth_metadata = {
+            u"version": 1,
+            u"compat_version": 1,
+            u"dirty": False,
+            u"tenant_id": u"tenant1",
+            u"volumes": {
+                u"groupid/volumeid": {
+                    u"dirty": False,
+                    u"access_level": u"rw",
+                }
+            }
+        }
+
+        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            auth_metadata = vc._auth_metadata_get("{auth_id}")
+            print auth_metadata
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            auth_id=guestclient_1["auth_id"],
+        )))
+
+        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)
+
+        # Verify that the volume metadata file stores info about auth IDs
+        # and their access levels to the volume, versioning details, etc.
+        expected_vol_metadata = {
+            u"version": 1,
+            u"compat_version": 1,
+            u"auths": {
+                u"guest": {
+                    u"dirty": False,
+                    u"access_level": u"rw"
+                }
+            }
+        }
+
+        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            volume_metadata = vc._volume_metadata_get(vp)
+            print volume_metadata
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)
+
+        # Cannot authorize 'guestclient_2' to access the volume.
+        # It uses auth ID 'guest', which has already been used by
+        # 'guestclient_1', belonging to another tenant, to access
+        # the volume.
+        with self.assertRaises(CommandFailedError):
+            self._volume_client_python(volumeclient_mount, dedent("""
+                vp = VolumePath("{group_id}", "{volume_id}")
+                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+            """.format(
+                group_id=group_id,
+                volume_id=volume_id,
+                auth_id=guestclient_2["auth_id"],
+                tenant_id=guestclient_2["tenant_id"]
+            )))
+
+        # Check that auth metadata file is cleaned up on removing
+        # auth ID's only access to a volume.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.deauthorize(vp, "{guest_entity}")
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            guest_entity=guestclient_1["auth_id"]
+        )))
+
+        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
+
+        # Check that volume metadata file is cleaned up on volume deletion.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.delete_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
+
+    def test_recover_metadata(self):
+        """
+        That volume client can recover from partial auth updates using
+        metadata files, which store auth info and its update status info.
+        """
+        volumeclient_mount = self.mounts[1]
+        volumeclient_mount.umount_wait()
+
+        # Configure volumeclient_mount as the handle for driving volumeclient.
+        self._configure_vc_auth(volumeclient_mount, "manila")
+
+        group_id = "groupid"
+        volume_id = "volumeid"
+
+        guestclient = {
+            "auth_id": "guest",
+            "tenant_id": "tenant",
+        }
+
+        # Create a volume.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.create_volume(vp, 1024*1024*10)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )))
+
+        # Authorize 'guestclient' access to the volume.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            auth_id=guestclient["auth_id"],
+            tenant_id=guestclient["tenant_id"]
+        )))
+
+        # Check that auth metadata file for auth ID 'guest' is created.
+        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
+        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
+
+        # Induce partial auth update state by modifying the auth metadata file,
+        # and then run recovery procedure.
+        self._volume_client_python(volumeclient_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            auth_metadata = vc._auth_metadata_get("{auth_id}")
+            auth_metadata['dirty'] = True
+            vc._auth_metadata_set("{auth_id}", auth_metadata)
+            vc.recover()
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+            auth_id=guestclient["auth_id"],
+        )))
diff --git a/qa/tasks/cephfs/vstart_runner.py b/qa/tasks/cephfs/vstart_runner.py
new file mode 100644
index 0000000..eb04bf2
--- /dev/null
+++ b/qa/tasks/cephfs/vstart_runner.py
@@ -0,0 +1,917 @@
+"""
+Useful hack: override Filesystem and Mount interfaces to run a CephFSTestCase against a vstart
+ceph instance instead of a packaged/installed cluster.  Use this to turn around test cases
+quickly during development.
+
+For example, if you have teuthology, ceph-qa-suite and ceph all in ~git, then you would:
+
+    # Activate the teuthology virtualenv
+    source ~/git/teuthology/virtualenv/bin/activate
+    # Go into your ceph source tree
+    cd ~/git/ceph/src
+    # Start a vstart cluster
+    MDS=2 MON=1 OSD=3 ./vstart.sh -n
+    # Invoke a test using this script, with PYTHONPATH set appropriately
+    PYTHONPATH=~/git/teuthology/:~/git/ceph-qa-suite/ python ~/git/ceph-qa-suite/tasks/cephfs/vstart_runner.py
+
+If you built out of tree with CMake, then switch to your build directory before executing vstart_runner.
+
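+To run just one module, or to drop into an interactive shell when a test fails, pass the
+module name and/or the --interactive flag (both are handled by the option parsing in
+exec_test() below); the module path shown here is only an illustration and depends on how
+your PYTHONPATH is set up:
+
+    PYTHONPATH=~/git/teuthology/:~/git/ceph-qa-suite/ python ~/git/ceph-qa-suite/tasks/cephfs/vstart_runner.py --interactive tasks.cephfs.test_auto_repair
+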
+"""
+
+from StringIO import StringIO
+from collections import defaultdict
+import getpass
+import signal
+import tempfile
+import threading
+import datetime
+import shutil
+import re
+import os
+import time
+import json
+import sys
+import errno
+from unittest import suite
+import unittest
+from teuthology.orchestra.run import Raw, quote
+from teuthology.orchestra.daemon import DaemonGroup
+from teuthology.config import config as teuth_config
+
+import logging
+
+log = logging.getLogger(__name__)
+
+handler = logging.FileHandler("./vstart_runner.log")
+formatter = logging.Formatter(
+    fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
+    datefmt='%Y-%m-%dT%H:%M:%S')
+handler.setFormatter(formatter)
+log.addHandler(handler)
+log.setLevel(logging.INFO)
+
+try:
+    from teuthology.exceptions import CommandFailedError
+    from tasks.ceph_manager import CephManager
+    from tasks.cephfs.fuse_mount import FuseMount
+    from tasks.cephfs.filesystem import Filesystem, MDSCluster
+    from teuthology.contextutil import MaxWhileTries
+    from teuthology.task import interactive
+except ImportError:
+    sys.stderr.write("***\nError importing packages, have you activated your teuthology virtualenv "
+                     "and set PYTHONPATH to point to teuthology and ceph-qa-suite?\n***\n\n")
+    raise
+
+# Must import after teuthology because of gevent monkey patching
+import subprocess
+
+if os.path.exists("./CMakeCache.txt"):
+    # Running in build dir of a cmake build
+    BIN_PREFIX = "./bin/"
+else:
+    # Running in src/ of an autotools build
+    BIN_PREFIX = "./"
+
+
+class LocalRemoteProcess(object):
+    def __init__(self, args, subproc, check_status, stdout, stderr):
+        self.args = args
+        self.subproc = subproc
+        if stdout is None:
+            self.stdout = StringIO()
+        else:
+            self.stdout = stdout
+
+        if stderr is None:
+            self.stderr = StringIO()
+        else:
+            self.stderr = stderr
+
+        self.check_status = check_status
+        self.exitstatus = self.returncode = None
+
+    def wait(self):
+        if self.finished:
+            # Avoid calling communicate() on a dead process because it'll
+            # give you stick about std* already being closed
+            if self.exitstatus != 0:
+                raise CommandFailedError(self.args, self.exitstatus)
+            else:
+                return
+
+        out, err = self.subproc.communicate()
+        self.stdout.write(out)
+        self.stderr.write(err)
+
+        self.exitstatus = self.returncode = self.subproc.returncode
+
+        if self.exitstatus != 0:
+            sys.stderr.write(out)
+            sys.stderr.write(err)
+
+        if self.check_status and self.exitstatus != 0:
+            raise CommandFailedError(self.args, self.exitstatus)
+
+    @property
+    def finished(self):
+        if self.exitstatus is not None:
+            return True
+
+        if self.subproc.poll() is not None:
+            out, err = self.subproc.communicate()
+            self.stdout.write(out)
+            self.stderr.write(err)
+            self.exitstatus = self.returncode = self.subproc.returncode
+            return True
+        else:
+            return False
+
+    def kill(self):
+        log.info("kill ")
+        if self.subproc.pid and not self.finished:
+            log.info("kill: killing pid {0} ({1})".format(
+                self.subproc.pid, self.args))
+            safe_kill(self.subproc.pid)
+        else:
+            log.info("kill: already terminated ({0})".format(self.args))
+
+    @property
+    def stdin(self):
+        class FakeStdIn(object):
+            def __init__(self, mount_daemon):
+                self.mount_daemon = mount_daemon
+
+            def close(self):
+                self.mount_daemon.kill()
+
+        return FakeStdIn(self)
+
+
+class LocalRemote(object):
+    """
+    Amusingly named class to present the teuthology RemoteProcess interface when we are really
+    running things locally for vstart
+
+    Run this inside your src/ dir!
+    """
+
+    def __init__(self):
+        self.name = "local"
+        self.hostname = "localhost"
+        self.user = getpass.getuser()
+
+    def get_file(self, path, sudo, dest_dir):
+        tmpfile = tempfile.NamedTemporaryFile(delete=False).name
+        shutil.copy(path, tmpfile)
+        return tmpfile
+
+    def put_file(self, src, dst, sudo=False):
+        shutil.copy(src, dst)
+
+    def run(self, args, check_status=True, wait=True,
+            stdout=None, stderr=None, cwd=None, stdin=None,
+            logger=None, label=None):
+        log.info("run args={0}".format(args))
+
+        # We don't need no stinkin' sudo
+        args = [a for a in args if a != "sudo"]
+
+        # We have to use shell=True if any run.Raw was present, e.g. &&
+        shell = any([a for a in args if isinstance(a, Raw)])
+
+        if shell:
+            filtered = []
+            i = 0
+            while i < len(args):
+                if args[i] == 'adjust-ulimits':
+                    i += 1
+                elif args[i] == 'ceph-coverage':
+                    i += 2
+                elif args[i] == 'timeout':
+                    i += 2
+                else:
+                    filtered.append(args[i])
+                    i += 1
+
+            args = quote(filtered)
+            log.info("Running {0}".format(args))
+
+            subproc = subprocess.Popen(args,
+                                       stdout=subprocess.PIPE,
+                                       stderr=subprocess.PIPE,
+                                       stdin=subprocess.PIPE,
+                                       cwd=cwd,
+                                       shell=True)
+        else:
+            log.info("Running {0}".format(args))
+
+            for arg in args:
+                if not isinstance(arg, basestring):
+                    raise RuntimeError("Oops, can't handle arg {0} type {1}".format(
+                        arg, arg.__class__
+                    ))
+
+            subproc = subprocess.Popen(args,
+                                       stdout=subprocess.PIPE,
+                                       stderr=subprocess.PIPE,
+                                       stdin=subprocess.PIPE,
+                                       cwd=cwd)
+
+        if stdin:
+            if not isinstance(stdin, basestring):
+                raise RuntimeError("Can't handle non-string stdins on a vstart cluster")
+
+            # Hack: writing to stdin is not deadlock-safe, but it "always" works
+            # as long as the input buffer is "small"
+            subproc.stdin.write(stdin)
+
+        proc = LocalRemoteProcess(
+            args, subproc, check_status,
+            stdout, stderr
+        )
+
+        if wait:
+            proc.wait()
+
+        return proc
+
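+# A typical use of LocalRemote (a sketch of what callers below do): the returned
+# LocalRemoteProcess captures stdout/stderr into StringIO objects unless explicit
+# streams were passed in, e.g.
+#
+#   remote = LocalRemote()
+#   ps_txt = remote.run(args=["ps", "aux"]).stdout.getvalue().strip()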
+
+# FIXME: twiddling vstart daemons is likely to be unreliable, we should probably just let vstart
+# run RADOS and run the MDS daemons directly from the test runner
+class LocalDaemon(object):
+    def __init__(self, daemon_type, daemon_id):
+        self.daemon_type = daemon_type
+        self.daemon_id = daemon_id
+        self.controller = LocalRemote()
+        self.proc = None
+
+    @property
+    def remote(self):
+        return LocalRemote()
+
+    def running(self):
+        return self._get_pid() is not None
+
+    def _get_pid(self):
+        """
+        Return PID as an integer or None if not found
+        """
+        ps_txt = self.controller.run(
+            args=["ps", "aux"]
+        ).stdout.getvalue().strip()
+        lines = ps_txt.split("\n")[1:]
+
+        for line in lines:
+            if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1:
+                log.info("Found ps line for daemon: {0}".format(line))
+                return int(line.split()[1])
+
+        return None
+
+    def wait(self, timeout):
+        waited = 0
+        while self._get_pid() is not None:
+            if waited > timeout:
+                raise MaxWhileTries("Timed out waiting for daemon {0}.{1}".format(self.daemon_type, self.daemon_id))
+            time.sleep(1)
+            waited += 1
+
+    def stop(self, timeout=300):
+        if not self.running():
+            log.error('tried to stop a non-running daemon')
+            return
+
+        pid = self._get_pid()
+        log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
+        os.kill(pid, signal.SIGKILL)
+        self.wait(timeout=timeout)
+
+    def restart(self):
+        if self._get_pid() is not None:
+            self.stop()
+
+        self.proc = self.controller.run([os.path.join(BIN_PREFIX, "./ceph-{0}".format(self.daemon_type)), "-i", self.daemon_id])
+
+
+def safe_kill(pid):
+    """
+    os.kill annoyingly raises exception if process already dead.  Ignore it.
+    """
+    try:
+        return os.kill(pid, signal.SIGKILL)
+    except OSError as e:
+        if e.errno == errno.ESRCH:
+            # Raced with process termination
+            pass
+        else:
+            raise
+
+
+class LocalFuseMount(FuseMount):
+    def __init__(self, test_dir, client_id):
+        super(LocalFuseMount, self).__init__(None, test_dir, client_id, LocalRemote())
+
+    @property
+    def config_path(self):
+        return "./ceph.conf"
+
+    def get_keyring_path(self):
+        # This is going to end up in a config file, so use an absolute path
+        # to avoid assumptions about daemons' pwd
+        return os.path.abspath("./client.{0}.keyring".format(self.client_id))
+
+    def run_shell(self, args, wait=True):
+        # FIXME maybe should add a pwd arg to teuthology.orchestra so that
+        # the "cd foo && bar" shenanigans aren't needed to begin with and
+        # then we wouldn't have to special-case this
+        return self.client_remote.run(
+            args, wait=wait, cwd=self.mountpoint
+        )
+
+    @property
+    def _prefix(self):
+        # FuseMount only uses the prefix for running ceph, which in cmake or autotools is in
+        # the present path
+        return "./"
+
+    def _asok_path(self):
+        # In teuthology, the asok is named after the PID of the ceph-fuse process, because it's
+        # run in the foreground.  When running it daemonized, however, the asok is named after
+        # the PID of the launching process, not the long-running ceph-fuse process.  Therefore
+        # we need to give an exact path here, as the logic for checking /proc/ for which
+        # asok is alive does not work.
+        path = "./out/client.{0}.{1}.asok".format(self.client_id, self.fuse_daemon.subproc.pid)
+        log.info("I think my launching pid was {0}".format(self.fuse_daemon.subproc.pid))
+        return path
+
+    def umount(self):
+        if self.is_mounted():
+            super(LocalFuseMount, self).umount()
+
+    def mount(self, mount_path=None):
+        self.client_remote.run(
+            args=[
+                'mkdir',
+                '--',
+                self.mountpoint,
+            ],
+        )
+
+        def list_connections():
+            self.client_remote.run(
+                args=["mount", "-t", "fusectl", "/sys/fs/fuse/connections", "/sys/fs/fuse/connections"],
+                check_status=False
+            )
+            p = self.client_remote.run(
+                args=["ls", "/sys/fs/fuse/connections"],
+                check_status=False
+            )
+            if p.exitstatus != 0:
+                log.warn("ls conns failed with {0}, assuming none".format(p.exitstatus))
+                return []
+
+            ls_str = p.stdout.getvalue().strip()
+            if ls_str:
+                return [int(n) for n in ls_str.split("\n")]
+            else:
+                return []
+
+        # Before starting ceph-fuse process, note the contents of
+        # /sys/fs/fuse/connections
+        pre_mount_conns = list_connections()
+        log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+
+        prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")]
+        if os.getuid() != 0:
+            prefix += ["--client-die-on-failed-remount=false"]
+
+        if mount_path is not None:
+            prefix += ["--client_mountpoint={0}".format(mount_path)]
+
+        self.fuse_daemon = self.client_remote.run(args=
+                                            prefix + [
+                                                "-f",
+                                                "--name",
+                                                "client.{0}".format(self.client_id),
+                                                self.mountpoint
+                                            ], wait=False)
+
+        log.info("Mounted client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
+
+        # Wait for the connection reference to appear in /sys
+        waited = 0
+        post_mount_conns = list_connections()
+        while len(post_mount_conns) <= len(pre_mount_conns):
+            if self.fuse_daemon.finished:
+                # Did mount fail?  Raise the CommandFailedError instead of
+                # hitting the "failed to populate /sys/" timeout
+                self.fuse_daemon.wait()
+            time.sleep(1)
+            waited += 1
+            if waited > 30:
+                raise RuntimeError("Fuse mount failed to populate /sys/ after {0} seconds".format(
+                    waited
+                ))
+            post_mount_conns = list_connections()
+
+        log.info("Post-mount connections: {0}".format(post_mount_conns))
+
+        # Record our fuse connection number so that we can use it when
+        # forcing an unmount
+        new_conns = list(set(post_mount_conns) - set(pre_mount_conns))
+        if len(new_conns) == 0:
+            raise RuntimeError("New fuse connection directory not found ({0})".format(new_conns))
+        elif len(new_conns) > 1:
+            raise RuntimeError("Unexpectedly numerous fuse connections {0}".format(new_conns))
+        else:
+            self._fuse_conn = new_conns[0]
+
+    def _run_python(self, pyscript):
+        """
+        Override this to remove the daemon-helper prefix that is used otherwise
+        to make the process killable.
+        """
+        return self.client_remote.run(args=[
+            'python', '-c', pyscript
+        ], wait=False)
+
+
+class LocalCephManager(CephManager):
+    def __init__(self):
+        # Deliberately skip parent init, only inheriting from it to get
+        # util methods like osd_dump that sit on top of raw_cluster_cmd
+        self.controller = LocalRemote()
+
+        # A minority of CephManager fns actually bother locking for when
+        # certain teuthology tests want to run tasks in parallel
+        self.lock = threading.RLock()
+
+    def find_remote(self, daemon_type, daemon_id):
+        """
+        daemon_type like 'mds', 'osd'
+        daemon_id like 'a', '0'
+        """
+        return LocalRemote()
+
+    def run_ceph_w(self):
+        proc = self.controller.run([os.path.join(BIN_PREFIX, "ceph"), "-w"], wait=False, stdout=StringIO())
+        return proc
+
+    def raw_cluster_cmd(self, *args):
+        """
+        args like ["osd", "dump"]
+        return stdout string
+        """
+        proc = self.controller.run([os.path.join(BIN_PREFIX, "ceph")] + list(args))
+        return proc.stdout.getvalue()
+
+    def raw_cluster_cmd_result(self, *args):
+        """
+        like raw_cluster_cmd but don't check status, just return rc
+        """
+        proc = self.controller.run([os.path.join(BIN_PREFIX, "ceph")] + list(args), check_status=False)
+        return proc.exitstatus
+
+    def admin_socket(self, daemon_type, daemon_id, command, check_status=True):
+        return self.controller.run(
+            args=[os.path.join(BIN_PREFIX, "ceph"), "daemon", "{0}.{1}".format(daemon_type, daemon_id)] + command, check_status=check_status
+        )
+
+    # FIXME: copypasta
+    def get_mds_status(self, mds):
+        """
+        Run cluster commands for the mds in order to get mds information
+        """
+        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+        j = json.loads(' '.join(out.splitlines()[1:]))
+        # collate; for dup ids, larger gid wins.
+        for info in j['info'].itervalues():
+            if info['name'] == mds:
+                return info
+        return None
+
+    # FIXME: copypasta
+    def get_mds_status_by_rank(self, rank):
+        """
+        Run cluster commands for the mds in order to get mds information
+        check rank.
+        """
+        j = self.get_mds_status_all()
+        # collate; for dup ids, larger gid wins.
+        for info in j['info'].itervalues():
+            if info['rank'] == rank:
+                return info
+        return None
+
+    def get_mds_status_all(self):
+        """
+        Run cluster command to extract all the mds status.
+        """
+        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+        j = json.loads(' '.join(out.splitlines()[1:]))
+        return j
+
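+# Sketch of how the two cluster-command wrappers above differ: raw_cluster_cmd()
+# returns the command's stdout and raises CommandFailedError on a non-zero exit,
+# while raw_cluster_cmd_result() returns only the exit status, e.g.
+#
+#   mgr = LocalCephManager()
+#   osd_dump_json = mgr.raw_cluster_cmd("osd", "dump", "--format=json")
+#   rc = mgr.raw_cluster_cmd_result("osd", "down", "0")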
+
+class LocalMDSCluster(MDSCluster):
+    def __init__(self, ctx):
+        # Deliberately skip calling parent constructor
+        self._ctx = ctx
+
+        self.mds_ids = ctx.daemons.daemons['mds'].keys()
+        if not self.mds_ids:
+            raise RuntimeError("No MDSs found in ceph.conf!")
+
+        self.mon_manager = LocalCephManager()
+        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
+
+        self._conf = defaultdict(dict)
+
+    def get_config(self, key, service_type=None):
+        if service_type is None:
+            service_type = 'mon'
+
+        # FIXME hardcoded vstart service IDs
+        service_id = {
+            'mon': 'a',
+            'mds': 'a',
+            'osd': '0'
+        }[service_type]
+
+        return self.json_asok(['config', 'get', key], service_type, service_id)[key]
+
+    def _write_conf(self):
+        # In teuthology, we have the honour of writing the entire ceph.conf, but
+        # in vstart land it has mostly already been written and we need to carefully
+        # append to it.
+        conf_path = "./ceph.conf"
+        banner = "\n#LOCAL_TEST\n"
+        existing_str = open(conf_path).read()
+
+        if banner in existing_str:
+            existing_str = existing_str[0:existing_str.find(banner)]
+
+        existing_str += banner
+
+        for subsys, kvs in self._conf.items():
+            existing_str += "\n[{0}]\n".format(subsys)
+            for key, val in kvs.items():
+                # Comment out existing instance if it exists
+                log.info("Searching for existing instance {0}/{1}".format(
+                    key, subsys
+                ))
+                existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
+                    subsys
+                ), existing_str, re.MULTILINE)
+
+                if existing_section:
+                    section_str = existing_str[existing_section.start():existing_section.end()]
+                    existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
+                    if existing_val:
+                        start = existing_section.start() + existing_val.start(1)
+                        log.info("Found string to replace at {0}".format(
+                            start
+                        ))
+                        existing_str = existing_str[0:start] + "#" + existing_str[start:]
+
+                existing_str += "{0} = {1}\n".format(key, val)
+
+        open(conf_path, "w").write(existing_str)
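+
+    # A rough sketch of the result: after set_ceph_conf("mds", "mds log max segments", "10")
+    # the tail appended to ./ceph.conf looks something like
+    #
+    #   #LOCAL_TEST
+    #   [mds]
+    #   mds log max segments = 10
+    #
+    # with any pre-existing "mds log max segments" line in an existing [mds] section
+    # commented out above the banner.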
+
+    def set_ceph_conf(self, subsys, key, value):
+        self._conf[subsys][key] = value
+        self._write_conf()
+
+    def clear_ceph_conf(self, subsys, key):
+        del self._conf[subsys][key]
+        self._write_conf()
+
+    def clear_firewall(self):
+        # FIXME: unimplemented
+        pass
+
+    def get_filesystem(self, name):
+        return LocalFilesystem(self._ctx, name)
+
+
+class LocalFilesystem(Filesystem, LocalMDSCluster):
+    @property
+    def admin_remote(self):
+        return LocalRemote()
+
+    def __init__(self, ctx, name=None):
+        # Deliberately skip calling parent constructor
+        self._ctx = ctx
+
+        if name is None:
+            name = "cephfs"
+
+        self.name = name
+        self.metadata_pool_name = "{0}_metadata".format(name)
+        self.data_pool_name = "{0}_data".format(name)
+
+        # Hack: cheeky inspection of ceph.conf to see what MDSs exist
+        self.mds_ids = set()
+        for line in open("ceph.conf").readlines():
+            match = re.match("^\[mds\.(.+)\]$", line)
+            if match:
+                self.mds_ids.add(match.group(1))
+
+        if not self.mds_ids:
+            raise RuntimeError("No MDSs found in ceph.conf!")
+
+        self.mds_ids = list(self.mds_ids)
+
+        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
+
+        self.mon_manager = LocalCephManager()
+
+        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
+
+        self.client_remote = LocalRemote()
+
+        self._conf = defaultdict(dict)
+
+    @property
+    def _prefix(self):
+        return BIN_PREFIX
+
+    def set_clients_block(self, blocked, mds_id=None):
+        raise NotImplementedError()
+
+    def get_pgs_per_fs_pool(self):
+        # FIXME: assuming there are 3 OSDs
+        return 3 * int(self.get_config('mon_pg_warn_min_per_osd'))
+
+
+class InteractiveFailureResult(unittest.TextTestResult):
+    """
+    Specialization that implements interactive-on-error style
+    behavior.
+    """
+    def addFailure(self, test, err):
+        super(InteractiveFailureResult, self).addFailure(test, err)
+        log.error(self._exc_info_to_string(err, test))
+        log.error("Failure in test '{0}', going interactive".format(
+            self.getDescription(test)
+        ))
+        interactive.task(ctx=None, config=None)
+
+    def addError(self, test, err):
+        super(InteractiveFailureResult, self).addError(test, err)
+        log.error(self._exc_info_to_string(err, test))
+        log.error("Error in test '{0}', going interactive".format(
+            self.getDescription(test)
+        ))
+        interactive.task(ctx=None, config=None)
+
+
+def exec_test():
+    # Help developers by stopping up-front if their tree isn't built enough for all the
+    # tools that the tests might want to use (add more here if needed)
+    require_binaries = ["ceph-dencoder", "cephfs-journal-tool", "cephfs-data-scan",
+                        "cephfs-table-tool", "ceph-fuse", "rados"]
+    missing_binaries = [b for b in require_binaries if not os.path.exists(os.path.join(BIN_PREFIX, b))]
+    if missing_binaries:
+        log.error("Some ceph binaries missing, please build them: {0}".format(" ".join(missing_binaries)))
+        sys.exit(-1)
+
+    test_dir = tempfile.mkdtemp()
+
+    # Create as many of these as the biggest test requires
+    clients = ["0", "1", "2", "3"]
+
+    remote = LocalRemote()
+
+    # Tolerate no MDSs or clients running at start
+    ps_txt = remote.run(
+        args=["ps", "aux"]
+    ).stdout.getvalue().strip()
+    lines = ps_txt.split("\n")[1:]
+
+    for line in lines:
+        if 'ceph-fuse' in line or 'ceph-mds' in line:
+            pid = int(line.split()[1])
+            log.warn("Killing stray process {0}".format(line))
+            os.kill(pid, signal.SIGKILL)
+
+    class LocalCluster(object):
+        def __init__(self, rolename="placeholder"):
+            self.remotes = {
+                remote: [rolename]
+            }
+
+        def only(self, requested):
+            return self.__class__(rolename=requested)
+
+    teuth_config['test_path'] = test_dir
+
+    class LocalContext(object):
+        def __init__(self):
+            self.config = {}
+            self.teuthology_config = teuth_config
+            self.cluster = LocalCluster()
+            self.daemons = DaemonGroup()
+
+            # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
+            # tests that want to look these up via ctx can do so.
+            # Inspect ceph.conf to see what roles exist
+            for conf_line in open("ceph.conf").readlines():
+                for svc_type in ["mon", "osd", "mds"]:
+                    if svc_type not in self.daemons.daemons:
+                        self.daemons.daemons[svc_type] = {}
+                    match = re.match("^\[{0}\.(.+)\]$".format(svc_type), conf_line)
+                    if match:
+                        svc_id = match.group(1)
+                        self.daemons.daemons[svc_type][svc_id] = LocalDaemon(svc_type, svc_id)
+
+        def __del__(self):
+            shutil.rmtree(self.teuthology_config['test_path'])
+
+    ctx = LocalContext()
+
+    mounts = []
+    for client_id in clients:
+        # Populate client keyring (it sucks to use client.admin for test clients
+        # because it's awkward to find the logs later)
+        client_name = "client.{0}".format(client_id)
+
+        if client_name not in open("./keyring").read():
+            p = remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "auth", "get-or-create", client_name,
+                                 "osd", "allow rw",
+                                 "mds", "allow",
+                                 "mon", "allow r"])
+
+            open("./keyring", "a").write(p.stdout.getvalue())
+
+        mount = LocalFuseMount(test_dir, client_id)
+        mounts.append(mount)
+        if mount.is_mounted():
+            log.warn("unmounting {0}".format(mount.mountpoint))
+            mount.umount_wait()
+        else:
+            if os.path.exists(mount.mountpoint):
+                os.rmdir(mount.mountpoint)
+    filesystem = LocalFilesystem(ctx)
+    mds_cluster = LocalMDSCluster(ctx)
+
+    from tasks.cephfs_test_runner import DecoratingLoader
+
+    class LogStream(object):
+        def __init__(self):
+            self.buffer = ""
+
+        def write(self, data):
+            self.buffer += data
+            if "\n" in self.buffer:
+                lines = self.buffer.split("\n")
+                for line in lines[:-1]:
+                    pass
+                    # sys.stderr.write(line + "\n")
+                    log.info(line)
+                self.buffer = lines[-1]
+
+        def flush(self):
+            pass
+
+    decorating_loader = DecoratingLoader({
+        "ctx": ctx,
+        "mounts": mounts,
+        "fs": filesystem,
+        "mds_cluster": mds_cluster
+    })
+
+    # For the benefit of polling tests like test_full -- in teuthology land we set this
+    # in a .yaml, here it's just a hardcoded thing for the developer's pleasure.
+    remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "tell", "osd.*", "injectargs", "--osd-mon-report-interval-max", "5"])
+    filesystem.set_ceph_conf("osd", "osd_mon_report_interval_max", "5")
+
+    # Vstart defaults to two segments, which very easily gets a "behind on trimming" health warning
+    # from normal IO latency.  Increase it for running tests.
+    filesystem.set_ceph_conf("mds", "mds log max segments", "10")
+
+    # Make sure the filesystem created in tests has uid/gid that will let us talk to
+    # it after mounting it (without having to become root).  Set in 'global', not just 'mds',
+    # so that cephfs-data-scan will pick it up too.
+    filesystem.set_ceph_conf("global", "mds root ino uid", "%s" % os.getuid())
+    filesystem.set_ceph_conf("global", "mds root ino gid", "%s" % os.getgid())
+
+    # Monkeypatch get_package_version to avoid having to work out what kind of distro we're on
+    def _get_package_version(remote, pkg_name):
+        # Used in cephfs tests to find fuse version.  Your development workstation *does* have >=2.9, right?
+        return "2.9"
+
+    import teuthology.packaging
+    teuthology.packaging.get_package_version = _get_package_version
+
+    def enumerate_methods(s):
+        for t in s._tests:
+            if isinstance(t, suite.BaseTestSuite):
+                for sub in enumerate_methods(t):
+                    yield sub
+            else:
+                yield s, t
+
+    interactive_on_error = False
+
+    args = sys.argv[1:]
+    flags = [a for a in args if a.startswith("-")]
+    modules = [a for a in args if not a.startswith("-")]
+    for f in flags:
+        if f == "--interactive":
+            interactive_on_error = True
+        else:
+            log.error("Unknown option '{0}'".format(f))
+            sys.exit(-1)
+
+    if modules:
+        log.info("Executing modules: {0}".format(modules))
+        module_suites = []
+        for mod_name in modules:
+            # Test names like cephfs.test_auto_repair
+            log.info("Loaded: {0}".format(list(module_suites)))
+            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
+        overall_suite = suite.TestSuite(module_suites)
+    else:
+        log.info("Excuting all tests")
+        overall_suite = decorating_loader.discover(
+            os.path.dirname(os.path.abspath(__file__))
+        )
+
+    # Filter out tests that don't lend themselves to interactive running.
+    victims = []
+    for case, method in enumerate_methods(overall_suite):
+        fn = getattr(method, method._testMethodName)
+
+        drop_test = False
+
+        if hasattr(fn, 'is_long_running') and getattr(fn, 'is_long_running') is True:
+            drop_test = True
+            log.warn("Dropping test because long running: ".format(method.id()))
+
+        if getattr(fn, "needs_trimming", False) is True:
+            drop_test = (os.getuid() != 0)
+            log.warn("Dropping test because client trim unavailable: ".format(method.id()))
+
+        if drop_test:
+            # Don't drop the test if it was explicitly requested in arguments
+            is_named = False
+            for named in modules:
+                if named.endswith(method.id()):
+                    is_named = True
+                    break
+
+            if not is_named:
+                victims.append((case, method))
+
+    log.info("Disabling {0} tests because of is_long_running or needs_trimming".format(len(victims)))
+    for s, method in victims:
+        s._tests.remove(method)
+
+    if interactive_on_error:
+        result_class = InteractiveFailureResult
+    else:
+        result_class = unittest.TextTestResult
+    fail_on_skip = False
+
+    class LoggingResult(result_class):
+        def startTest(self, test):
+            log.info("Starting test: {0}".format(self.getDescription(test)))
+            test.started_at = datetime.datetime.utcnow()
+            return super(LoggingResult, self).startTest(test)
+
+        def stopTest(self, test):
+            log.info("Stopped test: {0} in {1}s".format(
+                self.getDescription(test),
+                (datetime.datetime.utcnow() - test.started_at).total_seconds()
+            ))
+
+        def addSkip(self, test, reason):
+            if fail_on_skip:
+                # Don't just call addFailure because that requires a traceback
+                self.failures.append((test, reason))
+            else:
+                super(LoggingResult, self).addSkip(test, reason)
+
+    # Execute!
+    result = unittest.TextTestRunner(
+        stream=LogStream(),
+        resultclass=LoggingResult,
+        verbosity=2,
+        failfast=True).run(overall_suite)
+
+    if not result.wasSuccessful():
+        result.printErrors()  # duplicate output at end for convenience
+
+        bad_tests = []
+        for test, error in result.errors:
+            bad_tests.append(str(test))
+        for test, failure in result.failures:
+            bad_tests.append(str(test))
+
+        sys.exit(-1)
+    else:
+        sys.exit(0)
+
+
+if __name__ == "__main__":
+    exec_test()
diff --git a/qa/tasks/cephfs_test_runner.py b/qa/tasks/cephfs_test_runner.py
new file mode 100644
index 0000000..f199c29
--- /dev/null
+++ b/qa/tasks/cephfs_test_runner.py
@@ -0,0 +1,188 @@
+import contextlib
+import logging
+import os
+import unittest
+from unittest import suite, loader, case
+from teuthology.task import interactive
+from tasks.cephfs.filesystem import Filesystem, MDSCluster
+
+log = logging.getLogger(__name__)
+
+
+class DecoratingLoader(loader.TestLoader):
+    """
+    A specialization of TestLoader that tags some extra attributes
+    onto test classes as they are loaded.
+    """
+    def __init__(self, params):
+        self._params = params
+        super(DecoratingLoader, self).__init__()
+
+    def _apply_params(self, obj):
+        for k, v in self._params.items():
+            setattr(obj, k, v)
+
+    def loadTestsFromTestCase(self, testCaseClass):
+        self._apply_params(testCaseClass)
+        return super(DecoratingLoader, self).loadTestsFromTestCase(testCaseClass)
+
+    def loadTestsFromName(self, name, module=None):
+        result = super(DecoratingLoader, self).loadTestsFromName(name, module)
+
+        # Special case: when we were called with the name of a single method, we
+        # get a suite containing one TestCase
+        tests_in_result = list(result)
+        if len(tests_in_result) == 1 and isinstance(tests_in_result[0], case.TestCase):
+            self._apply_params(tests_in_result[0])
+
+        return result
+
+
+class LogStream(object):
+    def __init__(self):
+        self.buffer = ""
+
+    def write(self, data):
+        self.buffer += data
+        if "\n" in self.buffer:
+            lines = self.buffer.split("\n")
+            for line in lines[:-1]:
+                log.info(line)
+            self.buffer = lines[-1]
+
+    def flush(self):
+        pass
+
+
+class InteractiveFailureResult(unittest.TextTestResult):
+    """
+    Specialization that implements interactive-on-error style
+    behavior.
+    """
+    ctx = None
+
+    def addFailure(self, test, err):
+        log.error(self._exc_info_to_string(err, test))
+        log.error("Failure in test '{0}', going interactive".format(
+            self.getDescription(test)
+        ))
+        interactive.task(ctx=self.ctx, config=None)
+
+    def addError(self, test, err):
+        log.error(self._exc_info_to_string(err, test))
+        log.error("Error in test '{0}', going interactive".format(
+            self.getDescription(test)
+        ))
+        interactive.task(ctx=self.ctx, config=None)
+
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run the CephFS test cases.
+
+    Run everything in tasks/cephfs/test_*.py:
+
+    ::
+
+        tasks:
+          - install:
+          - ceph:
+          - ceph-fuse:
+          - cephfs_test_runner:
+
+    `modules` argument allows running only some specific modules:
+
+    ::
+
+        tasks:
+            ...
+          - cephfs_test_runner:
+              modules:
+                - tasks.cephfs.test_sessionmap
+                - tasks.cephfs.test_auto_repair
+
+    By default, any cases that can't be run on the current cluster configuration
+    will generate a failure.  When the optional `fail_on_skip` argument is set
+    to false, any tests that can't be run on the current configuration will
+    simply be skipped:
+
+    ::
+
+        tasks:
+            ...
+          - cephfs_test_runner:
+              fail_on_skip: false
+
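+    Failures can instead drop you into an interactive shell for debugging: the task
+    checks the `interactive-on-error` flag in the top-level job config (see the
+    ctx.config.get("interactive-on-error", ...) call below), so a sketch of the
+    relevant yaml would be:
+
+    ::
+
+        interactive-on-error: true
+        tasks:
+            ...
+          - cephfs_test_runner:
+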
+    """
+    fs = Filesystem(ctx)
+    mds_cluster = MDSCluster(ctx)
+
+    # Mount objects, sorted by ID
+    mounts = [v for k, v in sorted(ctx.mounts.items(), lambda a, b: cmp(a[0], b[0]))]
+
+    decorating_loader = DecoratingLoader({
+        "ctx": ctx,
+        "mounts": mounts,
+        "fs": fs,
+        "mds_cluster": mds_cluster
+    })
+
+    fail_on_skip = config.get('fail_on_skip', True)
+
+    # Put useful things onto ctx for interactive debugging
+    ctx.fs = fs
+    ctx.mds_cluster = mds_cluster
+
+    # Depending on config, either load specific modules, or scan for modules
+    if config and 'modules' in config and config['modules']:
+        module_suites = []
+        for mod_name in config['modules']:
+            # Test names like cephfs.test_auto_repair
+            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
+        overall_suite = suite.TestSuite(module_suites)
+    else:
+        # Default, run all tests
+        overall_suite = decorating_loader.discover(
+            os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                "cephfs/"
+            )
+        )
+
+    if ctx.config.get("interactive-on-error", False):
+        InteractiveFailureResult.ctx = ctx
+        result_class = InteractiveFailureResult
+    else:
+        result_class = unittest.TextTestResult
+
+    class LoggingResult(result_class):
+        def startTest(self, test):
+            log.info("Starting test: {0}".format(self.getDescription(test)))
+            return super(LoggingResult, self).startTest(test)
+
+        def addSkip(self, test, reason):
+            if fail_on_skip:
+                # Don't just call addFailure because that requires a traceback
+                self.failures.append((test, reason))
+            else:
+                super(LoggingResult, self).addSkip(test, reason)
+
+    # Execute!
+    result = unittest.TextTestRunner(
+        stream=LogStream(),
+        resultclass=LoggingResult,
+        verbosity=2,
+        failfast=True).run(overall_suite)
+
+    if not result.wasSuccessful():
+        result.printErrors()  # duplicate output at end for convenience
+
+        bad_tests = []
+        for test, error in result.errors:
+            bad_tests.append(str(test))
+        for test, failure in result.failures:
+            bad_tests.append(str(test))
+
+        raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests)))
+
+    yield
diff --git a/qa/tasks/cifs_mount.py b/qa/tasks/cifs_mount.py
new file mode 100644
index 0000000..b282b0b
--- /dev/null
+++ b/qa/tasks/cifs_mount.py
@@ -0,0 +1,137 @@
+"""
+Mount cifs clients.  Unmount when finished.
+"""
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Mount/unmount a cifs client.
+
+    The config is optional and defaults to mounting on all clients. If
+    a config is given, it is expected to be a list of clients to do
+    this operation on.
+
+    Example that starts smbd and mounts cifs on all nodes::
+
+        tasks:
+        - ceph:
+        - samba:
+        - cifs-mount:
+        - interactive:
+
+    Example that splits smbd and cifs::
+
+        tasks:
+        - ceph:
+        - samba: [samba.0]
+        - cifs-mount: [client.0]
+        - ceph-fuse: [client.1]
+        - interactive:
+
+    Example that specifies the share name::
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+        - samba:
+            samba.0:
+                cephfuse: "{testdir}/mnt.0"
+        - cifs-mount:
+            client.0:
+                share: cephfuse
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Mounting cifs clients...')
+
+    if config is None:
+        config = dict(('client.{id}'.format(id=id_), None)
+                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+    elif isinstance(config, list):
+        config = dict((name, None) for name in config)
+
+    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+    from .samba import get_sambas
+    samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
+    sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
+    (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
+    log.info('samba ip: {ip}'.format(ip=ip))
+
+    for id_, remote in clients:
+        mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+        log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
+                id=id_, remote=remote,mnt=mnt))
+
+        remote.run(
+            args=[
+                'mkdir',
+                '--',
+                mnt,
+                ],
+            )
+
+        rolestr = 'client.{id_}'.format(id_=id_)
+        unc = "ceph"
+        log.info("config: {c}".format(c=config))
+        if config[rolestr] is not None and 'share' in config[rolestr]:
+            unc = config[rolestr]['share']
+
+        remote.run(
+            args=[
+                'sudo',
+                'mount',
+                '-t',
+                'cifs',
+                '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
+                '-o',
+                'username=ubuntu,password=ubuntu',
+                mnt,
+                ],
+            )
+
+        remote.run(
+            args=[
+                'sudo',
+                'chown',
+                'ubuntu:ubuntu',
+                '{m}/'.format(m=mnt),
+                ],
+            )
+
+    try:
+        yield
+    finally:
+        log.info('Unmounting cifs clients...')
+        for id_, remote in clients:
+            remote.run(
+                args=[
+                    'sudo',
+                    'umount',
+                    mnt,
+                    ],
+                )
+        for id_, remote in clients:
+            while True:
+                try:
+                    remote.run(
+                        args=[
+                            'rmdir', '--', mnt,
+                            run.Raw('2>&1'),
+                            run.Raw('|'),
+                            'grep', 'Device or resource busy',
+                            ],
+                        )
+                    import time
+                    time.sleep(1)
+                except Exception:
+                    break
diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py
new file mode 100644
index 0000000..02c6667
--- /dev/null
+++ b/qa/tasks/cram.py
@@ -0,0 +1,155 @@
+"""
+Cram tests
+"""
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+from teuthology.config import config as teuth_config
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Run all cram tests from the specified urls on the specified
+    clients. Each client runs tests in parallel.
+
+    Limitations:
+    Tests must have a .t suffix. Tests with duplicate names will
+    overwrite each other, so only the last one will run.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - cram:
+            clients:
+              client.0:
+              - http://download.ceph.com/qa/test.t
+              - http://download.ceph.com/qa/test2.t
+              client.1: [http://download.ceph.com/qa/test.t]
+            branch: foo
+
+    You can also run a list of cram tests on all clients::
+
+        tasks:
+        - ceph:
+        - cram:
+            clients:
+              all: [http://download.ceph.com/qa/test.t]
+
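+    Test URLs may contain '{repo}' and '{branch}' placeholders, which are substituted
+    with the ceph git repo name and the configured branch/tag/sha1 before fetching (see
+    the test.format(...) call below).  A purely illustrative entry::
+
+        clients:
+          all: ["http://example.com/qa/{branch}/test.t"]
+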
+    :param ctx: Context
+    :param config: Configuration
+    """
+    assert isinstance(config, dict)
+    assert 'clients' in config and isinstance(config['clients'], dict), \
+           'configuration must contain a dictionary of clients'
+
+    clients = teuthology.replace_all_with_clients(ctx.cluster,
+                                                  config['clients'])
+    testdir = teuthology.get_testdir(ctx)
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('workunit', {}))
+
+    refspec = config.get('branch')
+    if refspec is None:
+        refspec = config.get('tag')
+    if refspec is None:
+        refspec = config.get('sha1')
+    if refspec is None:
+        refspec = 'HEAD'
+
+    # hack: the git_url is always ceph-ci or ceph
+    git_url = teuth_config.get_ceph_git_url()
+    repo_name = 'ceph.git'
+    if git_url.count('ceph-ci'):
+        repo_name = 'ceph-ci.git'
+
+    try:
+        for client, tests in clients.iteritems():
+            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
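+            # set up a per-client results dir and a throwaway virtualenv
+            # with a pinned cram version so runs do not depend on system
+            # python packages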
+            remote.run(
+                args=[
+                    'mkdir', '--', client_dir,
+                    run.Raw('&&'),
+                    'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
+                    'install', 'cram==0.6',
+                    ],
+                )
+            for test in tests:
+                url = test.format(repo=repo_name, branch=refspec)
+                log.info('fetching test %s for %s', url, client)
+                assert test.endswith('.t'), 'tests must end in .t'
+                remote.run(
+                    args=[
+                        'wget', '-nc', '-nv', '-P', client_dir, '--', url,
+                        ],
+                    )
+
+        with parallel() as p:
+            for role in clients.iterkeys():
+                p.spawn(_run_tests, ctx, role)
+    finally:
+        for client, tests in clients.iteritems():
+            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
+            test_files = set([test.rsplit('/', 1)[1] for test in tests])
+
+            # remove test files unless they failed
+            for test_file in test_files:
+                abs_file = os.path.join(client_dir, test_file)
+                remote.run(
+                    args=[
+                        'test', '-f', abs_file + '.err',
+                        run.Raw('||'),
+                        'rm', '-f', '--', abs_file,
+                        ],
+                    )
+
+            # ignore failure since more than one client may
+            # be run on a host, and the client dir should be
+            # non-empty if the test failed
+            remote.run(
+                args=[
+                    'rm', '-rf', '--',
+                    '{tdir}/virtualenv'.format(tdir=testdir),
+                    run.Raw(';'),
+                    'rmdir', '--ignore-fail-on-non-empty', client_dir,
+                    ],
+                )
+
+def _run_tests(ctx, role):
+    """
+    For each role, check to make sure it's a client, then run the cram on that client
+
+    :param ctx: Context
+    :param role: Roles
+    """
+    assert isinstance(role, basestring)
+    PREFIX = 'client.'
+    assert role.startswith(PREFIX)
+    id_ = role[len(PREFIX):]
+    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+    ceph_ref = ctx.summary.get('ceph-sha1', 'master')
+
+    testdir = teuthology.get_testdir(ctx)
+    log.info('Running tests for %s...', role)
+    remote.run(
+        args=[
+            run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
+            run.Raw('CEPH_ID="{id}"'.format(id=id_)),
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            '{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
+            '-v', '--',
+            run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
+            ],
+        logger=log.getChild(role),
+        )
diff --git a/qa/tasks/create_verify_lfn_objects.py b/qa/tasks/create_verify_lfn_objects.py
new file mode 100644
index 0000000..01ab1a3
--- /dev/null
+++ b/qa/tasks/create_verify_lfn_objects.py
@@ -0,0 +1,83 @@
+"""
+Create and verify long file name (LFN) rados objects
+"""
+import contextlib
+import logging
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    For each combination of namespace and name_length, create
+    <num_objects> objects with name length <name_length>
+    on entry.  On exit, verify that the objects still exist, can
+    be deleted, and then don't exist.
+
+    Usage::
+
+       create_verify_lfn_objects.py:
+         pool: <pool_name> default: 'data'
+         prefix: <prefix> default: ''
+         namespace: [<namespace>] default: [None]
+         num_objects: [<num_objects>] default: 10
+         name_length: [<name_length>] default: [400]
+    """
+    pool = config.get('pool', 'data')
+    num_objects = config.get('num_objects', 10)
+    name_length = config.get('name_length', [400])
+    namespace = config.get('namespace', [None])
+    prefix = config.get('prefix', '')
+    manager = ctx.managers['ceph']
+
+    objects = []
+    for l in name_length:
+        for ns in namespace:
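+            # pad each name with 'a's so that namespace + prefix + filler +
+            # index add up to exactly <name_length> characters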
+            def object_name(i):
+                nslength = 0
+                if ns:
+                    nslength = len(ns)
+                numstr = str(i)
+                fillerlen = l - nslength - len(prefix) - len(numstr)
+                assert fillerlen >= 0
+                return prefix + ('a'*fillerlen) + numstr
+            objects += [(ns, object_name(i)) for i in  range(num_objects)]
+
+    for ns, name in objects:
+        err = manager.do_put(
+            pool,
+            name,
+            '/etc/resolv.conf',
+            namespace=ns)
+        log.info("err is " + str(err))
+        assert err == 0
+
+    try:
+        yield
+    finally:
+        log.info('ceph_verify_lfn_objects verifying...')
+        for ns, name in objects:
+            err = manager.do_get(
+                pool,
+                name,
+                namespace=ns)
+            log.info("err is " + str(err))
+            assert err == 0
+
+        log.info('ceph_verify_lfn_objects deleting...')
+        for ns, name in objects:
+            err = manager.do_rm(
+                pool,
+                name,
+                namespace=ns)
+            log.info("err is " + str(err))
+            assert err == 0
+
+        log.info('ceph_verify_lfn_objects verifying absent...')
+        for ns, name in objects:
+            err = manager.do_get(
+                pool,
+                name,
+                namespace=ns)
+            log.info("err is " + str(err))
+            assert err != 0
diff --git a/qa/tasks/devstack.py b/qa/tasks/devstack.py
new file mode 100644
index 0000000..9fa4c68
--- /dev/null
+++ b/qa/tasks/devstack.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python
+import contextlib
+import logging
+from cStringIO import StringIO
+import textwrap
+from ConfigParser import ConfigParser
+import time
+
+from teuthology.orchestra import run
+from teuthology import misc
+from teuthology.contextutil import nested
+
+log = logging.getLogger(__name__)
+
+DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
+DS_STABLE_BRANCHES = ("havana", "grizzly")
+
+is_devstack_node = lambda role: role.startswith('devstack')
+is_osd_node = lambda role: role.startswith('osd')
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    if config is None:
+        config = {}
+    if not isinstance(config, dict):
+        raise TypeError("config must be a dict")
+    with nested(lambda: install(ctx=ctx, config=config),
+                lambda: smoke(ctx=ctx, config=config),
+                ):
+        yield
+
+
+@contextlib.contextmanager
+def install(ctx, config):
+    """
+    Install OpenStack DevStack and configure it to use a Ceph cluster for
+    Glance and Cinder.
+
+    Requires one node with a role 'devstack'
+
+    Since devstack runs rampant on the system it's used on, typically you will
+    want to reprovision that machine after using devstack on it.
+
+    Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
+    recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
+    adding this to the yaml:
+
+    downburst:
+        ram: 4G
+
+    This was created using documentation found here:
+        https://github.com/openstack-dev/devstack/blob/master/README.md
+        http://ceph.com/docs/master/rbd/rbd-openstack/
+    """
+    if config is None:
+        config = {}
+    if not isinstance(config, dict):
+        raise TypeError("config must be a dict")
+
+    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+    an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+
+    devstack_branch = config.get("branch", "master")
+    install_devstack(devstack_node, devstack_branch)
+    try:
+        configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
+        yield
+    finally:
+        pass
+
+
+def install_devstack(devstack_node, branch="master"):
+    log.info("Cloning DevStack repo...")
+
+    args = ['git', 'clone', DEVSTACK_GIT_REPO]
+    devstack_node.run(args=args)
+
+    if branch != "master":
+        if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
+            branch = "stable/" + branch
+        log.info("Checking out {branch} branch...".format(branch=branch))
+        cmd = "cd devstack && git checkout " + branch
+        devstack_node.run(args=cmd)
+
+    log.info("Installing DevStack...")
+    args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
+    devstack_node.run(args=args)
+
+
+def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
+    pool_size = config.get('pool_size', '128')
+    create_pools(ceph_node, pool_size)
+    distribute_ceph_conf(devstack_node, ceph_node)
+    # This is where we would install python-ceph and ceph-common but it appears
+    # the ceph task does that for us.
+    generate_ceph_keys(ceph_node)
+    distribute_ceph_keys(devstack_node, ceph_node)
+    secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
+    update_devstack_config_files(devstack_node, secret_uuid)
+    set_apache_servername(devstack_node)
+    # Rebooting is the most-often-used method of restarting devstack services
+    misc.reboot(devstack_node)
+    start_devstack(devstack_node)
+    restart_apache(devstack_node)
+
+
+def create_pools(ceph_node, pool_size):
+    log.info("Creating pools on Ceph cluster...")
+
+    for pool_name in ['volumes', 'images', 'backups']:
+        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
+        ceph_node.run(args=args)
+
+
+def distribute_ceph_conf(devstack_node, ceph_node):
+    log.info("Copying ceph.conf to DevStack node...")
+
+    ceph_conf_path = '/etc/ceph/ceph.conf'
+    ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
+    misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)
+
+
+def generate_ceph_keys(ceph_node):
+    log.info("Generating Ceph keys...")
+
+    ceph_auth_cmds = [
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
+    ]
+    for cmd in ceph_auth_cmds:
+        ceph_node.run(args=cmd)
+
+
+def distribute_ceph_keys(devstack_node, ceph_node):
+    log.info("Copying Ceph keys to DevStack node...")
+
+    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
+        key_stringio = StringIO()
+        from_remote.run(
+            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
+            stdout=key_stringio)
+        key_stringio.seek(0)
+        misc.sudo_write_file(to_remote, dest_path,
+                             key_stringio, owner=owner)
+    keys = [
+        dict(name='client.glance',
+             path='/etc/ceph/ceph.client.glance.keyring',
+             # devstack appears to just want root:root
+             #owner='glance:glance',
+             ),
+        dict(name='client.cinder',
+             path='/etc/ceph/ceph.client.cinder.keyring',
+             # devstack appears to just want root:root
+             #owner='cinder:cinder',
+             ),
+        dict(name='client.cinder-backup',
+             path='/etc/ceph/ceph.client.cinder-backup.keyring',
+             # devstack appears to just want root:root
+             #owner='cinder:cinder',
+             ),
+    ]
+    for key_dict in keys:
+        copy_key(ceph_node, key_dict['name'], devstack_node,
+                 key_dict['path'], key_dict.get('owner'))
+
+
+def set_libvirt_secret(devstack_node, ceph_node):
+    log.info("Setting libvirt secret...")
+
+    cinder_key_stringio = StringIO()
+    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
+                  stdout=cinder_key_stringio)
+    cinder_key = cinder_key_stringio.getvalue().strip()
+
+    uuid_stringio = StringIO()
+    devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
+    uuid = uuid_stringio.getvalue().strip()
+
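+    # register the cinder key with libvirt under a fresh uuid so qemu can
+    # authenticate to the ceph cluster when attaching rbd volumes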
+    secret_path = '/tmp/secret.xml'
+    secret_template = textwrap.dedent("""
+    <secret ephemeral='no' private='no'>
+        <uuid>{uuid}</uuid>
+        <usage type='ceph'>
+            <name>client.cinder secret</name>
+        </usage>
+    </secret>""")
+    misc.sudo_write_file(devstack_node, secret_path,
+                         secret_template.format(uuid=uuid))
+    devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
+                            secret_path])
+    devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
+                            uuid, '--base64', cinder_key])
+    return uuid
+
+
+def update_devstack_config_files(devstack_node, secret_uuid):
+    log.info("Updating DevStack config files to use Ceph...")
+
+    def backup_config(node, file_name, backup_ext='.orig.teuth'):
+        node.run(args=['cp', '-f', file_name, file_name + backup_ext])
+
+    def update_config(config_name, config_stream, update_dict,
+                      section='DEFAULT'):
+        parser = ConfigParser()
+        parser.readfp(config_stream)
+        for (key, value) in update_dict.items():
+            parser.set(section, key, value)
+        out_stream = StringIO()
+        parser.write(out_stream)
+        out_stream.seek(0)
+        return out_stream
+
+    updates = [
+        dict(name='/etc/glance/glance-api.conf', options=dict(
+            default_store='rbd',
+            rbd_store_user='glance',
+            rbd_store_pool='images',
+            show_image_direct_url='True',)),
+        dict(name='/etc/cinder/cinder.conf', options=dict(
+            volume_driver='cinder.volume.drivers.rbd.RBDDriver',
+            rbd_pool='volumes',
+            rbd_ceph_conf='/etc/ceph/ceph.conf',
+            rbd_flatten_volume_from_snapshot='false',
+            rbd_max_clone_depth='5',
+            glance_api_version='2',
+            rbd_user='cinder',
+            rbd_secret_uuid=secret_uuid,
+            backup_driver='cinder.backup.drivers.ceph',
+            backup_ceph_conf='/etc/ceph/ceph.conf',
+            backup_ceph_user='cinder-backup',
+            backup_ceph_chunk_size='134217728',
+            backup_ceph_pool='backups',
+            backup_ceph_stripe_unit='0',
+            backup_ceph_stripe_count='0',
+            restore_discard_excess_bytes='true',
+            )),
+        dict(name='/etc/nova/nova.conf', options=dict(
+            libvirt_images_type='rbd',
+            libvirt_images_rbd_pool='volumes',
+            libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
+            rbd_user='cinder',
+            rbd_secret_uuid=secret_uuid,
+            libvirt_inject_password='false',
+            libvirt_inject_key='false',
+            libvirt_inject_partition='-2',
+            )),
+    ]
+
+    for update in updates:
+        file_name = update['name']
+        options = update['options']
+        config_str = misc.get_file(devstack_node, file_name, sudo=True)
+        config_stream = StringIO(config_str)
+        backup_config(devstack_node, file_name)
+        new_config_stream = update_config(file_name, config_stream, options)
+        misc.sudo_write_file(devstack_node, file_name, new_config_stream)
+
+
+def set_apache_servername(node):
+    # Apache complains: "Could not reliably determine the server's fully
+    # qualified domain name, using 127.0.0.1 for ServerName"
+    # So, let's make sure it knows its name.
+    log.info("Setting Apache ServerName...")
+
+    hostname = node.hostname
+    config_file = '/etc/apache2/conf.d/servername'
+    misc.sudo_write_file(node, config_file,
+                         "ServerName {name}".format(name=hostname))
+
+
+def start_devstack(devstack_node):
+    log.info("Patching devstack start script...")
+    # This causes screen to start headless - otherwise rejoin-stack.sh fails
+    # because there is no terminal attached.
+    cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
+    devstack_node.run(args=cmd)
+
+    log.info("Starting devstack...")
+    cmd = "cd devstack && ./rejoin-stack.sh"
+    devstack_node.run(args=cmd)
+
+    # This was added because I was getting timeouts on Cinder requests - which
+    # were trying to access Keystone on port 5000. A more robust way to handle
+    # this would be to introduce a wait-loop on devstack_node that checks to
+    # see if a service is listening on port 5000.
+    log.info("Waiting 30s for devstack to start...")
+    time.sleep(30)
+
+
+def restart_apache(node):
+    node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)
+
+
+@contextlib.contextmanager
+def exercise(ctx, config):
+    log.info("Running devstack exercises...")
+
+    if config is None:
+        config = {}
+    if not isinstance(config, dict):
+        raise TypeError("config must be a dict")
+
+    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+
+    # TODO: save the log *and* preserve failures
+    #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
+
+    try:
+        #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format(  # noqa
+        #    dir=devstack_archive_dir)
+        cmd = "cd devstack && ./exercise.sh"
+        devstack_node.run(args=cmd, wait=True)
+        yield
+    finally:
+        pass
+
+
+def create_devstack_archive(ctx, devstack_node):
+    test_dir = misc.get_testdir(ctx)
+    devstack_archive_dir = "{test_dir}/archive/devstack".format(
+        test_dir=test_dir)
+    devstack_node.run(args="mkdir -p " + devstack_archive_dir)
+    return devstack_archive_dir
+
+
+@contextlib.contextmanager
+def smoke(ctx, config):
+    log.info("Running a basic smoketest...")
+
+    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+    an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+
+    try:
+        create_volume(devstack_node, an_osd_node, 'smoke0', 1)
+        yield
+    finally:
+        pass
+
+
+def create_volume(devstack_node, ceph_node, vol_name, size):
+    """
+    :param size: The size of the volume, in GB
+    """
+    size = str(size)
+    log.info("Creating a {size}GB volume named {name}...".format(
+        name=vol_name,
+        size=size))
+    args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
+            '--display-name', vol_name, size]
+    out_stream = StringIO()
+    devstack_node.run(args=args, stdout=out_stream, wait=True)
+    vol_info = parse_os_table(out_stream.getvalue())
+    log.debug("Volume info: %s", str(vol_info))
+
+    out_stream = StringIO()
+    try:
+        ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
+                      wait=True)
+    except run.CommandFailedError:
+        log.debug("Original rbd call failed; retrying without '--id cinder'")
+        ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
+                      wait=True)
+
+    assert vol_info['id'] in out_stream.getvalue(), \
+        "Volume not found on Ceph cluster"
+    assert vol_info['size'] == size, \
+        "Volume size on Ceph cluster is different than specified"
+    return vol_info['id']
+
+
+def parse_os_table(table_str):
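+    # openstack clients print ascii tables of '| Property | Value |' rows;
+    # pick the property (items[1]) and value (items[3]) columns from each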
+    out_dict = dict()
+    for line in table_str.split('\n'):
+        if line.startswith('|'):
+            items = line.split()
+            out_dict[items[1]] = items[3]
+    return out_dict
diff --git a/qa/tasks/die_on_err.py b/qa/tasks/die_on_err.py
new file mode 100644
index 0000000..bf422ae
--- /dev/null
+++ b/qa/tasks/die_on_err.py
@@ -0,0 +1,70 @@
+"""
+Raise exceptions on osd coredumps or test err directories
+"""
+import contextlib
+import logging
+import time
+from teuthology.orchestra import run
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Die if {testdir}/err exists or if an OSD dumps core
+    """
+    if config is None:
+        config = {}
+
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < num_osds:
+        time.sleep(10)
+
+    testdir = teuthology.get_testdir(ctx)
+
+    while True:
+        for i in range(num_osds):
+            (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
+            p = osd_remote.run(
+                args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
+                wait=True,
+                check_status=False,
+            )
+            exit_status = p.exitstatus
+
+            if exit_status == 0:
+                log.info("osd %d has an error" % i)
+                raise Exception("osd %d error" % i)
+
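+            # a crashed osd finishes its log with an 'end dump of recent
+            # events' marker, so a match on the last line means it dumped core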
+            log_path = '/var/log/ceph/osd.%d.log' % (i)
+
+            p = osd_remote.run(
+                args = [
+                         'tail', '-1', log_path,
+                         run.Raw('|'),
+                         'grep', '-q', 'end dump'
+                       ],
+                wait=True,
+                check_status=False,
+            )
+            exit_status = p.exitstatus
+
+            if exit_status == 0:
+                log.info("osd %d dumped core" % i)
+                raise Exception("osd %d dumped core" % i)
+
+        time.sleep(5)
diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py
new file mode 100644
index 0000000..97a0124
--- /dev/null
+++ b/qa/tasks/divergent_priors.py
@@ -0,0 +1,171 @@
+"""
+Special case divergence test
+"""
+import logging
+import time
+
+from teuthology import misc as teuthology
+from util.rados import rados
+
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+    """
+    Test handling of divergent entries with prior_version
+    prior to log_tail
+
+    overrides:
+      ceph:
+        conf:
+          osd:
+            debug osd: 5
+
+    Requires 3 osds on a single test node.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'divergent_priors task only accepts a dict for configuration'
+
+    manager = ctx.managers['ceph']
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'noin')
+    manager.raw_cluster_cmd('osd', 'set', 'nodown')
+    manager.wait_for_clean()
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+    dummyfile2 = '/etc/resolv.conf'
+
+    # create 1 pg pool
+    log.info('creating foo')
+    manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
+
+    osds = [0, 1, 2]
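+    # keep the pg log short so the divergent entries end up with a
+    # prior_version older than log_tail, which is the case under test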
+    for i in osds:
+        manager.set_config(i, osd_min_pg_log_entries=10)
+        manager.set_config(i, osd_max_pg_log_entries=10)
+        manager.set_config(i, osd_pg_log_trim_min=5)
+
+    # determine primary
+    divergent = manager.get_pg_primary('foo', 0)
+    log.info("primary and soon to be divergent is %d", divergent)
+    non_divergent = list(osds)
+    non_divergent.remove(divergent)
+
+    log.info('writing initial objects')
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    # write 100 objects
+    for i in range(100):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+
+    manager.wait_for_clean()
+
+    # blackhole non_divergent
+    log.info("blackholing osds %s", str(non_divergent))
+    for i in non_divergent:
+        manager.set_config(i, filestore_blackhole=1)
+
+    DIVERGENT_WRITE = 5
+    DIVERGENT_REMOVE = 5
+    # Write some soon to be divergent
+    log.info('writing divergent objects')
+    for i in range(DIVERGENT_WRITE):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
+                         dummyfile2], wait=False)
+    # Remove some soon to be divergent
+    log.info('remove divergent objects')
+    for i in range(DIVERGENT_REMOVE):
+        rados(ctx, mon, ['-p', 'foo', 'rm',
+                         'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
+    time.sleep(10)
+    mon.run(
+        args=['killall', '-9', 'rados'],
+        wait=True,
+        check_status=False)
+
+    # kill all the osds but leave divergent in
+    log.info('killing all the osds')
+    for i in osds:
+        manager.kill_osd(i)
+    for i in osds:
+        manager.mark_down_osd(i)
+    for i in non_divergent:
+        manager.mark_out_osd(i)
+
+    # bring up non-divergent
+    log.info("bringing up non_divergent %s", str(non_divergent))
+    for i in non_divergent:
+        manager.revive_osd(i)
+    for i in non_divergent:
+        manager.mark_in_osd(i)
+
+    # write 1 non-divergent object (ensure that old divergent one is divergent)
+    objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    log.info('writing non-divergent object ' + objname)
+    rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
+
+    manager.wait_for_recovery()
+
+    # ensure no recovery of up osds first
+    log.info('delay recovery')
+    for i in non_divergent:
+        manager.wait_run_admin_socket(
+            'osd', i, ['set_recovery_delay', '100000'])
+
+    # bring in our divergent friend
+    log.info("revive divergent %d", divergent)
+    manager.raw_cluster_cmd('osd', 'set', 'noup')
+    manager.revive_osd(divergent)
+
+    log.info('delay recovery divergent')
+    manager.wait_run_admin_socket(
+        'osd', divergent, ['set_recovery_delay', '100000'])
+
+    manager.raw_cluster_cmd('osd', 'unset', 'noup')
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+
+    log.info('wait for peering')
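+    # a blocking write to the single-pg pool only returns once the pg has
+    # peered and gone active with the revived osd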
+    rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+
+    # At this point the divergent_priors should have been detected
+
+    log.info("killing divergent %d", divergent)
+    manager.kill_osd(divergent)
+    log.info("reviving divergent %d", divergent)
+    manager.revive_osd(divergent)
+
+    time.sleep(20)
+
+    log.info('allowing recovery')
+    # Set osd_recovery_delay_start back to 0 and kick the queue
+    for i in osds:
+        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
+                                    'kick_recovery_wq', ' 0')
+
+    log.info('reading divergent objects')
+    for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
+        exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
+                                       '/tmp/existing'])
+        assert exit_status == 0
+
+    (remote,) = ctx.\
+        cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+    msg = "dirty_divergent_priors: true, divergent_priors: %d" \
+          % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    cmd = 'grep "{msg}" /var/log/ceph/ceph-osd.{osd}.log'\
+          .format(msg=msg, osd=divergent)
+    proc = remote.run(args=cmd, wait=True, check_status=False)
+    assert proc.exitstatus == 0
+
+    log.info("success")
diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py
new file mode 100644
index 0000000..6b38472
--- /dev/null
+++ b/qa/tasks/divergent_priors2.py
@@ -0,0 +1,207 @@
+"""
+Special case divergence test with ceph-objectstore-tool export/remove/import
+"""
+import logging
+import time
+from cStringIO import StringIO
+
+from teuthology import misc as teuthology
+from util.rados import rados
+import os
+
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+    """
+    Test handling of divergent entries with prior_version
+    prior to log_tail and a ceph-objectstore-tool export/import
+
+    overrides:
+      ceph:
+        conf:
+          osd:
+            debug osd: 5
+
+    Requires 3 osds on a single test node.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'divergent_priors task only accepts a dict for configuration'
+
+    manager = ctx.managers['ceph']
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'noin')
+    manager.raw_cluster_cmd('osd', 'set', 'nodown')
+    manager.wait_for_clean()
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+    dummyfile2 = '/etc/resolv.conf'
+    testdir = teuthology.get_testdir(ctx)
+
+    # create 1 pg pool
+    log.info('creating foo')
+    manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
+
+    osds = [0, 1, 2]
+    for i in osds:
+        manager.set_config(i, osd_min_pg_log_entries=10)
+        manager.set_config(i, osd_max_pg_log_entries=10)
+        manager.set_config(i, osd_pg_log_trim_min=5)
+
+    # determine primary
+    divergent = manager.get_pg_primary('foo', 0)
+    log.info("primary and soon to be divergent is %d", divergent)
+    non_divergent = list(osds)
+    non_divergent.remove(divergent)
+
+    log.info('writing initial objects')
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    # write 100 objects
+    for i in range(100):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+
+    manager.wait_for_clean()
+
+    # blackhole non_divergent
+    log.info("blackholing osds %s", str(non_divergent))
+    for i in non_divergent:
+        manager.set_config(i, filestore_blackhole=1)
+
+    DIVERGENT_WRITE = 5
+    DIVERGENT_REMOVE = 5
+    # Write some soon to be divergent
+    log.info('writing divergent objects')
+    for i in range(DIVERGENT_WRITE):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
+                         dummyfile2], wait=False)
+    # Remove some soon to be divergent
+    log.info('remove divergent objects')
+    for i in range(DIVERGENT_REMOVE):
+        rados(ctx, mon, ['-p', 'foo', 'rm',
+                         'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
+    time.sleep(10)
+    mon.run(
+        args=['killall', '-9', 'rados'],
+        wait=True,
+        check_status=False)
+
+    # kill all the osds but leave divergent in
+    log.info('killing all the osds')
+    for i in osds:
+        manager.kill_osd(i)
+    for i in osds:
+        manager.mark_down_osd(i)
+    for i in non_divergent:
+        manager.mark_out_osd(i)
+
+    # bring up non-divergent
+    log.info("bringing up non_divergent %s", str(non_divergent))
+    for i in non_divergent:
+        manager.revive_osd(i)
+    for i in non_divergent:
+        manager.mark_in_osd(i)
+
+    # write 1 non-divergent object (ensure that old divergent one is divergent)
+    objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    log.info('writing non-divergent object ' + objname)
+    rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
+
+    manager.wait_for_recovery()
+
+    # ensure no recovery of up osds first
+    log.info('delay recovery')
+    for i in non_divergent:
+        manager.wait_run_admin_socket(
+            'osd', i, ['set_recovery_delay', '100000'])
+
+    # bring in our divergent friend
+    log.info("revive divergent %d", divergent)
+    manager.raw_cluster_cmd('osd', 'set', 'noup')
+    manager.revive_osd(divergent)
+
+    log.info('delay recovery divergent')
+    manager.wait_run_admin_socket(
+        'osd', divergent, ['set_recovery_delay', '100000'])
+
+    manager.raw_cluster_cmd('osd', 'unset', 'noup')
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+
+    log.info('wait for peering')
+    rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+
+    # At this point the divergent_priors should have been detected
+
+    log.info("killing divergent %d", divergent)
+    manager.kill_osd(divergent)
+
+    # Export a pg
+    (exp_remote,) = ctx.\
+        cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+    FSPATH = manager.get_filepath()
+    JPATH = os.path.join(FSPATH, "journal")
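+    # round-trip pg 1.0 through ceph-objectstore-tool (export, remove,
+    # import) to check that the divergent_priors survive an offline copy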
+    prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+              "--data-path {fpath} --journal-path {jpath} "
+              "--log-file="
+              "/var/log/ceph/objectstore_tool.$$.log ".
+              format(fpath=FSPATH, jpath=JPATH))
+    pid = os.getpid()
+    expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid))
+    cmd = ((prefix + "--op export --pgid 1.0 --file {file}").
+           format(id=divergent, file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    cmd = ((prefix + "--op remove --pgid 1.0").
+           format(id=divergent, file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    cmd = ((prefix + "--op import --file {file}").
+           format(id=divergent, file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    log.info("reviving divergent %d", divergent)
+    manager.revive_osd(divergent)
+    manager.wait_run_admin_socket('osd', divergent, ['dump_ops_in_flight'])
+    time.sleep(20)
+
+    log.info('allowing recovery')
+    # Set osd_recovery_delay_start back to 0 and kick the queue
+    for i in osds:
+        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
+                                    'kick_recovery_wq', ' 0')
+
+    log.info('reading divergent objects')
+    for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
+        exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
+                                       '/tmp/existing'])
+        assert exit_status == 0
+
+    (remote,) = ctx.\
+        cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+    msg = "dirty_divergent_priors: true, divergent_priors: %d" \
+          % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    cmd = 'grep "{msg}" /var/log/ceph/ceph-osd.{osd}.log'\
+          .format(msg=msg, osd=divergent)
+    proc = remote.run(args=cmd, wait=True, check_status=False)
+    assert proc.exitstatus == 0
+
+    cmd = 'rm {file}'.format(file=expfile)
+    remote.run(args=cmd, wait=True)
+    log.info("success")
diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py
new file mode 100644
index 0000000..9e1780f
--- /dev/null
+++ b/qa/tasks/dump_stuck.py
@@ -0,0 +1,146 @@
+"""
+Dump_stuck command
+"""
+import logging
+import re
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10):
+    """
+    Do checks.  Make sure get_stuck_pgs returns the right amount of information, then
+    extract health information from the raw_cluster_cmd and compare the results with
+    the values passed in.  This passes if all asserts pass.
+
+    :param manager: Ceph manager
+    :param num_inactive: number of inactive pgs that are stuck
+    :param num_unclean: number of unclean pgs that are stuck
+    :param num_stale: number of stale pgs that are stuck
+    :param timeout: timeout value for get_stuck_pgs calls
+    """
+    inactive = manager.get_stuck_pgs('inactive', timeout)
+    assert len(inactive) == num_inactive
+    unclean = manager.get_stuck_pgs('unclean', timeout)
+    assert len(unclean) == num_unclean
+    stale = manager.get_stuck_pgs('stale', timeout)
+    assert len(stale) == num_stale
+
+    # check health output as well
+    health = manager.raw_cluster_cmd('health')
+    log.debug('ceph health is: %s', health)
+    if num_inactive > 0:
+        m = re.search('(\d+) pgs stuck inactive', health)
+        assert int(m.group(1)) == num_inactive
+    if num_unclean > 0:
+        m = re.search('(\d+) pgs stuck unclean', health)
+        assert int(m.group(1)) == num_unclean
+    if num_stale > 0:
+        m = re.search('(\d+) pgs stuck stale', health)
+        assert int(m.group(1)) == num_stale
+
+def task(ctx, config):
+    """
+    Test the dump_stuck command.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    assert config is None, \
+        'dump_stuck requires no configuration'
+    assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \
+        'dump_stuck requires exactly 2 osds'
+
+    timeout = 60
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_clean(timeout)
+
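+    # lower the stuck threshold so pgs are reported as stuck after only 10s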
+    manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--',
+#                            '--mon-osd-report-timeout 90',
+                            '--mon-pg-stuck-threshold 10')
+
+    check_stuck(
+        manager,
+        num_inactive=0,
+        num_unclean=0,
+        num_stale=0,
+        )
+    num_pgs = manager.get_num_pgs()
+
+    manager.mark_out_osd(0)
+    time.sleep(timeout)
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_recovery(timeout)
+
+    check_stuck(
+        manager,
+        num_inactive=0,
+        num_unclean=num_pgs,
+        num_stale=0,
+        )
+
+    manager.mark_in_osd(0)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_clean(timeout)
+
+    check_stuck(
+        manager,
+        num_inactive=0,
+        num_unclean=0,
+        num_stale=0,
+        )
+
+    for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
+        manager.kill_osd(id_)
+        manager.mark_down_osd(id_)
+
+    starttime = time.time()
+    done = False
+    while not done:
+        try:
+            check_stuck(
+                manager,
+                num_inactive=0,
+                num_unclean=0,
+                num_stale=num_pgs,
+                )
+            done = True
+        except AssertionError:
+            # wait up to 15 minutes to become stale
+            if time.time() - starttime > 900:
+                raise
+
+    for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
+        manager.revive_osd(id_)
+        manager.mark_in_osd(id_)
+    while True:
+        try:
+            manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+            manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+            break
+        except Exception:
+            log.exception('osds must not be started yet, waiting...')
+            time.sleep(1)
+    manager.wait_for_clean(timeout)
+
+    check_stuck(
+        manager,
+        num_inactive=0,
+        num_unclean=0,
+        num_stale=0,
+        )
diff --git a/qa/tasks/ec_lost_unfound.py b/qa/tasks/ec_lost_unfound.py
new file mode 100644
index 0000000..d197eb4
--- /dev/null
+++ b/qa/tasks/ec_lost_unfound.py
@@ -0,0 +1,167 @@
+"""
+Lost_unfound
+"""
+from teuthology.orchestra import run
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+import time
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test handling of lost objects on an ec pool.
+
+    A pretty rigid cluster is brought up and tested by this task
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'lost_unfound task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    profile = config.get('erasure_code_profile', {
+        'k': '2',
+        'm': '2',
+        'ruleset-failure-domain': 'osd'
+    })
+    profile_name = profile.get('name', 'lost_unfound')
+    manager.create_erasure_code_profile(profile_name, profile)
+    pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
+
+    # something that is always there, readable and never empty
+    dummyfile = '/etc/group'
+
+    # kludge to make sure they get a map
+    rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # create old objects
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f])
+
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.1',
+            'injectargs',
+            '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+            )
+
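+    # with a k=2/m=2 profile the writes below can only reach the two
+    # surviving osds, so marking one of them lost afterwards leaves the
+    # new objects unfound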
+    manager.kill_osd(0)
+    manager.mark_down_osd(0)
+    manager.kill_osd(3)
+    manager.mark_down_osd(3)
+    
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
+
+    # take out osd.1 and a necessary shard of those objects.
+    manager.kill_osd(1)
+    manager.mark_down_osd(1)
+    manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+    manager.revive_osd(0)
+    manager.wait_till_osd_is_up(0)
+    manager.revive_osd(3)
+    manager.wait_till_osd_is_up(3)
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    manager.wait_till_active()
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+
+    # verify that there are unfound objects
+    unfound = manager.get_num_unfound_objects()
+    log.info("there are %d unfound objects" % unfound)
+    assert unfound
+
+    testdir = teuthology.get_testdir(ctx)
+    procs = []
+    if config.get('parallel_bench', True):
+        procs.append(mon.run(
+            args=[
+                "/bin/sh", "-c",
+                " ".join(['adjust-ulimits',
+                          'ceph-coverage',
+                          '{tdir}/archive/coverage',
+                          'rados',
+                          '--no-log-to-stderr',
+                          '--name', 'client.admin',
+                          '-b', str(4<<10),
+                          '-p' , pool,
+                          '-t', '20',
+                          'bench', '240', 'write',
+                      ]).format(tdir=testdir),
+            ],
+            logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
+            stdin=run.PIPE,
+            wait=False
+        ))
+    time.sleep(10)
+
+    # mark stuff lost
+    pgs = manager.get_pg_stats()
+    for pg in pgs:
+        if pg['stat_sum']['num_objects_unfound'] > 0:
+            # verify that i can list them direct from the osd
+            log.info('listing missing/lost in %s state %s', pg['pgid'],
+                     pg['state']);
+            m = manager.list_pg_missing(pg['pgid'])
+            log.info('%s' % m)
+            assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+
+            log.info("reverting unfound in %s", pg['pgid'])
+            manager.raw_cluster_cmd('pg', pg['pgid'],
+                                    'mark_unfound_lost', 'delete')
+        else:
+            log.info("no unfound in %s", pg['pgid'])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    if not config.get('parallel_bench', True):
+        time.sleep(20)
+
+    # verify result
+    for f in range(1, 10):
+        err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-'])
+        assert err
+
+    # see if osd.1 can cope
+    manager.revive_osd(1)
+    manager.wait_till_osd_is_up(1)
+    manager.wait_for_clean()
+    run.wait(procs)
diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py
new file mode 100644
index 0000000..4e2a228
--- /dev/null
+++ b/qa/tasks/filestore_idempotent.py
@@ -0,0 +1,81 @@
+"""
+Filestore/filejournal handler
+"""
+import logging
+from teuthology.orchestra import run
+import random
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test filestore/filejournal handling of non-idempotent events.
+
+    Currently this is a kludge; we require the ceph task precedes us just
+    so that we get the tarball installed to run the test binary.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    # just use the first client...
+    client = clients[0]
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+
+    testdir = teuthology.get_testdir(ctx)
+
+    dir = '%s/ceph.data/test.%s' % (testdir, client)
+
+    seed = str(int(random.uniform(1,100)))
+
+    try:
+        log.info('creating a working dir')
+        remote.run(args=['mkdir', dir])
+        remote.run(
+            args=[
+                'cd', dir,
+                run.Raw('&&'),
+                'wget','-q', '-Orun_seed_to.sh',
+                'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD',
+                run.Raw('&&'),
+                'wget','-q', '-Orun_seed_to_range.sh',
+                'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD',
+                run.Raw('&&'),
+                'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh',
+                ]);
+
+        log.info('running a series of tests')
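+        # run the seeded workload across the 50..300 range of replay points;
+        # a non-zero exit is treated as an idempotency failure and the
+        # working dir is archived below for debugging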
+        proc = remote.run(
+            args=[
+                'cd', dir,
+                run.Raw('&&'),
+                './run_seed_to_range.sh', seed, '50', '300',
+                ],
+            wait=False,
+            check_status=False)
+        result = proc.wait()
+
+        if result != 0:
+            remote.run(
+                args=[
+                    'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir),
+                    ])
+            raise Exception("./run_seed_to_range.sh errored out")
+
+    finally:
+        remote.run(args=[
+                'rm', '-rf', '--', dir
+                ])
+
diff --git a/qa/tasks/kclient.py b/qa/tasks/kclient.py
new file mode 100644
index 0000000..ca1fb3b
--- /dev/null
+++ b/qa/tasks/kclient.py
@@ -0,0 +1,107 @@
+"""
+Mount/unmount a ``kernel`` client.
+"""
+import contextlib
+import logging
+
+from teuthology.misc import deep_merge
+from teuthology import misc
+from cephfs.kernel_mount import KernelMount
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Mount/unmount a ``kernel`` client.
+
+    The config is optional and defaults to mounting on all clients. If
+    a config is given, it is expected to be a list of clients to do
+    this operation on. This lets you e.g. set up one client with
+    ``ceph-fuse`` and another with ``kclient``.
+
+    Example that mounts all clients::
+
+        tasks:
+        - ceph:
+        - kclient:
+        - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0]
+        - kclient: [client.1]
+        - interactive:
+
+
+    Pass a dictionary instead of lists to specify per-client config::
+
+        tasks:
+        - kclient:
+            client.0:
+                debug: true
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Mounting kernel clients...')
+    assert config is None or isinstance(config, list) or isinstance(config, dict), \
+        "task kclient got invalid config"
+
+    if config is None:
+        config = ['client.{id}'.format(id=id_)
+                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]
+
+    if isinstance(config, list):
+        client_roles = config
+        config = dict([r, dict()] for r in client_roles)
+    elif isinstance(config, dict):
+        client_roles = config.keys()
+    else:
+        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))
+
+    # config has been converted to a dict by this point
+    overrides = ctx.config.get('overrides', {})
+    deep_merge(config, overrides.get('kclient', {}))
+
+    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
+
+    test_dir = misc.get_testdir(ctx)
+
+    # Assemble mon addresses
+    remotes_and_roles = ctx.cluster.remotes.items()
+    roles = [roles for (remote_, roles) in remotes_and_roles]
+    ips = [remote_.ssh.get_transport().getpeername()[0]
+           for (remote_, _) in remotes_and_roles]
+    mons = misc.get_mons(roles, ips).values()
+
+    mounts = {}
+    for id_, remote in clients:
+        kernel_mount = KernelMount(
+            mons,
+            test_dir,
+            id_,
+            remote,
+            ctx.teuthology_config.get('ipmi_user', None),
+            ctx.teuthology_config.get('ipmi_password', None),
+            ctx.teuthology_config.get('ipmi_domain', None)
+        )
+
+        mounts[id_] = kernel_mount
+
+        client_config = config["client.{0}".format(id_)]
+        if client_config.get('debug', False):
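+            # enable verbose ceph and libceph kernel messages via dynamic debug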
+            remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
+            remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])
+
+        kernel_mount.mount()
+
+    ctx.mounts = mounts
+    try:
+        yield mounts
+    finally:
+        log.info('Unmounting kernel clients...')
+        for mount in mounts.values():
+            mount.umount()
diff --git a/qa/tasks/locktest.py b/qa/tasks/locktest.py
new file mode 100755
index 0000000..9de5ba4
--- /dev/null
+++ b/qa/tasks/locktest.py
@@ -0,0 +1,134 @@
+"""
+locktests
+"""
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Run locktests, from the xfstests suite, on the given
+    clients. Whether the clients are ceph-fuse or kernel does not
+    matter, and the two clients can refer to the same mount.
+
+    The config is a list of two clients to run the locktest on. The
+    first client will be the host.
+
+    For example:
+       tasks:
+       - ceph:
+       - ceph-fuse: [client.0, client.1]
+       - locktest:
+           [client.0, client.1]
+
+    This task does not yield; there would be little point.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+
+    assert isinstance(config, list)
+    log.info('fetching and building locktests...')
+    (host,) = ctx.cluster.only(config[0]).remotes
+    (client,) = ctx.cluster.only(config[1]).remotes
+    ( _, _, host_id) = config[0].partition('.')
+    ( _, _, client_id) = config[1].partition('.')
+    testdir = teuthology.get_testdir(ctx)
+    hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id)
+    clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id)
+
+    try:
+        for client_name in config:
+            log.info('building on {client_}'.format(client_=client_name))
+            ctx.cluster.only(client_name).run(
+                args=[
+                    # explicitly does not support multiple autotest tasks
+                    # in a single run; the result archival would conflict
+                    'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'mkdir', '{tdir}/locktest'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'wget',
+                    '-nv',
+                    'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c',
+                    '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+                    '-o', '{tdir}/locktest/locktest'.format(tdir=testdir)
+                    ],
+                logger=log.getChild('locktest_client.{id}'.format(id=client_name)),
+                )
+
+        log.info('built locktest on each client')
+
+        host.run(args=['sudo', 'touch',
+                       '{mnt}/locktestfile'.format(mnt=hostmnt),
+                       run.Raw('&&'),
+                       'sudo', 'chown', 'ubuntu.ubuntu',
+                       '{mnt}/locktestfile'.format(mnt=hostmnt)
+                       ]
+                 )
+
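+        # the host instance listens on port 6788 and the client instance
+        # connects to it; both contend for locks on the same shared file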
+        log.info('starting on host')
+        hostproc = host.run(
+            args=[
+                '{tdir}/locktest/locktest'.format(tdir=testdir),
+                '-p', '6788',
+                '-d',
+                '{mnt}/locktestfile'.format(mnt=hostmnt),
+                ],
+            wait=False,
+            logger=log.getChild('locktest.host'),
+            )
+        log.info('starting on client')
+        (_,_,hostaddr) = host.name.partition('@')
+        clientproc = client.run(
+            args=[
+                '{tdir}/locktest/locktest'.format(tdir=testdir),
+                '-p', '6788',
+                '-d',
+                '-h', hostaddr,
+                '{mnt}/locktestfile'.format(mnt=clientmnt),
+                ],
+            logger=log.getChild('locktest.client'),
+            wait=False
+            )
+
+        hostresult = hostproc.wait()
+        clientresult = clientproc.wait()
+        if (hostresult != 0) or (clientresult != 0):
+            raise Exception("Did not pass locking test!")
+        log.info('finished locktest executable with results {r} and {s}'. \
+                     format(r=hostresult, s=clientresult))
+
+    finally:
+        log.info('cleaning up host dir')
+        host.run(
+            args=[
+                'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rmdir', '{tdir}/locktest'.format(tdir=testdir)
+                ],
+            logger=log.getChild('.{id}'.format(id=config[0])),
+            )
+        log.info('cleaning up client dir')
+        client.run(
+            args=[
+                'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
+                run.Raw('&&'),
+                'rmdir', '{tdir}/locktest'.format(tdir=testdir)
+                ],
+            logger=log.getChild('.{id}'.format(\
+                    id=config[1])),
+            )
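
The test above only passes when both halves of the locktest pair exit zero; the
wait-both-then-check pattern can be reduced to a few lines of plain Python. This
is an illustrative sketch (run_pair is a hypothetical helper, not part of the
task), with the actual commands left to the caller:

    # Sketch: start a cooperating host/client process pair, wait for both,
    # and fail if either one reports a non-zero exit status.
    import subprocess

    def run_pair(host_cmd, client_cmd):
        host = subprocess.Popen(host_cmd)
        client = subprocess.Popen(client_cmd)
        host_rc = host.wait()
        client_rc = client.wait()
        if host_rc != 0 or client_rc != 0:
            raise RuntimeError(
                "locking test failed: host=%d client=%d" % (host_rc, client_rc))
        return host_rc, client_rc
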
diff --git a/qa/tasks/logrotate.conf b/qa/tasks/logrotate.conf
new file mode 100644
index 0000000..b0cb801
--- /dev/null
+++ b/qa/tasks/logrotate.conf
@@ -0,0 +1,13 @@
+/var/log/ceph/*{daemon_type}*.log {{
+    rotate 100
+    size {max_size}
+    compress
+    sharedscripts
+    postrotate
+        killall {daemon_type} -1 || true
+    endscript
+    missingok
+    notifempty
+    su root root
+}}
+
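
The doubled braces in the template above are there so that a plain str.format()
call leaves the literal logrotate braces in place while filling in the
placeholders. A minimal rendering sketch (the file path and substituted values
are assumptions, not taken from the task code):

    # Sketch: render the logrotate template for one daemon type.
    with open("logrotate.conf") as f:
        template = f.read()
    rendered = template.format(daemon_type="osd", max_size="100M")
    print(rendered)
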
diff --git a/qa/tasks/lost_unfound.py b/qa/tasks/lost_unfound.py
new file mode 100644
index 0000000..3fb5c16
--- /dev/null
+++ b/qa/tasks/lost_unfound.py
@@ -0,0 +1,183 @@
+"""
+Lost_unfound
+"""
+import logging
+import time
+import ceph_manager
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test handling of lost objects.
+
+    A fairly rigid cluster is brought up and tested by this task
+    """
+    POOL = 'unfound_pool'
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'lost_unfound task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    manager.create_pool(POOL)
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+
+    # take an osd out until the very end
+    manager.kill_osd(2)
+    manager.mark_down_osd(2)
+    manager.mark_out_osd(2)
+
+    # kludge to make sure they get a map
+    rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # create old objects
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f])
+
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.1',
+            'injectargs',
+            '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+            )
+
+    manager.kill_osd(0)
+    manager.mark_down_osd(0)
+    
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
+
+    # bring osd.0 back up, let it peer, but don't replicate the new
+    # objects...
+    log.info('osd.0 command_args is %s' % 'foo')
+    log.info(ctx.daemons.get_daemon('osd', 0).command_args)
+    ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
+            '--osd-recovery-delay-start', '1000'
+            ])
+    manager.revive_osd(0)
+    manager.mark_in_osd(0)
+    manager.wait_till_osd_is_up(0)
+
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.wait_till_active()
+
+    # take out osd.1 and the only copy of those objects.
+    manager.kill_osd(1)
+    manager.mark_down_osd(1)
+    manager.mark_out_osd(1)
+    manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+
+    # bring up osd.2 so that things would otherwise, in theory, recover fully
+    manager.revive_osd(2)
+    manager.mark_in_osd(2)
+    manager.wait_till_osd_is_up(2)
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_till_active()
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+    # verify that there are unfound objects
+    unfound = manager.get_num_unfound_objects()
+    log.info("there are %d unfound objects" % unfound)
+    assert unfound
+
+    testdir = teuthology.get_testdir(ctx)
+    procs = []
+    if config.get('parallel_bench', True):
+        procs.append(mon.run(
+            args=[
+                "/bin/sh", "-c",
+                " ".join(['adjust-ulimits',
+                          'ceph-coverage',
+                          '{tdir}/archive/coverage',
+                          'rados',
+                          '--no-log-to-stderr',
+                          '--name', 'client.admin',
+                          '-b', str(4<<10),
+                          '-p' , POOL,
+                          '-t', '20',
+                          'bench', '240', 'write',
+                      ]).format(tdir=testdir),
+            ],
+            logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
+            stdin=run.PIPE,
+            wait=False
+        ))
+    time.sleep(10)
+
+    # mark stuff lost
+    pgs = manager.get_pg_stats()
+    for pg in pgs:
+        if pg['stat_sum']['num_objects_unfound'] > 0:
+            primary = 'osd.%d' % pg['acting'][0]
+
+            # verify that i can list them direct from the osd
+            log.info('listing missing/lost in %s state %s', pg['pgid'],
+                     pg['state']);
+            m = manager.list_pg_missing(pg['pgid'])
+            #log.info('%s' % m)
+            assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+            num_unfound=0
+            for o in m['objects']:
+                if len(o['locations']) == 0:
+                    num_unfound += 1
+            assert m['num_unfound'] == num_unfound
+
+            log.info("reverting unfound in %s on %s", pg['pgid'], primary)
+            manager.raw_cluster_cmd('pg', pg['pgid'],
+                                    'mark_unfound_lost', 'revert')
+        else:
+            log.info("no unfound in %s", pg['pgid'])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # verify result
+    for f in range(1, 10):
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-'])
+        assert not err
+
+    # see if osd.1 can cope
+    manager.revive_osd(1)
+    manager.mark_in_osd(1)
+    manager.wait_till_osd_is_up(1)
+    manager.wait_for_clean()
+    run.wait(procs)
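
The per-PG verification above treats an object as unfound when its 'locations'
list is empty and cross-checks that count against the PG's reported
'num_unfound'. The same consistency check, as a standalone sketch over the JSON
layout used above (check_unfound is an illustrative name):

    # Sketch: confirm that a PG's reported unfound count matches the number
    # of missing objects that have no known locations.
    def check_unfound(missing):
        counted = sum(1 for obj in missing['objects'] if not obj['locations'])
        assert missing['num_unfound'] == counted, \
            "reported %d unfound, counted %d" % (missing['num_unfound'], counted)
        return counted

    example = {'num_unfound': 2,
               'objects': [{'locations': []},
                           {'locations': []},
                           {'locations': ['osd.2']}]}
    print(check_unfound(example))  # -> 2
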
diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py
new file mode 100644
index 0000000..1ddcba5
--- /dev/null
+++ b/qa/tasks/manypools.py
@@ -0,0 +1,73 @@
+"""
+Force pg creation on all osds
+"""
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+import logging
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Create the specified number of pools and write 16 objects to them (thereby forcing
+    the PG creation on each OSD). This task creates pools from all the clients,
+    in parallel. It is easy to add other daemon types which have the appropriate
+    permissions, but I don't think anything else does.
+    The config is just the number of pools to create. I recommend setting
+    "mon create pg interval" to a very low value in your ceph config to speed
+    this up.
+    
+    You probably want to do this to look at memory consumption, and
+    maybe to test how performance changes with the number of PGs. For example:
+    
+    tasks:
+    - ceph:
+        config:
+          mon:
+            mon create pg interval: 1
+    - manypools: 3000
+    - radosbench:
+        clients: [client.0]
+        time: 360
+    """
+    
+    log.info('creating {n} pools'.format(n=config))
+    
+    poolnum = int(config)
+    creator_remotes = []
+    client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client')
+    log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
+    for role in client_roles:
+        log.info('role={role_}'.format(role_=role))
+        (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
+        creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
+
+    remaining_pools = poolnum
+    poolprocs=dict()
+    while (remaining_pools > 0):
+        log.info('{n} pools remaining to create'.format(n=remaining_pools))
+        for remote, role_ in creator_remotes:
+            poolnum = remaining_pools
+            remaining_pools -= 1
+            if remaining_pools < 0:
+                continue
+            log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
+            proc = remote.run(
+                args=[
+                    'rados',
+                    '--name', role_,
+                    'mkpool', 'pool{num}'.format(num=poolnum), '-1',
+                    run.Raw('&&'),
+                    'rados',
+                    '--name', role_,
+                    '--pool', 'pool{num}'.format(num=poolnum),
+                    'bench', '0', 'write', '-t', '16', '--block-size', '1'
+                    ],
+                wait=False
+            )
+            log.info('waiting for pool and object creates')
+            poolprocs[remote] = proc
+        
+        run.wait(poolprocs.itervalues())
+    
+    log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum))
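
The loop above hands out pool numbers to the client remotes in round-robin
fashion until the requested count is exhausted. Stripped of the remote
execution, the distribution logic looks roughly like this (assign_pools is a
made-up name for illustration):

    # Sketch: assign pool numbers to creators round-robin, as manypools does.
    def assign_pools(poolnum, creators):
        assignments = {c: [] for c in creators}
        remaining = poolnum
        while remaining > 0:
            for creator in creators:
                if remaining <= 0:
                    break
                assignments[creator].append(remaining)
                remaining -= 1
        return assignments

    print(assign_pools(5, ['client.0', 'client.1']))
    # {'client.0': [5, 3, 1], 'client.1': [4, 2]}
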
diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py
new file mode 100644
index 0000000..d1de156
--- /dev/null
+++ b/qa/tasks/mds_creation_failure.py
@@ -0,0 +1,85 @@
+
+import logging
+import contextlib
+import time
+import ceph_manager
+from teuthology import misc
+from teuthology.orchestra.run import CommandFailedError, Raw
+
+log = logging.getLogger(__name__)
+
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Go through filesystem creation with a synthetic failure in an MDS
+    in its 'up:creating' state, to exercise the retry behaviour.
+    """
+    # Grab handles to the teuthology objects of interest
+    mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
+    if len(mdslist) != 1:
+        # Require exactly one MDS, the code path for creation failure when
+        # a standby is available is different
+        raise RuntimeError("This task requires exactly one MDS")
+
+    mds_id = mdslist[0]
+    (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
+    manager = ceph_manager.CephManager(
+        mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
+    )
+
+    # Stop MDS
+    manager.raw_cluster_cmd('mds', 'set', "max_mds", "0")
+    mds = ctx.daemons.get_daemon('mds', mds_id)
+    mds.stop()
+    manager.raw_cluster_cmd('mds', 'fail', mds_id)
+
+    # Reset the filesystem so that next start will go into CREATING
+    manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
+    manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")
+
+    # Start the MDS with mds_kill_create_at set, it will crash during creation
+    mds.restart_with_args(["--mds_kill_create_at=1"])
+    try:
+        mds.wait_for_exit()
+    except CommandFailedError as e:
+        if e.exitstatus == 1:
+            log.info("MDS creation killed as expected")
+        else:
+            log.error("Unexpected status code %s" % e.exitstatus)
+            raise
+
+    # Since I have intentionally caused a crash, I will clean up the resulting core
+    # file to avoid task.internal.coredump seeing it as a failure.
+    log.info("Removing core file from synthetic MDS failure")
+    mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
+
+    # It should have left the MDS map state still in CREATING
+    status = manager.get_mds_status(mds_id)
+    assert status['state'] == 'up:creating'
+
+    # Start the MDS again without the kill flag set, it should proceed with creation successfully
+    mds.restart()
+
+    # Wait for state ACTIVE
+    t = 0
+    create_timeout = 120
+    while True:
+        status = manager.get_mds_status(mds_id)
+        if status['state'] == 'up:active':
+            log.info("MDS creation completed successfully")
+            break
+        elif status['state'] == 'up:creating':
+            log.info("MDS still in creating state")
+            if t > create_timeout:
+                log.error("Creating did not complete within %ss" % create_timeout)
+                raise RuntimeError("Creating did not complete within %ss" % create_timeout)
+            t += 1
+            time.sleep(1)
+        else:
+            log.error("Unexpected MDS state: %s" % status['state'])
+            assert(status['state'] in ['up:active', 'up:creating'])
+
+    # The system should be back up in a happy healthy state, go ahead and run any further tasks
+    # inside this context.
+    yield
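
The wait loop above polls the MDS state once a second and raises once
create_timeout seconds have elapsed without reaching up:active. The same
poll-until-or-timeout pattern, as a reusable sketch (the predicate is a
stand-in, not a teuthology call):

    # Sketch: poll a condition until it holds or a timeout expires.
    import time

    def wait_until(predicate, timeout=120, interval=1):
        elapsed = 0
        while not predicate():
            if elapsed > timeout:
                raise RuntimeError("condition not met within %ss" % timeout)
            time.sleep(interval)
            elapsed += interval
        return elapsed
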
diff --git a/qa/tasks/mds_thrash.py b/qa/tasks/mds_thrash.py
new file mode 100644
index 0000000..77eee7e
--- /dev/null
+++ b/qa/tasks/mds_thrash.py
@@ -0,0 +1,415 @@
+"""
+Thrash mds by simulating failures
+"""
+import logging
+import contextlib
+import ceph_manager
+import random
+import time
+
+from gevent.greenlet import Greenlet
+from gevent.event import Event
+from teuthology import misc as teuthology
+
+from tasks.cephfs.filesystem import MDSCluster, Filesystem
+
+log = logging.getLogger(__name__)
+
+
+class MDSThrasher(Greenlet):
+    """
+    MDSThrasher::
+
+    The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc).
+
+    The config is optional.  Many of the config parameters are a maximum value
+    to use when selecting a random value from a range.  To always use the maximum
+    value, set no_random to true.  The config is a dict containing some or all of:
+
+    seed: [no default] seed the random number generator
+
+    randomize: [default: true] enables randomization and uses the max/min values as bounds
+
+    max_thrash: [default: 1] the maximum number of MDSs that will be thrashed at
+      any given time.
+
+    max_thrash_delay: [default: 30] maximum number of seconds to delay before
+      thrashing again.
+
+    max_revive_delay: [default: 10] maximum number of seconds to delay before
+      bringing back a thrashed MDS
+
+    thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed
+      during replay.  Value should be between 0.0 and 1.0
+
+    max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in
+      the replay state before thrashing
+
+    thrash_weights: allows specific MDSs to be thrashed more/less frequently.  This option
+      overrides anything specified by max_thrash.  This option is a dict containing
+      mds.x: weight pairs.  For example, [mds.a: 0.7, mds.b: 0.3, mds.c: 0.0].  Each weight
+      is a value from 0.0 to 1.0.  Any MDSs not specified will be automatically
+      given a weight of 0.0.  For a given MDS, by default the trasher delays for up
+      to max_thrash_delay, trashes, waits for the MDS to recover, and iterates.  If a non-zero
+      weight is specified for an MDS, for each iteration the thrasher chooses whether to thrash
+      during that iteration based on a random value [0-1] not exceeding the weight of that MDS.
+
+    Examples::
+
+
+      The following example sets the likelihood that mds.a will be thrashed
+      to 80%, mds.b to 20%, and other MDSs will not be thrashed.  It also sets the
+      likelihood that an MDS will be thrashed in replay to 40%.
+      Thrash weights do not have to sum to 1.
+
+      tasks:
+      - ceph:
+      - mds_thrash:
+          thrash_weights:
+            - mds.a: 0.8
+            - mds.b: 0.2
+          thrash_in_replay: 0.4
+      - ceph-fuse:
+      - workunit:
+          clients:
+            all: [suites/fsx.sh]
+
+      The following example disables randomization, and uses the max delay values:
+
+      tasks:
+      - ceph:
+      - mds_thrash:
+          max_thrash_delay: 10
+          max_revive_delay: 1
+          max_replay_thrash_delay: 4
+
+    """
+
+    def __init__(self, ctx, manager, mds_cluster, config, logger, failure_group, weight):
+        super(MDSThrasher, self).__init__()
+
+        self.ctx = ctx
+        self.manager = manager
+        assert self.manager.is_clean()
+        self.mds_cluster = mds_cluster
+
+        self.stopping = Event()
+        self.logger = logger
+        self.config = config
+
+        self.randomize = bool(self.config.get('randomize', True))
+        self.max_thrash_delay = float(self.config.get('thrash_delay', 30.0))
+        self.thrash_in_replay = float(self.config.get('thrash_in_replay', False))
+        assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format(
+            v=self.thrash_in_replay)
+
+        self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0))
+
+        self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))
+
+        self.failure_group = failure_group
+        self.weight = weight
+
+        # TODO support multiple filesystems: will require behavioural change to select
+        # which filesystem to act on when doing rank-ish things
+        self.fs = Filesystem(self.ctx)
+
+    def _run(self):
+        try:
+            self.do_thrash()
+        except:
+            # Log exceptions here so we get the full backtrace (it's lost
+            # by the time someone does a .get() on this greenlet)
+            self.logger.exception("Exception in do_thrash:")
+            raise
+
+    def log(self, x):
+        """Write data to logger assigned to this MDThrasher"""
+        self.logger.info(x)
+
+    def stop(self):
+        self.stopping.set()
+
+    def kill_mds(self, mds):
+        if self.config.get('powercycle'):
+            (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
+                         remotes.iterkeys())
+            self.log('kill_mds on mds.{m} doing powercycle of {s}'.
+                     format(m=mds, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_off()
+        else:
+            self.ctx.daemons.get_daemon('mds', mds).stop()
+
+    @staticmethod
+    def _assert_ipmi(remote):
+        assert remote.console.has_ipmi_credentials, (
+            "powercycling requested but RemoteConsole is not "
+            "initialized.  Check ipmi config.")
+
+    def kill_mds_by_rank(self, rank):
+        """
+        kill_mds wrapper to kill based on rank passed.
+        """
+        status = self.mds_cluster.get_mds_info_by_rank(rank)
+        self.kill_mds(status['name'])
+
+    def revive_mds(self, mds, standby_for_rank=None):
+        """
+        Revive mds -- do an ipmi powercycle (if indicated by the config)
+        and then restart (using --hot-standby if specified).
+        """
+        if self.config.get('powercycle'):
+            (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
+                         remotes.iterkeys())
+            self.log('revive_mds on mds.{m} doing powercycle of {s}'.
+                     format(m=mds, s=remote.name))
+            self._assert_ipmi(remote)
+            remote.console.power_on()
+            self.manager.make_admin_daemon_dir(self.ctx, remote)
+        args = []
+        if standby_for_rank:
+            args.extend(['--hot-standby', standby_for_rank])
+        self.ctx.daemons.get_daemon('mds', mds).restart(*args)
+
+    def revive_mds_by_rank(self, rank, standby_for_rank=None):
+        """
+        revive_mds wrapper to revive based on rank passed.
+        """
+        status = self.mds_cluster.get_mds_info_by_rank(rank)
+        self.revive_mds(status['name'], standby_for_rank)
+
+    def get_mds_status_all(self):
+        return self.fs.get_mds_map()
+
+    def do_thrash(self):
+        """
+        Perform the random thrashing action
+        """
+
+        self.log('starting mds_do_thrash for failure group: ' + ', '.join(
+            ['mds.{_id}'.format(_id=_f) for _f in self.failure_group]))
+        while not self.stopping.is_set():
+            delay = self.max_thrash_delay
+            if self.randomize:
+                delay = random.randrange(0.0, self.max_thrash_delay)
+
+            if delay > 0.0:
+                self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
+                self.stopping.wait(delay)
+                if self.stopping.is_set():
+                    continue
+
+            skip = random.randrange(0.0, 1.0)
+            if self.weight < 1.0 and skip > self.weight:
+                self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip,
+                                                                                                   weight=self.weight))
+                continue
+
+            # find the active mds in the failure group
+            statuses = [self.mds_cluster.get_mds_info(m) for m in self.failure_group]
+            actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
+            assert len(actives) == 1, 'Can only have one active in a failure group'
+
+            active_mds = actives[0]['name']
+            active_rank = actives[0]['rank']
+
+            self.log('kill mds.{id} (rank={r})'.format(id=active_mds, r=active_rank))
+            self.manager.kill_mds_by_rank(active_rank)
+
+            # wait for mon to report killed mds as crashed
+            last_laggy_since = None
+            itercount = 0
+            while True:
+                failed = self.fs.get_mds_map()['failed']
+                status = self.mds_cluster.get_mds_info(active_mds)
+                if not status:
+                    break
+                if 'laggy_since' in status:
+                    last_laggy_since = status['laggy_since']
+                    break
+                if any([(f == active_mds) for f in failed]):
+                    break
+                self.log(
+                    'waiting till mds map indicates mds.{_id} is laggy/crashed, in failed state, or mds.{_id} is removed from mdsmap'.format(
+                        _id=active_mds))
+                itercount = itercount + 1
+                if itercount > 10:
+                    self.log('mds map: {status}'.format(status=self.mds_cluster.get_fs_map()))
+                time.sleep(2)
+            if last_laggy_since:
+                self.log(
+                    'mds.{_id} reported laggy/crashed since: {since}'.format(_id=active_mds, since=last_laggy_since))
+            else:
+                self.log('mds.{_id} down, removed from mdsmap'.format(_id=active_mds, since=last_laggy_since))
+
+            # wait for a standby mds to takeover and become active
+            takeover_mds = None
+            takeover_rank = None
+            itercount = 0
+            while True:
+                statuses = [self.mds_cluster.get_mds_info(m) for m in self.failure_group]
+                actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
+                if len(actives) > 0:
+                    assert len(actives) == 1, 'Can only have one active in failure group'
+                    takeover_mds = actives[0]['name']
+                    takeover_rank = actives[0]['rank']
+                    break
+                itercount = itercount + 1
+                if itercount > 10:
+                    self.log('mds map: {status}'.format(status=self.mds_cluster.get_fs_map()))
+
+            self.log('New active mds is mds.{_id}'.format(_id=takeover_mds))
+
+            # wait for a while before restarting old active to become new
+            # standby
+            delay = self.max_revive_delay
+            if self.randomize:
+                delay = random.randrange(0.0, self.max_revive_delay)
+
+            self.log('waiting for {delay} secs before reviving mds.{id}'.format(
+                delay=delay, id=active_mds))
+            time.sleep(delay)
+
+            self.log('reviving mds.{id}'.format(id=active_mds))
+            self.manager.revive_mds(active_mds, standby_for_rank=takeover_rank)
+
+            status = {}
+            while True:
+                status = self.mds_cluster.get_mds_info(active_mds)
+                if status and (status['state'] == 'up:standby' or status['state'] == 'up:standby-replay'):
+                    break
+                self.log(
+                    'waiting till mds map indicates mds.{_id} is in standby or standby-replay'.format(_id=active_mds))
+                time.sleep(2)
+            self.log('mds.{_id} reported in {state} state'.format(_id=active_mds, state=status['state']))
+
+            # don't do replay thrashing right now
+            continue
+            # this might race with replay -> active transition...
+            if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay:
+
+                delay = self.max_replay_thrash_delay
+                if self.randomize:
+                    delay = random.randrange(0.0, self.max_replay_thrash_delay)
+                time.sleep(delay)
+                self.log('kill replaying mds.{id}'.format(id=self.to_kill))
+                self.manager.kill_mds(self.to_kill)
+
+                delay = self.max_revive_delay
+                if self.randomize:
+                    delay = random.randrange(0.0, self.max_revive_delay)
+
+                self.log('waiting for {delay} secs before reviving mds.{id}'.format(
+                    delay=delay, id=self.to_kill))
+                time.sleep(delay)
+
+                self.log('revive mds.{id}'.format(id=self.to_kill))
+                self.manager.revive_mds(self.to_kill)
+
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Stress test the mds by thrashing while another task/workunit
+    is running.
+
+    Please refer to MDSThrasher class for further information on the
+    available options.
+    """
+
+    mds_cluster = MDSCluster(ctx)
+
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'mds_thrash task only accepts a dict for configuration'
+    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
+    assert len(mdslist) > 1, \
+        'mds_thrash task requires at least 2 metadata servers'
+
+    # choose random seed
+    if 'seed' in config:
+        seed = int(config['seed'])
+    else:
+        seed = int(time.time())
+    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
+    random.seed(seed)
+
+    max_thrashers = config.get('max_thrash', 1)
+    thrashers = {}
+
+    (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
+    manager = ceph_manager.CephManager(
+        first, ctx=ctx, logger=log.getChild('ceph_manager'),
+    )
+
+    # make sure everyone is in active, standby, or standby-replay
+    log.info('Wait for all MDSs to reach steady state...')
+    statuses = None
+    statuses_by_rank = None
+    while True:
+        statuses = {m: mds_cluster.get_mds_info(m) for m in mdslist}
+        statuses_by_rank = {}
+        for _, s in statuses.iteritems():
+            if isinstance(s, dict):
+                statuses_by_rank[s['rank']] = s
+
+        ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active'
+                                                         or s['state'] == 'up:standby'
+                                                         or s['state'] == 'up:standby-replay'),
+                       statuses.items())
+        if len(ready) == len(statuses):
+            break
+        time.sleep(2)
+    log.info('Ready to start thrashing')
+
+    # setup failure groups
+    failure_groups = {}
+    actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'}
+    log.info('Actives is: {d}'.format(d=actives))
+    log.info('Statuses is: {d}'.format(d=statuses_by_rank))
+    for active in actives:
+        for (r, s) in statuses.iteritems():
+            if s['standby_for_name'] == active:
+                if not active in failure_groups:
+                    failure_groups[active] = []
+                log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active))
+                failure_groups[active].append(r)
+
+    manager.wait_for_clean()
+    for (active, standbys) in failure_groups.iteritems():
+        weight = 1.0
+        if 'thrash_weights' in config:
+            weight = float(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0'))
+
+        failure_group = [active]
+        failure_group.extend(standbys)
+
+        thrasher = MDSThrasher(
+            ctx, manager, mds_cluster, config,
+            logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format(
+                a=active,
+                sbs=', '.join(standbys)
+            )
+            ),
+            failure_group=failure_group,
+            weight=weight)
+        thrasher.start()
+        thrashers[active] = thrasher
+
+        # if thrash_weights isn't specified and we've reached max_thrash,
+        # we're done
+        if 'thrash_weights' not in config and len(thrashers) == max_thrashers:
+            break
+
+    try:
+        log.debug('Yielding')
+        yield
+    finally:
+        log.info('joining mds_thrashers')
+        for t in thrashers:
+            log.info('join thrasher for failure group [{fg}]'.format(fg=', '.join(failure_group)))
+            thrashers[t].stop()
+            thrashers[t].join()
+        log.info('done joining')
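
The per-iteration decision driven by thrash_weights above is a weighted coin
flip: draw a value in [0, 1) and only thrash when it does not exceed the weight
assigned to that MDS. As a standalone sketch (should_thrash is an illustrative
name, and the exact comparison in the task differs slightly):

    # Sketch: weighted thrash decision, approximating the thrash_weights logic.
    import random

    def should_thrash(weight, rng=random):
        # a weight of 1.0 always thrashes, 0.0 (almost) never does
        return rng.random() <= weight

    hits = sum(should_thrash(0.8) for _ in range(10000))
    print("thrashed %d of 10000 iterations (~80%% expected)" % hits)
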
diff --git a/qa/tasks/metadata.yaml b/qa/tasks/metadata.yaml
new file mode 100644
index 0000000..ccdc3b0
--- /dev/null
+++ b/qa/tasks/metadata.yaml
@@ -0,0 +1,2 @@
+instance-id: test
+local-hostname: test
diff --git a/qa/tasks/mod_fastcgi.conf.template b/qa/tasks/mod_fastcgi.conf.template
new file mode 100644
index 0000000..d0efdc8
--- /dev/null
+++ b/qa/tasks/mod_fastcgi.conf.template
@@ -0,0 +1,17 @@
+# mod_fastcgi config goes here
+
+# Set fastcgi environment variables.
+# Note that this is separate from Unix environment variables!
+SetEnv RGW_LOG_LEVEL 20
+SetEnv RGW_SHOULD_LOG yes
+SetEnv RGW_PRINT_CONTINUE {print_continue}
+
+<IfModule !fastcgi_module>
+  LoadModule fastcgi_module {mod_path}/mod_fastcgi.so
+</IfModule>
+
+FastCgiIPCDir {testdir}/apache/tmp.{client}/fastcgi_sock
+FastCgiExternalServer {testdir}/apache/htdocs.{client}/rgw.fcgi -socket rgw_sock -idle-timeout {idle_timeout}
+RewriteEngine On
+
+RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{{QUERY_STRING}} [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L]
diff --git a/qa/tasks/mod_proxy_fcgi.tcp.conf.template b/qa/tasks/mod_proxy_fcgi.tcp.conf.template
new file mode 100644
index 0000000..b47fb5c
--- /dev/null
+++ b/qa/tasks/mod_proxy_fcgi.tcp.conf.template
@@ -0,0 +1,16 @@
+# mod_proxy_fcgi config, using TCP 
+
+<IfModule !proxy_module>
+  LoadModule proxy_module {mod_path}/mod_proxy.so
+</IfModule>
+<IfModule !proxy_fcgi_module>
+  LoadModule proxy_fcgi_module {mod_path}/mod_proxy_fcgi.so
+</IfModule>
+
+RewriteEngine On
+
+RewriteRule .* - [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L]
+
+SetEnv proxy-nokeepalive 1
+
+ProxyPass / fcgi://0.0.0.0:9000/
diff --git a/qa/tasks/mod_proxy_fcgi.uds.conf.template b/qa/tasks/mod_proxy_fcgi.uds.conf.template
new file mode 100644
index 0000000..8649aa7
--- /dev/null
+++ b/qa/tasks/mod_proxy_fcgi.uds.conf.template
@@ -0,0 +1,14 @@
+# mod_proxy_fcgi config, using UDS
+
+<IfModule !proxy_module>
+  LoadModule proxy_module {mod_path}/mod_proxy.so
+</IfModule>
+<IfModule !proxy_fcgi_module>
+  LoadModule proxy_fcgi_module {mod_path}/mod_proxy_fcgi.so
+</IfModule>
+
+RewriteEngine On
+
+RewriteRule .* - [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L]
+
+ProxyPass / unix://{testdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock|fcgi://localhost:9000/ disablereuse=On
diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py
new file mode 100644
index 0000000..891e6ec
--- /dev/null
+++ b/qa/tasks/mon_clock_skew_check.py
@@ -0,0 +1,261 @@
+"""
+Handle clock skews in monitors.
+"""
+import logging
+import contextlib
+import ceph_manager
+import time
+import gevent
+from StringIO import StringIO
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+class ClockSkewCheck:
+    """
+    Periodically check if there are any clock skews among the monitors in the
+    quorum. By default, assume no skews are supposed to exist; that can be
+    changed using the 'expect-skew' option. If 'never-fail' is set to true,
+    then we will always succeed and only report skews if any are found.
+
+    This class does not spawn a thread of its own; if one is wanted, it is up
+    to the caller (for instance, the task using this class) to spawn it. The
+    class is kept this way so it can be reused.
+
+    This task accepts the following options:
+
+    interval     amount of seconds to wait in-between checks. (default: 30.0)
+    max-skew     maximum skew, in seconds, that is considered tolerable before
+                 issuing a warning. (default: 0.05)
+    expect-skew  'true' or 'false', to indicate whether to expect a skew during
+                 the run or not. If 'true', the test will fail if no skew is
+                 found, and succeed if a skew is indeed found; if 'false', it's
+                 the other way around. (default: false)
+    never-fail   Don't fail the run if a skew is detected and we weren't
+                 expecting it, or if no skew is detected and we were expecting
+                 it. (default: False)
+
+    at-least-once          Runs at least once, even if we are told to stop.
+                           (default: True)
+    at-least-once-timeout  If we were told to stop but we are attempting to
+                           run at least once, timeout after this many seconds.
+                           (default: 600)
+
+    Example:
+        Expect a skew higher than 0.05 seconds, but only report it without
+        failing the teuthology run.
+
+    - mon_clock_skew_check:
+        interval: 30
+        max-skew: 0.05
+        expect-skew: true
+        never-fail: true
+    """
+
+    def __init__(self, ctx, manager, config, logger):
+        self.ctx = ctx
+        self.manager = manager
+
+        self.stopping = False
+        self.logger = logger
+        self.config = config
+
+        if self.config is None:
+            self.config = dict()
+
+        self.check_interval = float(self.config.get('interval', 30.0))
+
+        first_mon = teuthology.get_first_mon(ctx, config)
+        remote = ctx.cluster.only(first_mon).remotes.keys()[0]
+        proc = remote.run(
+            args=[
+                'sudo',
+                'ceph-mon',
+                '-i', first_mon[4:],
+                '--show-config-value', 'mon_clock_drift_allowed'
+                ], stdout=StringIO(), wait=True
+                )
+        self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue()))
+
+        self.expect_skew = self.config.get('expect-skew', False)
+        self.never_fail = self.config.get('never-fail', False)
+        self.at_least_once = self.config.get('at-least-once', True)
+        self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0)
+
+    def info(self, x):
+        """
+        locally define logger for info messages
+        """
+        self.logger.info(x)
+
+    def warn(self, x):
+        """
+        locally define logger for warnings
+        """
+        self.logger.warn(x)
+
+    def debug(self, x):
+        """
+        locally define logger for debug messages
+        """
+        self.logger.info(x)
+        self.logger.debug(x)
+
+    def finish(self):
+        """
+        Break out of the do_check loop.
+        """
+        self.stopping = True
+
+    def sleep_interval(self):
+        """
+        If a sleep interval is set, sleep for that amount of time.
+        """
+        if self.check_interval > 0.0:
+            self.debug('sleeping for {s} seconds'.format(
+                s=self.check_interval))
+            time.sleep(self.check_interval)
+
+    def print_skews(self, skews):
+        """
+        Display skew values.
+        """
+        total = len(skews)
+        if total > 0:
+            self.info('---------- found {n} skews ----------'.format(n=total))
+            for mon_id, values in skews.iteritems():
+                self.info('mon.{id}: {v}'.format(id=mon_id, v=values))
+            self.info('-------------------------------------')
+        else:
+            self.info('---------- no skews were found ----------')
+
+    def do_check(self):
+        """
+        Clock skew checker.  Loops until finish() is called.
+        """
+        self.info('start checking for clock skews')
+        skews = dict()
+        ran_once = False
+        
+        started_on = None
+
+        while not self.stopping or (self.at_least_once and not ran_once):
+
+            if self.at_least_once and not ran_once and self.stopping:
+                if started_on is None:
+                    self.info('kicking-off timeout (if any)')
+                    started_on = time.time()
+                elif self.at_least_once_timeout > 0.0:
+                    assert time.time() - started_on < self.at_least_once_timeout, \
+                        'failed to obtain a timecheck before timeout expired'
+
+            quorum_size = len(teuthology.get_mon_names(self.ctx))
+            self.manager.wait_for_mon_quorum_size(quorum_size)
+
+            health = self.manager.get_mon_health(True)
+            timechecks = health['timechecks']
+
+            clean_check = False
+
+            if timechecks['round_status'] == 'finished':
+                assert (timechecks['round'] % 2) == 0, \
+                    'timecheck marked as finished but round ' \
+                    'disagrees (r {r})'.format(
+                        r=timechecks['round'])
+                clean_check = True
+            else:
+                assert timechecks['round_status'] == 'on-going', \
+                        'timecheck status expected \'on-going\' ' \
+                        'but found \'{s}\' instead'.format(
+                            s=timechecks['round_status'])
+                if 'mons' in timechecks.keys() and len(timechecks['mons']) > 1:
+                    self.info('round still on-going, but there are available reports')
+                else:
+                    self.info('no timechecks available just yet')
+                    self.sleep_interval()
+                    continue
+
+            assert len(timechecks['mons']) > 1, \
+                'there are not enough reported timechecks; ' \
+                'expected > 1 found {n}'.format(n=len(timechecks['mons']))
+
+            for check in timechecks['mons']:
+                mon_skew = float(check['skew'])
+                mon_health = check['health']
+                mon_id = check['name']
+                if abs(mon_skew) > self.max_skew:
+                    assert mon_health == 'HEALTH_WARN', \
+                        'mon.{id} health is \'{health}\' but skew {s} > max {ms}'.format(
+                            id=mon_id,health=mon_health,s=abs(mon_skew),ms=self.max_skew)
+
+                    log_str = 'mon.{id} with skew {s} > max {ms}'.format(
+                        id=mon_id,s=abs(mon_skew),ms=self.max_skew)
+
+                    """ add to skew list """
+                    details = check['details']
+                    skews[mon_id] = {'skew': mon_skew, 'details': details}
+
+                    if self.expect_skew:
+                        self.info('expected skew: {str}'.format(str=log_str))
+                    else:
+                        self.warn('unexpected skew: {str}'.format(str=log_str))
+
+            if clean_check or (self.expect_skew and len(skews) > 0):
+                ran_once = True
+                self.print_skews(skews)
+            self.sleep_interval()
+
+        total = len(skews)
+        self.print_skews(skews)
+
+        error_str = ''
+        found_error = False
+
+        if self.expect_skew:
+            if total == 0:
+                error_str = 'We were expecting a skew, but none was found!'
+                found_error = True
+        else:
+            if total > 0:
+                error_str = 'We were not expecting a skew, but we did find it!'
+                found_error = True
+
+        if found_error:
+            self.info(error_str)
+            if not self.never_fail:
+                assert False, error_str
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Use the ClockSkewCheck class to check for clock skews on the monitors.
+    This task will spawn a thread running ClockSkewCheck's do_check().
+
+    All the configuration will be directly handled by ClockSkewCheck,
+    so please refer to the class documentation for further information.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'mon_clock_skew_check task only accepts a dict for configuration'
+    log.info('Beginning mon_clock_skew_check...')
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    skew_check = ClockSkewCheck(ctx,
+        manager, config,
+        logger=log.getChild('mon_clock_skew_check'))
+    skew_check_thread = gevent.spawn(skew_check.do_check)
+    try:
+        yield
+    finally:
+        log.info('joining mon_clock_skew_check')
+        skew_check.finish()
+        skew_check_thread.get()
+
+
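
The skew test above flags a monitor whenever the absolute value of its reported
skew exceeds the allowed maximum (taken from mon_clock_drift_allowed unless
'max-skew' overrides it). As a small standalone sketch over the same timecheck
layout (find_skews is a made-up name):

    # Sketch: collect monitors whose clock skew exceeds the allowed maximum.
    def find_skews(timecheck_mons, max_skew=0.05):
        skews = {}
        for check in timecheck_mons:
            skew = float(check['skew'])
            if abs(skew) > max_skew:
                skews[check['name']] = {'skew': skew,
                                        'details': check.get('details', '')}
        return skews

    sample = [{'name': 'a', 'skew': 0.001},
              {'name': 'b', 'skew': -0.09, 'details': 'clock is behind'}]
    print(find_skews(sample))  # only mon.b is reported
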
diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py
new file mode 100644
index 0000000..bfa2cdf
--- /dev/null
+++ b/qa/tasks/mon_recovery.py
@@ -0,0 +1,80 @@
+"""
+Monitor recovery
+"""
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test monitor recovery.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)]
+    log.info("mon ids = %s" % mons)
+
+    manager.wait_for_mon_quorum_size(len(mons))
+
+    log.info('verifying all monitors are in the quorum')
+    for m in mons:
+        s = manager.get_mon_status(m)
+        assert s['state'] == 'leader' or s['state'] == 'peon'
+        assert len(s['quorum']) == len(mons)
+
+    log.info('restarting each monitor in turn')
+    for m in mons:
+        # stop a monitor
+        manager.kill_mon(m)
+        manager.wait_for_mon_quorum_size(len(mons) - 1)
+
+        # restart
+        manager.revive_mon(m)
+        manager.wait_for_mon_quorum_size(len(mons))
+
+    # in forward and reverse order,
+    rmons = list(mons)
+    rmons.reverse()
+    for mons in mons, rmons:
+        log.info('stopping all monitors')
+        for m in mons:
+            manager.kill_mon(m)
+
+        log.info('forming a minimal quorum for %s, then adding monitors' % mons)
+        qnum = (len(mons) / 2) + 1
+        num = 0
+        for m in mons:
+            manager.revive_mon(m)
+            num += 1
+            if num >= qnum:
+                manager.wait_for_mon_quorum_size(num)
+
+    # on both leader and non-leader ranks...
+    for rank in [0, 1]:
+        # take one out
+        log.info('removing mon %s' % mons[rank])
+        manager.kill_mon(mons[rank])
+        manager.wait_for_mon_quorum_size(len(mons) - 1)
+
+        log.info('causing some monitor log activity')
+        m = 30
+        for n in range(1, m):
+            manager.raw_cluster_cmd('log', '%d of %d' % (n, m))
+
+        log.info('adding mon %s back in' % mons[rank])
+        manager.revive_mon(mons[rank])
+        manager.wait_for_mon_quorum_size(len(mons))
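
The minimal quorum size used when re-forming the monitor cluster above is a
simple majority of the configured monitors. The arithmetic on its own, as a
tiny sketch:

    # Sketch: minimal quorum size for a given monitor count (simple majority).
    def min_quorum(num_mons):
        return num_mons // 2 + 1

    for n in (3, 5, 7):
        print(n, "mons ->", min_quorum(n))   # 2, 3 and 4 respectively
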
diff --git a/qa/tasks/mon_thrash.py b/qa/tasks/mon_thrash.py
new file mode 100644
index 0000000..0754bcd
--- /dev/null
+++ b/qa/tasks/mon_thrash.py
@@ -0,0 +1,343 @@
+"""
+Monitor thrash
+"""
+import logging
+import contextlib
+import ceph_manager
+import random
+import time
+import gevent
+import json
+import math
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def _get_mons(ctx):
+    """
+    Get monitor names from the context value.
+    """
+    mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)]
+    return mons
+
+class MonitorThrasher:
+    """
+    How it works::
+
+    - pick a monitor
+    - kill it
+    - wait for quorum to be formed
+    - sleep for 'revive_delay' seconds
+    - revive monitor
+    - wait for quorum to be formed
+    - sleep for 'thrash_delay' seconds
+
+    Options::
+
+    seed                Seed to use on the RNG to reproduce a previous
+                        behaviour (default: None; i.e., not set)
+    revive_delay        Number of seconds to wait before reviving
+                        the monitor (default: 10)
+    thrash_delay        Number of seconds to wait in-between
+                        test iterations (default: 0)
+    store_thrash        Thrash the monitor store before killing the monitor being thrashed (default: False)
+    store_thrash_probability  Probability of thrashing a monitor's store
+                              (default: 50)
+    thrash_many         Thrash multiple monitors instead of just one. If
+                        'maintain-quorum' is set to False, then we will
+                        thrash up to as many monitors as there are
+                        available. (default: False)
+    maintain_quorum     Always maintain quorum, taking care on how many
+                        monitors we kill during the thrashing. If we
+                        happen to only have one or two monitors configured,
+                        if this option is set to True, then we won't run
+                        this task as we cannot guarantee maintenance of
+                        quorum. Setting it to false however would allow the
+                        task to run with as many as just one single monitor.
+                        (default: True)
+    freeze_mon_probability: how often to freeze the mon instead of killing it,
+                        in % (default: 0)
+    freeze_mon_duration: how many seconds to freeze the mon (default: 15)
+    scrub               Scrub after each iteration (default: True)
+
+    Note: if 'store_thrash' is set to True, then 'maintain_quorum' must also
+          be set to True.
+
+    For example::
+
+    tasks:
+    - ceph:
+    - mon_thrash:
+        revive_delay: 20
+        thrash_delay: 1
+        store_thrash: true
+        store_thrash_probability: 40
+        seed: 31337
+        maintain_quorum: true
+        thrash_many: true
+    - ceph-fuse:
+    - workunit:
+        clients:
+          all:
+            - mon/workloadgen.sh
+    """
+    def __init__(self, ctx, manager, config, logger):
+        self.ctx = ctx
+        self.manager = manager
+        self.manager.wait_for_clean()
+
+        self.stopping = False
+        self.logger = logger
+        self.config = config
+
+        if self.config is None:
+            self.config = dict()
+
+        """ Test reproducibility """
+        self.random_seed = self.config.get('seed', None)
+
+        if self.random_seed is None:
+            self.random_seed = int(time.time())
+
+        self.rng = random.Random()
+        self.rng.seed(int(self.random_seed))
+
+        """ Monitor thrashing """
+        self.revive_delay = float(self.config.get('revive_delay', 10.0))
+        self.thrash_delay = float(self.config.get('thrash_delay', 0.0))
+
+        self.thrash_many = self.config.get('thrash_many', False)
+        self.maintain_quorum = self.config.get('maintain_quorum', True)
+
+        self.scrub = self.config.get('scrub', True)
+
+        self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
+        self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))
+
+        assert self.max_killable() > 0, \
+            'Unable to kill at least one monitor with the current config.'
+
+        """ Store thrashing """
+        self.store_thrash = self.config.get('store_thrash', False)
+        self.store_thrash_probability = int(
+            self.config.get('store_thrash_probability', 50))
+        if self.store_thrash:
+            assert self.store_thrash_probability > 0, \
+                'store_thrash is set, probability must be > 0'
+            assert self.maintain_quorum, \
+                'store_thrash = true must imply maintain_quorum = true'
+
+        self.thread = gevent.spawn(self.do_thrash)
+
+    def log(self, x):
+        """
+        locally log info messages
+        """
+        self.logger.info(x)
+
+    def do_join(self):
+        """
+        Break out of this processes thrashing loop.
+        """
+        self.stopping = True
+        self.thread.get()
+
+    def should_thrash_store(self):
+        """
+        If allowed, indicate that we should thrash a certain percentage of
+        the time as determined by the store_thrash_probability value.
+        """
+        if not self.store_thrash:
+            return False
+        return self.rng.randrange(0, 101) < self.store_thrash_probability
+
+    def thrash_store(self, mon):
+        """
+        Thrash the monitor specified.
+        :param mon: monitor to thrash
+        """
+        addr = self.ctx.ceph['ceph'].conf['mon.%s' % mon]['mon addr']
+        self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr))
+        out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force')
+        j = json.loads(out)
+        assert j['ret'] == 0, \
+            'error forcing store sync on mon.{id}:\n{ret}'.format(
+                id=mon,ret=out)
+
+    def should_freeze_mon(self):
+        """
+        Indicate that we should freeze a certain percentage of the time
+        as determined by the freeze_mon_probability value.
+        """
+        return self.rng.randrange(0, 101) < self.freeze_mon_probability
+
+    def freeze_mon(self, mon):
+        """
+        Send STOP signal to freeze the monitor.
+        """
+        log.info('Sending STOP to mon %s', mon)
+        self.manager.signal_mon(mon, 19)  # STOP
+
+    def unfreeze_mon(self, mon):
+        """
+        Send CONT signal to unfreeze the monitor.
+        """
+        log.info('Sending CONT to mon %s', mon)
+        self.manager.signal_mon(mon, 18)  # CONT
+
+    def kill_mon(self, mon):
+        """
+        Kill the monitor specified
+        """
+        self.log('killing mon.{id}'.format(id=mon))
+        self.manager.kill_mon(mon)
+
+    def revive_mon(self, mon):
+        """
+        Revive the monitor specified
+        """
+        self.log('reviving mon.{id}'.format(id=mon))
+        self.manager.revive_mon(mon)
+
+    def max_killable(self):
+        """
+        Return the maximum number of monitors we can kill.
+        """
+        m = len(_get_mons(self.ctx))
+        if self.maintain_quorum:
+            return max(math.ceil(m/2.0)-1, 0)
+        else:
+            return m
+
+    def do_thrash(self):
+        """
+        Continuously loop and thrash the monitors.
+        """
+        self.log('start thrashing')
+        self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\
+                   'thrash many: {tm}, maintain quorum: {mq} '\
+                   'store thrash: {st}, probability: {stp} '\
+                   'freeze mon: prob {fp} duration {fd}'.format(
+                s=self.random_seed,r=self.revive_delay,t=self.thrash_delay,
+                tm=self.thrash_many, mq=self.maintain_quorum,
+                st=self.store_thrash,stp=self.store_thrash_probability,
+                fp=self.freeze_mon_probability,fd=self.freeze_mon_duration,
+                ))
+
+        while not self.stopping:
+            mons = _get_mons(self.ctx)
+            self.manager.wait_for_mon_quorum_size(len(mons))
+            self.log('making sure all monitors are in the quorum')
+            for m in mons:
+                s = self.manager.get_mon_status(m)
+                assert s['state'] == 'leader' or s['state'] == 'peon'
+                assert len(s['quorum']) == len(mons)
+
+            kill_up_to = self.rng.randrange(1, self.max_killable()+1)
+            mons_to_kill = self.rng.sample(mons, kill_up_to)
+            self.log('monitors to thrash: {m}'.format(m=mons_to_kill))
+
+            mons_to_freeze = []
+            for mon in mons:
+                if mon in mons_to_kill:
+                    continue
+                if self.should_freeze_mon():
+                    mons_to_freeze.append(mon)
+            self.log('monitors to freeze: {m}'.format(m=mons_to_freeze))
+
+            for mon in mons_to_kill:
+                self.log('thrashing mon.{m}'.format(m=mon))
+
+                """ we only thrash stores if we are maintaining quorum """
+                if self.should_thrash_store() and self.maintain_quorum:
+                    self.thrash_store(mon)
+
+                self.kill_mon(mon)
+
+            if mons_to_freeze:
+                for mon in mons_to_freeze:
+                    self.freeze_mon(mon)
+                self.log('waiting for {delay} secs to unfreeze mons'.format(
+                    delay=self.freeze_mon_duration))
+                time.sleep(self.freeze_mon_duration)
+                for mon in mons_to_freeze:
+                    self.unfreeze_mon(mon)
+
+            if self.maintain_quorum:
+                self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill))
+                for m in mons:
+                    if m in mons_to_kill:
+                        continue
+                    s = self.manager.get_mon_status(m)
+                    assert s['state'] == 'leader' or s['state'] == 'peon'
+                    assert len(s['quorum']) == len(mons)-len(mons_to_kill)
+
+            self.log('waiting for {delay} secs before reviving monitors'.format(
+                delay=self.revive_delay))
+            time.sleep(self.revive_delay)
+
+            for mon in mons_to_kill:
+                self.revive_mon(mon)
+            # do more freezes
+            if mons_to_freeze:
+                for mon in mons_to_freeze:
+                    self.freeze_mon(mon)
+                self.log('waiting for {delay} secs to unfreeze mons'.format(
+                    delay=self.freeze_mon_duration))
+                time.sleep(self.freeze_mon_duration)
+                for mon in mons_to_freeze:
+                    self.unfreeze_mon(mon)
+
+            self.manager.wait_for_mon_quorum_size(len(mons))
+            for m in mons:
+                s = self.manager.get_mon_status(m)
+                assert s['state'] == 'leader' or s['state'] == 'peon'
+                assert len(s['quorum']) == len(mons)
+
+            if self.scrub:
+                self.log('triggering scrub')
+                try:
+                    self.manager.raw_cluster_cmd('scrub')
+                except Exception:
+                    log.exception("Saw exception while triggering scrub")
+
+            if self.thrash_delay > 0.0:
+                self.log('waiting for {delay} secs before continuing thrashing'.format(
+                    delay=self.thrash_delay))
+                time.sleep(self.thrash_delay)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Stress test the monitors by thrashing them while another task/workunit
+    is running.
+
+    Please refer to the MonitorThrasher class for further information on the
+    available options.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'mon_thrash task only accepts a dict for configuration'
+    assert len(_get_mons(ctx)) > 2, \
+        'mon_thrash task requires at least 3 monitors'
+    log.info('Beginning mon_thrash...')
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+    thrash_proc = MonitorThrasher(ctx,
+        manager, config,
+        logger=log.getChild('mon_thrasher'))
+    try:
+        log.debug('Yielding')
+        yield
+    finally:
+        log.info('joining mon_thrasher')
+        thrash_proc.do_join()
+        mons = _get_mons(ctx)
+        manager.wait_for_mon_quorum_size(len(mons))
diff --git a/qa/tasks/multibench.py b/qa/tasks/multibench.py
new file mode 100644
index 0000000..13b5ffe
--- /dev/null
+++ b/qa/tasks/multibench.py
@@ -0,0 +1,57 @@
+"""
+Multibench testing
+"""
+import contextlib
+import logging
+import radosbench
+import time
+import copy
+import gevent
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run multibench
+
+    The config should be as follows:
+
+    multibench:
+        time: <seconds to run total>
+        segments: <number of concurrent benches>
+        radosbench: <config for radosbench>
+
+    example:
+
+    tasks:
+    - ceph:
+    - multibench:
+        clients: [client.0]
+        time: 360
+    - interactive:
+    """
+    log.info('Beginning multibench...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+
+    def run_one(num):
+        """Run test spawn from gevent"""
+        start = time.time()
+        benchcontext = copy.copy(config.get('radosbench'))
+        iterations = 0
+        while time.time() - start < int(config.get('time', 600)):
+            log.info("Starting iteration %s of segment %s"%(iterations, num))
+            benchcontext['pool'] = str(num) + "-" + str(iterations)
+            with radosbench.task(ctx, benchcontext):
+                time.sleep()
+            iterations += 1
+    log.info("Starting %s threads"%(str(config.get('segments', 3)),))
+    segments = [
+        gevent.spawn(run_one, i)
+        for i in range(0, int(config.get('segments', 3)))]
+
+    try:
+        yield
+    finally:
+        [i.get() for i in segments]
diff --git a/qa/tasks/object_source_down.py b/qa/tasks/object_source_down.py
new file mode 100644
index 0000000..bea3d18
--- /dev/null
+++ b/qa/tasks/object_source_down.py
@@ -0,0 +1,104 @@
+"""
+Test Object locations going down
+"""
+import logging
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test handling of object location going down
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'object_source_down task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.wait_for_clean()
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+
+    # take 0, 1 out
+    manager.mark_out_osd(0)
+    manager.mark_out_osd(1)
+    manager.wait_for_clean()
+
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.0',
+            'injectargs',
+            '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
+            )
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.1',
+            'injectargs',
+            '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
+            )
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.2',
+            'injectargs',
+            '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
+            )
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.3',
+            'injectargs',
+            '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
+            )
+
+    # kludge to make sure they get a map
+    rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+
+    # create old objects
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+
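+    # the objects above were written while osds 0 and 1 were out, so their
+    # only copies live on osds 2 and 3; shuffle membership (recovery is
+    # delayed, so nothing gets copied) and later kill 2 and 3 to make the
+    # objects unfound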
+    manager.mark_out_osd(3)
+    manager.wait_till_active()
+
+    manager.mark_in_osd(0)
+    manager.wait_till_active()
+
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+
+    manager.mark_out_osd(2)
+    manager.wait_till_active()
+
+    # bring up 1
+    manager.mark_in_osd(1)
+    manager.wait_till_active()
+
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    log.info("Getting unfound objects")
+    unfound = manager.get_num_unfound_objects()
+    assert not unfound
+
+    manager.kill_osd(2)
+    manager.mark_down_osd(2)
+    manager.kill_osd(3)
+    manager.mark_down_osd(3)
+
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    log.info("Getting unfound objects")
+    unfound = manager.get_num_unfound_objects()
+    assert unfound
diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py
new file mode 100644
index 0000000..e026c74
--- /dev/null
+++ b/qa/tasks/omapbench.py
@@ -0,0 +1,83 @@
+"""
+Run omapbench executable within teuthology
+"""
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run omapbench
+
+    The config should be as follows::
+
+        omapbench:
+            clients: [client list]
+            threads: <threads at once>
+            objects: <number of objects to write>
+            entries: <number of entries per object map>
+            keysize: <number of characters per object map key>
+            valsize: <number of characters per object map val>
+            increment: <interval to show in histogram (in ms)>
+            omaptype: <how the omaps should be generated>
+
+    example::
+
+        tasks:
+        - ceph:
+        - omapbench:
+            clients: [client.0]
+            threads: 30
+            objects: 1000
+            entries: 10
+            keysize: 10
+            valsize: 100
+            increment: 100
+            omaptype: uniform
+        - interactive:
+    """
+    log.info('Beginning omapbench...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+    omapbench = {}
+    testdir = teuthology.get_testdir(ctx)
+    log.info("increment: %s", config.get('increment', -1))
+    for role in config.get('clients', ['client.0']):
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        proc = remote.run(
+            args=[
+                "/bin/sh", "-c",
+                " ".join(['adjust-ulimits',
+                          'ceph-coverage',
+                          '{tdir}/archive/coverage',
+                          'omapbench',
+                          '--name', role[len(PREFIX):],
+                          '-t', str(config.get('threads', 30)),
+                          '-o', str(config.get('objects', 1000)),
+                          '--entries', str(config.get('entries',10)),
+                          '--keysize', str(config.get('keysize',10)),
+                          '--valsize', str(config.get('valsize',1000)),
+                          '--inc', str(config.get('increment',10)),
+                          '--omaptype', str(config.get('omaptype','uniform'))
+                          ]).format(tdir=testdir),
+                ],
+            logger=log.getChild('omapbench.{id}'.format(id=id_)),
+            stdin=run.PIPE,
+            wait=False
+            )
+        omapbench[id_] = proc
+
+    try:
+        yield
+    finally:
+        log.info('joining omapbench')
+        run.wait(omapbench.itervalues())
diff --git a/qa/tasks/osd_backfill.py b/qa/tasks/osd_backfill.py
new file mode 100644
index 0000000..f0bba79
--- /dev/null
+++ b/qa/tasks/osd_backfill.py
@@ -0,0 +1,105 @@
+"""
+Osd backfill test
+"""
+import logging
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+
+def rados_start(ctx, remote, cmd):
+    """
+    Run a remote rados command (currently used to only write data)
+    """
+    log.info("rados %s" % ' '.join(cmd))
+    testdir = teuthology.get_testdir(ctx)
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'rados',
+        ]
+    pre.extend(cmd)
+    proc = remote.run(
+        args=pre,
+        wait=False,
+        )
+    return proc
+
+def task(ctx, config):
+    """
+    Test backfill
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'osd_backfill task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+    assert num_osds == 3
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    # write some data
+    p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
+                          '--no-cleanup'])
+    err = p.wait()
+    log.info('err is %d' % err)
+
+    # mark osd.0 out to trigger a rebalance/backfill
+    manager.mark_out_osd(0)
+
+    # also mark it down so it won't be included in pg_temps
+    manager.kill_osd(0)
+    manager.mark_down_osd(0)
+
+    # wait for everything to peer and be happy...
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # write some new data
+    p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '30', 'write', '-b', '4096',
+                          '--no-cleanup'])
+
+    time.sleep(15)
+
+    # blackhole + restart osd.1
+    # this triggers a divergent backfill target
+    manager.blackhole_kill_osd(1)
+    time.sleep(2)
+    manager.revive_osd(1)
+
+    # wait for our writes to complete + succeed
+    err = p.wait()
+    log.info('err is %d' % err)
+
+    # cluster must recover
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # re-add osd.0
+    manager.revive_osd(0)
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+
diff --git a/qa/tasks/osd_failsafe_enospc.py b/qa/tasks/osd_failsafe_enospc.py
new file mode 100644
index 0000000..6910854
--- /dev/null
+++ b/qa/tasks/osd_failsafe_enospc.py
@@ -0,0 +1,218 @@
+"""
+Handle osdfailsafe configuration settings (nearfull ratio and full ratio)
+"""
+from cStringIO import StringIO
+import logging
+import time
+
+from teuthology.orchestra import run
+from util.rados import rados
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio
+    configuration settings
+
+    In order for the test to pass, log-whitelist must be set as follows
+
+        tasks:
+            - chef:
+            - install:
+            - ceph:
+                log-whitelist: ['OSD near full', 'OSD full dropping all updates']
+            - osd_failsafe_enospc:
+
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'osd_failsafe_enospc task only accepts a dict for configuration'
+
+    # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding
+    sleep_time = 50
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+    dummyfile2 = '/etc/resolv.conf'
+
+    manager = ctx.managers['ceph']
+
+    # create 1 pg pool with 1 rep which can only be on osd.0
+    osds = manager.get_osd_dump()
+    for osd in osds:
+        if osd['osd'] != 0:
+            manager.mark_out_osd(osd['osd'])
+
+    log.info('creating pool foo')
+    manager.create_pool("foo")
+    manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1')
+
+    # State NONE -> NEAR
+    log.info('1. Verify warning messages when exceeding nearfull_ratio')
+
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
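+    # watch the cluster log with 'ceph -w' under daemon-helper; closing
+    # its stdin later kills the watcher, and the captured output is then
+    # scanned for the expected WRN/ERR lines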
+    proc = mon.run(
+             args=[
+                 'sudo',
+                 'daemon-helper',
+                 'kill',
+                 'ceph', '-w'
+             ],
+             stdin=run.PIPE,
+             stdout=StringIO(),
+             wait=False,
+        )
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001')
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = proc.stdout.getvalue().split('\n')
+
+    count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+    assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+    # State NEAR -> FULL
+    log.info('2. Verify error messages when exceeding full_ratio')
+
+    proc = mon.run(
+             args=[
+                 'sudo',
+                 'daemon-helper',
+                 'kill',
+                 'ceph', '-w'
+             ],
+             stdin=run.PIPE,
+             stdout=StringIO(),
+             wait=False,
+        )
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = proc.stdout.getvalue().split('\n')
+
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
+
+    log.info('3. Verify write failure when exceeding full_ratio')
+
+    # Write data should fail
+    ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
+    assert ret != 0, 'Expected write failure but it succeeded with exit status 0'
+
+    # Put back default
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
+    time.sleep(10)
+
+    # State FULL -> NEAR
+    log.info('4. Verify write success when NOT exceeding full_ratio')
+
+    # Write should succeed
+    ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
+    assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret
+
+    log.info('5. Verify warning messages again when exceeding nearfull_ratio')
+
+    proc = mon.run(
+             args=[
+                 'sudo',
+                 'daemon-helper',
+                 'kill',
+                 'ceph', '-w'
+             ],
+             stdin=run.PIPE,
+             stdout=StringIO(),
+             wait=False,
+        )
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = proc.stdout.getvalue().split('\n')
+
+    count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+    assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90')
+    time.sleep(10)
+
+    # State NONE -> FULL
+    log.info('6. Verify error messages again when exceeding full_ratio')
+
+    proc = mon.run(
+             args=[
+                 'sudo',
+                 'daemon-helper',
+                 'kill',
+                 'ceph', '-w'
+             ],
+             stdin=run.PIPE,
+             stdout=StringIO(),
+             wait=False,
+        )
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = proc.stdout.getvalue().split('\n')
+
+    count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+    assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
+
+    # State FULL -> NONE
+    log.info('7. Verify no messages when settings are back to default')
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
+    time.sleep(10)
+
+    proc = mon.run(
+             args=[
+                 'sudo',
+                 'daemon-helper',
+                 'kill',
+                 'ceph', '-w'
+             ],
+             stdin=run.PIPE,
+             stdout=StringIO(),
+             wait=False,
+        )
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = proc.stdout.getvalue().split('\n')
+
+    count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+    assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+    log.info('Test Passed')
+
+    # Bring all OSDs back in
+    manager.remove_pool("foo")
+    for osd in osds:
+        if osd['osd'] != 0:
+            manager.mark_in_osd(osd['osd'])
diff --git a/qa/tasks/osd_recovery.py b/qa/tasks/osd_recovery.py
new file mode 100644
index 0000000..db46ade
--- /dev/null
+++ b/qa/tasks/osd_recovery.py
@@ -0,0 +1,208 @@
+"""
+osd recovery
+"""
+import logging
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+
+def rados_start(testdir, remote, cmd):
+    """
+    Run a remote rados command (currently used to only write data)
+    """
+    log.info("rados %s" % ' '.join(cmd))
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'rados',
+        ]
+    pre.extend(cmd)
+    proc = remote.run(
+        args=pre,
+        wait=False,
+        )
+    return proc
+
+def task(ctx, config):
+    """
+    Test (non-backfill) recovery
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'task only accepts a dict for configuration'
+    testdir = teuthology.get_testdir(ctx)
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+    assert num_osds == 3
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    # test some osdmap flags
+    manager.raw_cluster_cmd('osd', 'set', 'noin')
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'noup')
+    manager.raw_cluster_cmd('osd', 'set', 'nodown')
+    manager.raw_cluster_cmd('osd', 'unset', 'noin')
+    manager.raw_cluster_cmd('osd', 'unset', 'noout')
+    manager.raw_cluster_cmd('osd', 'unset', 'noup')
+    manager.raw_cluster_cmd('osd', 'unset', 'nodown')
+
+    # write some new data
+    p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '20', 'write', '-b', '4096',
+                          '--no-cleanup'])
+
+    time.sleep(15)
+
+    # trigger a divergent target:
+    #  blackhole + restart osd.1 (shorter log)
+    manager.blackhole_kill_osd(1)
+    #  kill osd.2 (longer log... we'll make it divergent below)
+    manager.kill_osd(2)
+    time.sleep(2)
+    manager.revive_osd(1)
+
+    # wait for our writes to complete + succeed
+    err = p.wait()
+    log.info('err is %d' % err)
+
+    # cluster must repeer
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_active_or_down()
+
+    # write some more (make sure osd.2 really is divergent)
+    p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
+    p.wait()
+
+    # revive divergent osd
+    manager.revive_osd(2)
+
+    while len(manager.get_osd_status()['up']) < 3:
+        log.info('waiting a bit...')
+        time.sleep(2)
+    log.info('3 are up!')
+
+    # cluster must recover
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+
+def test_incomplete_pgs(ctx, config):
+    """
+    Test handling of incomplete pgs.  Requires 4 osds.
+    """
+    testdir = teuthology.get_testdir(ctx)
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+    assert num_osds == 4
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 4:
+        time.sleep(10)
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    log.info('Testing incomplete pgs...')
+
+    for i in range(4):
+        manager.set_config(
+            i,
+            osd_recovery_delay_start=1000)
+
+    # move data off of osd.0, osd.1
+    manager.raw_cluster_cmd('osd', 'out', '0', '1')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    # lots of objects in rbd (no pg log, will backfill)
+    p = rados_start(testdir, mon,
+                    ['-p', 'rbd', 'bench', '20', 'write', '-b', '1',
+                     '--no-cleanup'])
+    p.wait()
+
+    # few objects in rbd pool (with pg log, normal recovery)
+    for f in range(1, 20):
+        p = rados_start(testdir, mon, ['-p', 'rbd', 'put',
+                              'foo.%d' % f, '/etc/passwd'])
+        p.wait()
+
+    # move it back
+    manager.raw_cluster_cmd('osd', 'in', '0', '1')
+    manager.raw_cluster_cmd('osd', 'out', '2', '3')
+    time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+    time.sleep(10)
+    manager.wait_for_active()
+
+    assert not manager.is_clean()
+    assert not manager.is_recovered()
+
+    # kill 2 + 3
+    log.info('stopping 2,3')
+    manager.kill_osd(2)
+    manager.kill_osd(3)
+    log.info('...')
+    manager.raw_cluster_cmd('osd', 'down', '2', '3')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_active_or_down()
+
+    assert manager.get_num_down() > 0
+
+    # revive 2 + 3
+    manager.revive_osd(2)
+    manager.revive_osd(3)
+    while len(manager.get_osd_status()['up']) < 4:
+        log.info('waiting a bit...')
+        time.sleep(2)
+    log.info('all are up!')
+
+    for i in range(4):
+        manager.kick_recovery_wq(i)
+
+    # cluster must recover
+    manager.wait_for_clean()
diff --git a/qa/tasks/peer.py b/qa/tasks/peer.py
new file mode 100644
index 0000000..a189ae0
--- /dev/null
+++ b/qa/tasks/peer.py
@@ -0,0 +1,97 @@
+"""
+Peer test (Single test, not much configurable here)
+"""
+import logging
+import json
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test peering.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'peer task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    for i in range(3):
+        manager.set_config(
+            i,
+            osd_recovery_delay_start=120)
+
+    # take one osd down
+    manager.kill_osd(2)
+    manager.mark_down_osd(2)
+
+    # kludge to make sure they get a map
+    rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-'])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # kill another and revive 2, so that some pgs can't peer.
+    manager.kill_osd(1)
+    manager.mark_down_osd(1)
+    manager.revive_osd(2)
+    manager.wait_till_osd_is_up(2)
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+    manager.wait_for_active_or_down()
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+    # look for down pgs
+    num_down_pgs = 0
+    pgs = manager.get_pg_stats()
+    for pg in pgs:
+        out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
+        log.debug("out string %s", out)
+        j = json.loads(out)
+        log.info("pg is %s, query json is %s", pg, j)
+
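+        # any pg that went down should be blocked in peering, waiting to
+        # probe the dead osd.1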
+        if pg['state'].count('down'):
+            num_down_pgs += 1
+            # verify that it is blocked on osd.1
+            rs = j['recovery_state']
+            assert len(rs) > 0
+            assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
+            assert rs[1]['name'] == 'Started/Primary/Peering'
+            assert rs[1]['blocked']
+            assert rs[1]['down_osds_we_would_probe'] == [1]
+            assert len(rs[1]['peering_blocked_by']) == 1
+            assert rs[1]['peering_blocked_by'][0]['osd'] == 1
+
+    assert num_down_pgs > 0
+
+    # bring it all back
+    manager.revive_osd(1)
+    manager.wait_till_osd_is_up(1)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
diff --git a/qa/tasks/peering_speed_test.py b/qa/tasks/peering_speed_test.py
new file mode 100644
index 0000000..ab53238
--- /dev/null
+++ b/qa/tasks/peering_speed_test.py
@@ -0,0 +1,87 @@
+"""
+Remotely run peering tests.
+"""
+import logging
+import time
+
+log = logging.getLogger(__name__)
+
+from args import argify
+
+POOLNAME = "POOLNAME"
+ARGS = [
+    ('num_pgs', 'number of pgs to create', 256, int),
+    ('max_time', 'seconds to complete peering', 0, int),
+    ('runs', 'trials to run', 10, int),
+    ('num_objects', 'objects to create', 256 * 1024, int),
+    ('object_size', 'size in bytes for objects', 64, int),
+    ('creation_time_limit', 'time limit for pool population', 60*60, int),
+    ('create_threads', 'concurrent writes for create', 256, int)
+    ]
+
+def setup(ctx, config):
+    """
+    Setup peering test on remotes.
+    """
+    manager = ctx.managers['ceph']
+    manager.clear_pools()
+    manager.create_pool(POOLNAME, config.num_pgs)
+    log.info("populating pool")
+    manager.rados_write_objects(
+        POOLNAME,
+        config.num_objects,
+        config.object_size,
+        config.creation_time_limit,
+        config.create_threads)
+    log.info("done populating pool")
+
+def do_run(ctx, config):
+    """
+    Perform the test.
+    """
+    start = time.time()
+    # mark in osd
+    manager = ctx.managers['ceph']
+    manager.mark_in_osd(0)
+    log.info("writing out objects")
+    manager.rados_write_objects(
+        POOLNAME,
+        config.num_pgs, # write 1 object per pg or so
+        1,
+        config.creation_time_limit,
+        config.num_pgs, # lots of concurrency
+        cleanup = True)
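+    # once a write to every pg has completed, all pgs have peered and
+    # gone active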
+    peering_end = time.time()
+
+    log.info("peering done, waiting on recovery")
+    manager.wait_for_clean()
+
+    log.info("recovery done")
+    recovery_end = time.time()
+    if config.max_time:
+        assert(peering_end - start < config.max_time)
+    manager.mark_out_osd(0)
+    manager.wait_for_clean()
+    return {
+        'time_to_active': peering_end - start,
+        'time_to_clean': recovery_end - start
+        }
+
+@argify("peering_speed_test", ARGS)
+def task(ctx, config):
+    """
+    Peering speed test
+    """
+    setup(ctx, config)
+    manager = ctx.managers['ceph']
+    manager.mark_out_osd(0)
+    manager.wait_for_clean()
+    ret = []
+    for i in range(config.runs):
+        log.info("Run {i}".format(i = i))
+        ret.append(do_run(ctx, config))
+
+    manager.mark_in_osd(0)
+    ctx.summary['recovery_times'] = {
+        'runs': ret
+        }
diff --git a/qa/tasks/populate_rbd_pool.py b/qa/tasks/populate_rbd_pool.py
new file mode 100644
index 0000000..db67d60
--- /dev/null
+++ b/qa/tasks/populate_rbd_pool.py
@@ -0,0 +1,82 @@
+"""
+Populate rbd pools
+"""
+import contextlib
+import logging
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Populate <num_pools> pools with prefix <pool_prefix> with <num_images>
+    rbd images at <num_snaps> snaps
+
+    The config could be as follows::
+
+        populate_rbd_pool:
+          client: <client>
+          pool_prefix: foo
+          num_pools: 5
+          num_images: 10
+          num_snaps: 3
+          image_size: 10737418240
+    """
+    if config is None:
+        config = {}
+    client = config.get("client", "client.0")
+    pool_prefix = config.get("pool_prefix", "foo")
+    num_pools = config.get("num_pools", 2)
+    num_images = config.get("num_images", 20)
+    num_snaps = config.get("num_snaps", 4)
+    image_size = config.get("image_size", 100)
+    write_size = config.get("write_size", 1024*1024)
+    write_threads = config.get("write_threads", 10)
+    write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30)
+
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+
+    for poolid in range(num_pools):
+        poolname = "%s-%s" % (pool_prefix, str(poolid))
+        log.info("Creating pool %s" % (poolname,))
+        ctx.managers['ceph'].create_pool(poolname)
+        for imageid in range(num_images):
+            imagename = "rbd-%s" % (str(imageid),)
+            log.info("Creating imagename %s" % (imagename,))
+            remote.run(
+                args = [
+                    "rbd",
+                    "create",
+                    imagename,
+                    "--image-format", "1",
+                    "--size", str(image_size),
+                    "--pool", str(poolname)])
+            def bench_run():
+                remote.run(
+                    args = [
+                        "rbd",
+                        "bench-write",
+                        imagename,
+                        "--pool", poolname,
+                        "--io-size", str(write_size),
+                        "--io-threads", str(write_threads),
+                        "--io-total", str(write_total_per_snap),
+                        "--io-pattern", "rand"])
+            log.info("imagename %s first bench" % (imagename,))
+            bench_run()
+            for snapid in range(num_snaps):
+                snapname = "snap-%s" % (str(snapid),)
+                log.info("imagename %s creating snap %s" % (imagename, snapname))
+                remote.run(
+                    args = [
+                        "rbd", "snap", "create",
+                        "--pool", poolname,
+                        "--snap", snapname,
+                        imagename
+                        ])
+                bench_run()
+
+    try:
+        yield
+    finally:
+        log.info('done')
diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py
new file mode 100644
index 0000000..5b25acd
--- /dev/null
+++ b/qa/tasks/qemu.py
@@ -0,0 +1,473 @@
+"""
+Qemu task
+"""
+from cStringIO import StringIO
+
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from tasks import rbd
+from teuthology.orchestra import run
+from teuthology.config import config as teuth_config
+
+log = logging.getLogger(__name__)
+
+DEFAULT_NUM_RBD = 1
+DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2'
+DEFAULT_MEM = 4096 # in megabytes
+
+def create_images(ctx, config, managers):
+    for client, client_config in config.iteritems():
+        num_rbd = client_config.get('num_rbd', 1)
+        clone = client_config.get('clone', False)
+        assert num_rbd > 0, 'at least one rbd device must be used'
+        for i in xrange(num_rbd):
+            create_config = {
+                client: {
+                    'image_name': '{client}.{num}'.format(client=client, num=i),
+                    'image_format': 2 if clone else 1,
+                    }
+                }
+            managers.append(
+                lambda create_config=create_config:
+                rbd.create_image(ctx=ctx, config=create_config)
+                )
+
+def create_clones(ctx, config, managers):
+    for client, client_config in config.iteritems():
+        num_rbd = client_config.get('num_rbd', 1)
+        clone = client_config.get('clone', False)
+        if clone:
+            for i in xrange(num_rbd):
+                create_config = {
+                    client: {
+                        'image_name':
+                        '{client}.{num}-clone'.format(client=client, num=i),
+                        'parent_name':
+                        '{client}.{num}'.format(client=client, num=i),
+                        }
+                    }
+                managers.append(
+                    lambda create_config=create_config:
+                    rbd.clone_image(ctx=ctx, config=create_config)
+                    )
+
+@contextlib.contextmanager
+def create_dirs(ctx, config):
+    """
+    Handle directory creation and cleanup
+    """
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        assert 'test' in client_config, 'You must specify a test to run'
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        remote.run(
+            args=[
+                'install', '-d', '-m0755', '--',
+                '{tdir}/qemu'.format(tdir=testdir),
+                '{tdir}/archive/qemu'.format(tdir=testdir),
+                ]
+            )
+    try:
+        yield
+    finally:
+        for client, client_config in config.iteritems():
+            assert 'test' in client_config, 'You must specify a test to run'
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            remote.run(
+                args=[
+                    'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
+                    ]
+                )
+
+@contextlib.contextmanager
+def generate_iso(ctx, config):
+    """Execute system commands to generate iso"""
+    log.info('generating iso...')
+    testdir = teuthology.get_testdir(ctx)
+
+    # use ctx.config instead of config, because config has been
+    # through teuthology.replace_all_with_clients()
+    refspec = ctx.config.get('branch')
+    if refspec is None:
+        refspec = ctx.config.get('tag')
+    if refspec is None:
+        refspec = ctx.config.get('sha1')
+    if refspec is None:
+        refspec = 'HEAD'
+
+    # hack: the git_url is always ceph-ci or ceph
+    git_url = teuth_config.get_ceph_git_url()
+    repo_name = 'ceph.git'
+    if git_url.count('ceph-ci'):
+        repo_name = 'ceph-ci.git'
+
+    for client, client_config in config.iteritems():
+        assert 'test' in client_config, 'You must specify a test to run'
+        test_url = client_config['test'].format(repo=repo_name, branch=refspec)
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        src_dir = os.path.dirname(__file__)
+        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
+        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
+
+        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
+            test_setup = ''.join(f.readlines())
+            # configuring the commands to setup the nfs mount
+            mnt_dir = "/export/{client}".format(client=client)
+            test_setup = test_setup.format(
+                mnt_dir=mnt_dir
+            )
+
+        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
+            test_teardown = ''.join(f.readlines())
+
+        user_data = test_setup
+        if client_config.get('type', 'filesystem') == 'filesystem':
+            for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)):
+                dev_letter = chr(ord('b') + i)
+                user_data += """
+- |
+  #!/bin/bash
+  mkdir /mnt/test_{dev_letter}
+  mkfs -t xfs /dev/vd{dev_letter}
+  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
+""".format(dev_letter=dev_letter)
+
+        # this may change later to pass the directories as args to the
+        # script or something. xfstests needs that.
+        user_data += """
+- |
+  #!/bin/bash
+  test -d /mnt/test_b && cd /mnt/test_b
+  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
+""" + test_teardown
+
+        teuthology.write_file(remote, userdata_path, StringIO(user_data))
+
+        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
+            teuthology.write_file(remote, metadata_path, f)
+
+        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
+
+        log.info('fetching test %s for %s', test_url, client)
+        remote.run(
+            args=[
+                'wget', '-nv', '-O', test_file,
+                test_url,
+                run.Raw('&&'),
+                'chmod', '755', test_file,
+                ],
+            )
+        remote.run(
+            args=[
+                'genisoimage', '-quiet', '-input-charset', 'utf-8',
+                '-volid', 'cidata', '-joliet', '-rock',
+                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+                '-graft-points',
+                'user-data={userdata}'.format(userdata=userdata_path),
+                'meta-data={metadata}'.format(metadata=metadata_path),
+                'test.sh={file}'.format(file=test_file),
+                ],
+            )
+    try:
+        yield
+    finally:
+        for client in config.iterkeys():
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            remote.run(
+                args=[
+                    'rm', '-f',
+                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+                    os.path.join(testdir, 'qemu', 'userdata.' + client),
+                    os.path.join(testdir, 'qemu', 'metadata.' + client),
+                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
+                    ],
+                )
+
+@contextlib.contextmanager
+def download_image(ctx, config):
+    """Downland base image, remove image file when done"""
+    log.info('downloading base image')
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
+        remote.run(
+            args=[
+                'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL,
+                ]
+            )
+    try:
+        yield
+    finally:
+        log.debug('cleaning up base image files')
+        for client in config.iterkeys():
+            base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
+                tdir=testdir,
+                client=client,
+                )
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            remote.run(
+                args=[
+                    'rm', '-f', base_file,
+                    ],
+                )
+
+
+def _setup_nfs_mount(remote, client, mount_dir):
+    """
+    Sets up an nfs mount on the remote that the guest can use to
+    store logs. This nfs mount is also used to touch a file
+    at the end of the test to indicate whether the test was successful
+    or not.
+    """
+    export_dir = "/export/{client}".format(client=client)
+    log.info("Creating the nfs export directory...")
+    remote.run(args=[
+        'sudo', 'mkdir', '-p', export_dir,
+    ])
+    log.info("Mounting the test directory...")
+    remote.run(args=[
+        'sudo', 'mount', '--bind', mount_dir, export_dir,
+    ])
+    log.info("Adding mount to /etc/exports...")
+    export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
+        dir=export_dir
+    )
+    remote.run(args=[
+        'sudo', 'sed', '-i', '/^\/export\//d', "/etc/exports",
+    ])
+    remote.run(args=[
+        'echo', export, run.Raw("|"),
+        'sudo', 'tee', '-a', "/etc/exports",
+    ])
+    log.info("Restarting NFS...")
+    if remote.os.package_type == "deb":
+        remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
+    else:
+        remote.run(args=['sudo', 'systemctl', 'restart', 'nfs'])
+
+
+def _teardown_nfs_mount(remote, client):
+    """
+    Tears down the nfs mount on the remote used for logging and reporting the
+    status of the tests being run in the guest.
+    """
+    log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
+    export_dir = "/export/{client}".format(client=client)
+    log.info("Stopping NFS...")
+    if remote.os.package_type == "deb":
+        remote.run(args=[
+            'sudo', 'service', 'nfs-kernel-server', 'stop'
+        ])
+    else:
+        remote.run(args=[
+            'sudo', 'systemctl', 'stop', 'nfs'
+        ])
+    log.info("Unmounting exported directory...")
+    remote.run(args=[
+        'sudo', 'umount', export_dir
+    ])
+    log.info("Deleting exported directory...")
+    remote.run(args=[
+        'sudo', 'rm', '-r', '/export'
+    ])
+    log.info("Deleting export from /etc/exports...")
+    remote.run(args=[
+        'sudo', 'sed', '-i', '$ d', '/etc/exports'
+    ])
+    log.info("Starting NFS...")
+    if remote.os.package_type == "deb":
+        remote.run(args=[
+            'sudo', 'service', 'nfs-kernel-server', 'start'
+        ])
+    else:
+        remote.run(args=[
+            'sudo', 'systemctl', 'start', 'nfs'
+        ])
+
+
+@contextlib.contextmanager
+def run_qemu(ctx, config):
+    """Setup kvm environment and start qemu"""
+    procs = []
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
+        remote.run(
+            args=[
+                'mkdir', log_dir, run.Raw('&&'),
+                'sudo', 'modprobe', 'kvm',
+                ]
+            )
+
+        # make an nfs mount to use for logging and to
+        # allow the test to tell teuthology the test outcome
+        _setup_nfs_mount(remote, client, log_dir)
+
+        base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
+            tdir=testdir,
+            client=client
+        )
+        qemu_cmd = 'qemu-system-x86_64'
+        if remote.os.package_type == "rpm":
+            qemu_cmd = "/usr/libexec/qemu-kvm"
+        args=[
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'daemon-helper',
+            'term',
+            qemu_cmd, '-enable-kvm', '-nographic',
+            '-m', str(client_config.get('memory', DEFAULT_MEM)),
+            # base OS device
+            '-drive',
+            'file={base},format=qcow2,if=virtio'.format(base=base_file),
+            # cd holding metadata for cloud-init
+            '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+            ]
+
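+        # pick the qemu cache mode matching the rbd cache settings:
+        # writeback if the rbd cache may hold dirty data, writethrough if
+        # it may not, and none if the rbd cache is disabled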
+        cachemode = 'none'
+        ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+        ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+        ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+        if ceph_config.get('rbd cache'):
+            if ceph_config.get('rbd cache max dirty', 1) > 0:
+                cachemode = 'writeback'
+            else:
+                cachemode = 'writethrough'
+
+        clone = client_config.get('clone', False)
+        for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)):
+            suffix = '-clone' if clone else ''
+            args.extend([
+                '-drive',
+                'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
+                    img='{client}.{num}{suffix}'.format(client=client, num=i,
+                                                        suffix=suffix),
+                    id=client[len('client.'):],
+                    cachemode=cachemode,
+                    ),
+                ])
+
+        log.info('starting qemu...')
+        procs.append(
+            remote.run(
+                args=args,
+                logger=log.getChild(client),
+                stdin=run.PIPE,
+                wait=False,
+                )
+            )
+
+    try:
+        yield
+    finally:
+        log.info('waiting for qemu tests to finish...')
+        run.wait(procs)
+
+        log.debug('checking that qemu tests succeeded...')
+        for client in config.iterkeys():
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            # teardown nfs mount
+            _teardown_nfs_mount(remote, client)
+            # check for test status
+            remote.run(
+                args=[
+                    'test', '-f',
+                    '{tdir}/archive/qemu/{client}/success'.format(
+                        tdir=testdir,
+                        client=client
+                        ),
+                    ],
+                )
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run a test inside of QEMU on top of rbd. Only one test
+    is supported per client.
+
+    For example, you can specify which clients to run on::
+
+        tasks:
+        - ceph:
+        - qemu:
+            client.0:
+              test: http://download.ceph.com/qa/test.sh
+            client.1:
+              test: http://download.ceph.com/qa/test2.sh
+
+    Or use the same settings on all clients:
+
+        tasks:
+        - ceph:
+        - qemu:
+            all:
+              test: http://download.ceph.com/qa/test.sh
+
+    For tests that don't need a filesystem, set type to block::
+
+        tasks:
+        - ceph:
+        - qemu:
+            client.0:
+              test: http://download.ceph.com/qa/test.sh
+              type: block
+
+    The test should be configured to run on /dev/vdb and later
+    devices.
+
+    If you want to run a test that uses more than one rbd image,
+    specify how many images to use::
+
+        tasks:
+        - ceph:
+        - qemu:
+            client.0:
+              test: http://download.ceph.com/qa/test.sh
+              type: block
+              num_rbd: 2
+
+    You can set the amount of memory the VM has (default is 4096 MB)::
+
+        tasks:
+        - ceph:
+        - qemu:
+            client.0:
+              test: http://download.ceph.com/qa/test.sh
+              memory: 512 # megabytes
+
+    If you want to run a test against a cloned rbd image, set clone to true::
+
+        tasks:
+        - ceph:
+        - qemu:
+            client.0:
+              test: http://download.ceph.com/qa/test.sh
+              clone: true
+    """
+    assert isinstance(config, dict), \
+           "task qemu only supports a dictionary for configuration"
+
+    config = teuthology.replace_all_with_clients(ctx.cluster, config)
+
+    managers = []
+    create_images(ctx=ctx, config=config, managers=managers)
+    managers.extend([
+        lambda: create_dirs(ctx=ctx, config=config),
+        lambda: generate_iso(ctx=ctx, config=config),
+        lambda: download_image(ctx=ctx, config=config),
+        ])
+    create_clones(ctx=ctx, config=config, managers=managers)
+    managers.append(
+        lambda: run_qemu(ctx=ctx, config=config),
+        )
+
+    with contextutil.nested(*managers):
+        yield
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
new file mode 100644
index 0000000..43f560c
--- /dev/null
+++ b/qa/tasks/rados.py
@@ -0,0 +1,250 @@
+"""
+Rados model-based integration tests
+"""
+import contextlib
+import logging
+import gevent
+from teuthology import misc as teuthology
+
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run RadosModel-based integration tests.
+
+    The config should be as follows::
+
+        rados:
+          clients: [client list]
+          ops: <number of ops>
+          objects: <number of objects to use>
+          max_in_flight: <max number of operations in flight>
+          object_size: <size of objects in bytes>
+          min_stride_size: <minimum write stride size in bytes>
+          max_stride_size: <maximum write stride size in bytes>
+          op_weights: <dictionary mapping operation type to integer weight>
+          runs: <number of times to run> - the pool is remade between runs
+          ec_pool: use an ec pool
+          erasure_code_profile: profile to use with the erasure coded pool
+          pool_snaps: use pool snapshots instead of self-managed snapshots
+          write_fadvise_dontneed: write with LIBRADOS_OP_FLAG_FADVISE_DONTNEED set,
+                                  hinting that the data will not be accessed again
+                                  soon, so the osd backend need not keep it in cache.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rados:
+            clients: [client.0]
+            ops: 1000
+            max_seconds: 0   # 0 for no limit
+            objects: 25
+            max_in_flight: 16
+            object_size: 4000000
+            min_stride_size: 1024
+            max_stride_size: 4096
+            op_weights:
+              read: 20
+              write: 10
+              delete: 2
+              snap_create: 3
+              rollback: 2
+              snap_remove: 0
+            ec_pool: create an ec pool, defaults to False
+            erasure_code_profile:
+              name: teuthologyprofile
+              k: 2
+              m: 1
+              ruleset-failure-domain: osd
+            pool_snaps: true
+            write_fadvise_dontneed: true
+            runs: 10
+        - interactive:
+
+    Optionally, you can provide the pool name to run against:
+
+        tasks:
+        - ceph:
+        - exec:
+            client.0:
+              - ceph osd pool create foo
+        - rados:
+            clients: [client.0]
+            pools: [foo]
+            ...
+
+    Alternatively, you can provide a pool prefix:
+
+        tasks:
+        - ceph:
+        - exec:
+            client.0:
+              - ceph osd pool create foo.client.0
+        - rados:
+            clients: [client.0]
+            pool_prefix: foo
+            ...
+
+    The tests are run asynchronously; they are not complete when the task
+    returns. For instance:
+
+        - rados:
+            clients: [client.0]
+            pools: [ecbase]
+            ops: 4000
+            objects: 500
+            op_weights:
+              read: 100
+              write: 100
+              delete: 50
+              copy_from: 50
+        - print: "**** done rados ec-cache-agent (part 2)"
+
+    will run the print task immediately after the rados task begins, but
+    not after it completes. To make the rados task a blocking / sequential
+    task, use:
+
+        - sequential:
+          - rados:
+              clients: [client.0]
+              pools: [ecbase]
+              ops: 4000
+              objects: 500
+              op_weights:
+                read: 100
+                write: 100
+                delete: 50
+                copy_from: 50
+        - print: "**** done rados ec-cache-agent (part 2)"
+
+    """
+    log.info('Beginning rados...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+
+    object_size = int(config.get('object_size', 4000000))
+    op_weights = config.get('op_weights', {})
+    testdir = teuthology.get_testdir(ctx)
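+
+    # Assemble the base ceph_test_rados command line; the per-client
+    # CEPH_CLIENT_ID environment and the target --pool are appended later
+    # in thread().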
+    args = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'ceph_test_rados']
+    if config.get('ec_pool', False):
+        args.extend(['--ec-pool'])
+    if config.get('write_fadvise_dontneed', False):
+        args.extend(['--write-fadvise-dontneed'])
+    if config.get('pool_snaps', False):
+        args.extend(['--pool-snaps'])
+    args.extend([
+        '--max-ops', str(config.get('ops', 10000)),
+        '--objects', str(config.get('objects', 500)),
+        '--max-in-flight', str(config.get('max_in_flight', 16)),
+        '--size', str(object_size),
+        '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
+        '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+        '--max-seconds', str(config.get('max_seconds', 0))
+        ])
+
+    weights = {}
+    weights['read'] = 100
+    weights['write'] = 100
+    weights['delete'] = 10
+    # Parallel of the op_types in test/osd/TestRados.cc
+    for field in [
+        # read handled above
+        # write handled above
+        # delete handled above
+        "snap_create",
+        "snap_remove",
+        "rollback",
+        "setattr",
+        "rmattr",
+        "watch",
+        "copy_from",
+        "hit_set_list",
+        "is_dirty",
+        "undirty",
+        "cache_flush",
+        "cache_try_flush",
+        "cache_evict",
+        "append",
+        "write",
+        "read",
+        "delete"
+        ]:
+        if field in op_weights:
+            weights[field] = op_weights[field]
+
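+    # With write_append_excl enabled (the default), give half of the write
+    # and append weights to their exclusive-create variants so both flavours
+    # of each operation are exercised equally.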
+    if config.get('write_append_excl', True):
+        if 'write' in weights:
+            weights['write'] = weights['write'] / 2
+            weights['write_excl'] = weights['write']
+
+        if 'append' in weights:
+            weights['append'] = weights['append'] / 2
+            weights['append_excl'] = weights['append']
+
+    for op, weight in weights.iteritems():
+        args.extend([
+            '--op', op, str(weight)
+        ])
+
+    def thread():
+        """Thread spawned by gevent"""
+        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+        log.info('clients are %s' % clients)
+        manager = ctx.managers['ceph']
+        if config.get('ec_pool', False):
+            profile = config.get('erasure_code_profile', {})
+            profile_name = profile.get('name', 'teuthologyprofile')
+            manager.create_erasure_code_profile(profile_name, profile)
+        else:
+            profile_name = None
+        for i in range(int(config.get('runs', '1'))):
+            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
+            tests = {}
+            existing_pools = config.get('pools', [])
+            created_pools = []
+            for role in config.get('clients', clients):
+                assert isinstance(role, basestring)
+                PREFIX = 'client.'
+                assert role.startswith(PREFIX)
+                id_ = role[len(PREFIX):]
+
+                pool = config.get('pool', None)
+                if not pool and existing_pools:
+                    pool = existing_pools.pop()
+                else:
+                    pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
+                    created_pools.append(pool)
+                    if config.get('fast_read', False):
+                        manager.raw_cluster_cmd(
+                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
+
+                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+                proc = remote.run(
+                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
+                    ["--pool", pool],
+                    logger=log.getChild("rados.{id}".format(id=id_)),
+                    stdin=run.PIPE,
+                    wait=False
+                    )
+                tests[id_] = proc
+            run.wait(tests.itervalues())
+
+            for pool in created_pools:
+                manager.remove_pool(pool)
+
+    running = gevent.spawn(thread)
+
+    try:
+        yield
+    finally:
+        log.info('joining rados')
+        running.get()
diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py
new file mode 100644
index 0000000..3db57af
--- /dev/null
+++ b/qa/tasks/radosbench.py
@@ -0,0 +1,104 @@
+"""
+Rados benchmarking
+"""
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run radosbench
+
+    The config should be as follows:
+
+    radosbench:
+        clients: [client list]
+        time: <seconds to run>
+        pool: <pool to use>
+        size: write size to use
+        unique_pool: use a unique pool, defaults to False
+        ec_pool: create an ec pool, defaults to False
+        create_pool: create pool, defaults to False
+        erasure_code_profile:
+          name: teuthologyprofile
+          k: 2
+          m: 1
+          ruleset-failure-domain: osd
+        cleanup: false (defaults to true)
+    example:
+
+    tasks:
+    - ceph:
+    - radosbench:
+        clients: [client.0]
+        time: 360
+    - interactive:
+    """
+    log.info('Beginning radosbench...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+    radosbench = {}
+
+    testdir = teuthology.get_testdir(ctx)
+    manager = ctx.managers['ceph']
+
+    create_pool = config.get('create_pool', True)
+    for role in config.get('clients', ['client.0']):
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+
+        if config.get('ec_pool', False):
+            profile = config.get('erasure_code_profile', {})
+            profile_name = profile.get('name', 'teuthologyprofile')
+            manager.create_erasure_code_profile(profile_name, profile)
+        else:
+            profile_name = None
+
+        cleanup = []
+        if not config.get('cleanup', True):
+            cleanup = ['--no-cleanup']
+
+        pool = config.get('pool', 'data')
+        if create_pool:
+            if pool != 'data':
+                manager.create_pool(pool, erasure_code_profile_name=profile_name)
+            else:
+                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
+
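+        # Run radosbench through a single shell invocation; the arguments are
+        # joined into one string so the {tdir} placeholder can be expanded
+        # with .format() after joining.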
+        proc = remote.run(
+            args=[
+                "/bin/sh", "-c",
+                " ".join(['adjust-ulimits',
+                          'ceph-coverage',
+                          '{tdir}/archive/coverage',
+                          'rados',
+                          '--no-log-to-stderr',
+                          '--name', role,
+                          '-b', str(config.get('size', 4<<20)),
+                          '-p' , pool,
+                          'bench', str(config.get('time', 360)), 'write',
+                          ] + cleanup).format(tdir=testdir),
+                ],
+            logger=log.getChild('radosbench.{id}'.format(id=id_)),
+            stdin=run.PIPE,
+            wait=False
+            )
+        radosbench[id_] = proc
+
+    try:
+        yield
+    finally:
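+        # Allow each benchmark up to five times its configured runtime plus a
+        # three-minute grace period before the join below times out.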
+        timeout = config.get('time', 360) * 5 + 180
+        log.info('joining radosbench (timing out after %ss)', timeout)
+        run.wait(radosbench.itervalues(), timeout=timeout)
+
+        if pool != 'data' and create_pool:
+            manager.remove_pool(pool)
diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py
new file mode 100644
index 0000000..6c59815
--- /dev/null
+++ b/qa/tasks/radosgw_admin.py
@@ -0,0 +1,1034 @@
+"""
+Rgw admin testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+#   grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+
+import copy
+import json
+import logging
+import time
+import datetime
+
+from cStringIO import StringIO
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import httplib2
+
+import util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
+
+log = logging.getLogger(__name__)
+
+def create_presigned_url(conn, method, bucket_name, key_name, expiration):
+    return conn.generate_url(expires_in=expiration,
+        method=method,
+        bucket=bucket_name,
+        key=key_name,
+        query_auth=True,
+    )
+
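+# Issue a single request against a presigned URL, without following redirects
+# by default, so the multi-region tests below can assert on 301 responses
+# directly.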
+def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
+    url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
+    print url
+    h = httplib2.Http()
+    h.follow_redirects = follow_redirects
+    return h.request(url, method)
+
+
+def get_acl(key):
+    """
+    Helper function to get the xml acl from a key, ensuring that the xml
+    version tag is removed from the acl response
+    """
+    raw_acl = key.get_xml_acl()
+
+    def remove_version(string):
+        return string.split(
+            '<?xml version="1.0" encoding="UTF-8"?>'
+        )[-1]
+
+    def remove_newlines(string):
+        return string.strip('\n')
+
+    return remove_version(
+        remove_newlines(raw_acl)
+    )
+
+
+def task(ctx, config):
+    """
+    Test radosgw-admin functionality against a running rgw instance.
+    """
+    global log
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    multi_region_run = rgw_utils.multi_region_enabled(ctx)
+
+    client = clients[0]  # default choice; multi-region code may overwrite this
+    if multi_region_run:
+        client = rgw_utils.get_master_client(ctx, clients)
+
+    # once the client is chosen, pull the host name and  assigned port out of
+    # the role_endpoints that were assigned by the rgw task
+    (remote_host, remote_port) = ctx.rgw.role_endpoints[client]
+
+    realm = ctx.rgw.realm
+    log.debug('radosgw-admin: realm %r', realm)
+
+    ##
+    user1='foo'
+    user2='fud'
+    subuser1='foo:foo1'
+    subuser2='foo:foo2'
+    display_name1='Foo'
+    display_name2='Fud'
+    email='foo@foo.com'
+    email2='bar@bar.com'
+    access_key='9te6NH5mcdcq0Tc5i8i1'
+    secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+    access_key2='p5YnriCv1nAtykxBrupQ'
+    secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+    swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+    swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+    bucket_name='myfoo'
+    bucket_name2='mybar'
+
+    # connect to rgw
+    connection = boto.s3.connection.S3Connection(
+        aws_access_key_id=access_key,
+        aws_secret_access_key=secret_key,
+        is_secure=False,
+        port=remote_port,
+        host=remote_host,
+        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+        )
+    connection2 = boto.s3.connection.S3Connection(
+        aws_access_key_id=access_key2,
+        aws_secret_access_key=secret_key2,
+        is_secure=False,
+        port=remote_port,
+        host=remote_host,
+        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+        )
+
+    # legend (test cases can be easily grep-ed out)
+    # TESTCASE 'testname','object','method','operation','assertion'
+    # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+    assert err
+
+    # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+    (err, out) = rgwadmin(ctx, client, [
+            'user', 'create',
+            '--uid', user1,
+            '--display-name', display_name1,
+            '--email', email,
+            '--access-key', access_key,
+            '--secret', secret_key,
+            '--max-buckets', '4'
+            ],
+            check_status=True)
+
+    # TESTCASE 'duplicate email','user','create','existing user email','fails'
+    (err, out) = rgwadmin(ctx, client, [
+            'user', 'create',
+            '--uid', user2,
+            '--display-name', display_name2,
+            '--email', email,
+            ])
+    assert err
+
+    # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+    assert out['user_id'] == user1
+    assert out['email'] == email
+    assert out['display_name'] == display_name1
+    assert len(out['keys']) == 1
+    assert out['keys'][0]['access_key'] == access_key
+    assert out['keys'][0]['secret_key'] == secret_key
+    assert not out['suspended']
+
+    # this whole block should only be run if regions have been configured
+    if multi_region_run:
+        rgw_utils.radosgw_agent_sync_all(ctx)
+        # post-sync, validate that user1 exists on the sync destination host
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            dest_client = c_config['dest']
+            (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
+            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+            assert out['user_id'] == user1
+            assert out['email'] == email
+            assert out['display_name'] == display_name1
+            assert len(out['keys']) == 1
+            assert out['keys'][0]['access_key'] == access_key
+            assert out['keys'][0]['secret_key'] == secret_key
+            assert not out['suspended']
+
+        # compare the metadata between different regions, make sure it matches
+        log.debug('compare the metadata between different regions, make sure it matches')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err1, out1) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client,
+                ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+            assert out1 == out2
+
+        # suspend a user on the master, then check the status on the destination
+        log.debug('suspend a user on the master, then check the status on the destination')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
+            rgw_utils.radosgw_agent_sync_all(ctx)
+            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+            assert out['suspended']
+
+        # delete a user on the master, then check that it's gone on the destination
+        log.debug('delete a user on the master, then check that it\'s gone on the destination')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
+            rgw_utils.radosgw_agent_sync_all(ctx)
+            (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
+            assert out is None
+            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+            assert out is None
+
+            # then recreate it so later tests pass
+            (err, out) = rgwadmin(ctx, client, [
+                'user', 'create',
+                '--uid', user1,
+                '--display-name', display_name1,
+                '--email', email,
+                '--access-key', access_key,
+                '--secret', secret_key,
+                '--max-buckets', '4'
+                ],
+                check_status=True)
+
+        # now do the multi-region bucket tests
+        log.debug('now do the multi-region bucket tests')
+
+        # Create a second user for the following tests
+        log.debug('Create a second user for the following tests')
+        (err, out) = rgwadmin(ctx, client, [
+            'user', 'create',
+            '--uid', user2,
+            '--display-name', display_name2,
+            '--email', email2,
+            '--access-key', access_key2,
+            '--secret', secret_key2,
+            '--max-buckets', '4'
+            ],
+            check_status=True)
+        (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
+        assert out is not None
+
+        # create a bucket and do a sync
+        log.debug('create a bucket and do a sync')
+        bucket = connection.create_bucket(bucket_name2)
+        rgw_utils.radosgw_agent_sync_all(ctx)
+
+        # compare the metadata for the bucket between different regions, make sure it matches
+        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err1, out1) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            log.debug('metadata 1 %r', out1)
+            log.debug('metadata 2 %r', out2)
+            assert out1 == out2
+
+            # get the bucket.instance info and compare that
+            src_bucket_id = out1['data']['bucket']['bucket_id']
+            dest_bucket_id = out2['data']['bucket']['bucket_id']
+            (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+                'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+                bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
+                check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+                'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+                bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
+                check_status=True)
+            del out1['data']['bucket_info']['bucket']['pool']
+            del out1['data']['bucket_info']['bucket']['index_pool']
+            del out1['data']['bucket_info']['bucket']['data_extra_pool']
+            del out2['data']['bucket_info']['bucket']['pool']
+            del out2['data']['bucket_info']['bucket']['index_pool']
+            del out2['data']['bucket_info']['bucket']['data_extra_pool']
+            assert out1 == out2
+
+        same_region = 0
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+
+            source_region = rgw_utils.region_for_client(ctx, source_client)
+            dest_region = rgw_utils.region_for_client(ctx, dest_client)
+
+            # 301 is only returned for requests to something in a different region
+            if source_region == dest_region:
+                log.debug('301 is only returned for requests to something in a different region')
+                same_region += 1
+                continue
+
+            # Attempt to create a new connection with user1 to the destination RGW
+            log.debug('Attempt to create a new connection with user1 to the destination RGW')
+            # and use that to attempt a delete (that should fail)
+
+            (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
+            connection_dest = boto.s3.connection.S3Connection(
+                aws_access_key_id=access_key,
+                aws_secret_access_key=secret_key,
+                is_secure=False,
+                port=dest_remote_port,
+                host=dest_remote_host,
+                calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+                )
+
+            # this should fail
+            r, content = send_raw_http_request(connection_dest, 'DELETE', bucket_name2, '', follow_redirects = False)
+            assert r.status == 301
+
+            # now delete the bucket on the source RGW and do another sync
+            log.debug('now delete the bucket on the source RGW and do another sync')
+            bucket.delete()
+            rgw_utils.radosgw_agent_sync_all(ctx)
+
+        if same_region == len(ctx.radosgw_agent.config):
+            bucket.delete()
+            rgw_utils.radosgw_agent_sync_all(ctx)
+
+        # make sure that the bucket no longer exists in either region
+        log.debug('make sure that the bucket no longer exists in either region')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+            (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+            # Both of the previous calls should have errors due to requesting
+            # metadata for non-existent buckets
+            assert err1
+            assert err2
+
+        # create a bucket and then sync it
+        log.debug('create a bucket and then sync it')
+        bucket = connection.create_bucket(bucket_name2)
+        rgw_utils.radosgw_agent_sync_all(ctx)
+
+        # compare the metadata for the bucket between different regions, make sure it matches
+        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err1, out1) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            assert out1 == out2
+
+        # Now delete the bucket and recreate it with a different user
+        log.debug('Now delete the bucket and recreate it with a different user')
+        # within the same window of time and then sync.
+        bucket.delete()
+        bucket = connection2.create_bucket(bucket_name2)
+        rgw_utils.radosgw_agent_sync_all(ctx)
+
+        # compare the metadata for the bucket between different regions, make sure it matches
+        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+        # user2 should own the bucket in both regions
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err1, out1) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            assert out1 == out2
+            assert out1['data']['owner'] == user2
+            assert out1['data']['owner'] != user1
+
+        # now we're going to use this bucket to test meta-data update propagation
+        log.debug('now we\'re going to use this bucket to test meta-data update propagation')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+
+            # get the metadata so we can tweak it
+            log.debug('get the metadata so we can tweak it')
+            (err, orig_data) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+
+            # manually edit mtime for this bucket to be 300 seconds in the past
+            log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
+            new_data = copy.deepcopy(orig_data)
+            mtime = datetime.datetime.strptime(orig_data['mtime'], "%Y-%m-%d %H:%M:%S.%fZ") - datetime.timedelta(300)
+            new_data['mtime'] = unicode(mtime.strftime("%Y-%m-%d %H:%M:%S.%fZ"))
+            log.debug("new mtime %s", mtime)
+            assert new_data != orig_data
+            (err, out) = rgwadmin(ctx, source_client,
+                ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                stdin=StringIO(json.dumps(new_data)),
+                check_status=True)
+
+            # get the metadata and make sure that the 'put' worked
+            log.debug('get the metadata and make sure that the \'put\' worked')
+            (err, out) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            assert out == new_data
+
+            # sync to propagate the new metadata
+            log.debug('sync to propagate the new metadata')
+            rgw_utils.radosgw_agent_sync_all(ctx)
+
+            # get the metadata from the dest and compare it to what we just set
+            log.debug('get the metadata from the dest and compare it to what we just set')
+            # and what the source region has.
+            (err1, out1) = rgwadmin(ctx, source_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            (err2, out2) = rgwadmin(ctx, dest_client,
+                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+                check_status=True)
+            # yeah for the transitive property
+            assert out1 == out2
+            assert out1 == new_data
+
+        # now we delete the bucket
+        log.debug('now we delete the bucket')
+        bucket.delete()
+
+        log.debug('sync to propagate the deleted bucket')
+        rgw_utils.radosgw_agent_sync_all(ctx)
+
+        # Delete user2 as later tests do not expect it to exist.
+        # Verify that it is gone on both regions
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            source_client = c_config['src']
+            dest_client = c_config['dest']
+            (err, out) = rgwadmin(ctx, source_client,
+                ['user', 'rm', '--uid', user2], check_status=True)
+            rgw_utils.radosgw_agent_sync_all(ctx)
+            # The two 'user info' calls should fail and not return any data
+            # since we just deleted this user.
+            (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
+            assert out is None
+            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
+            assert out is None
+
+        # Test data sync
+
+        # First create a bucket for data sync test purpose
+        bucket = connection.create_bucket(bucket_name + 'data')
+
+        # Create a tiny file and check if in sync
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            if c_config.get('metadata-only'):
+                continue
+
+            for full in (True, False):
+                source_client = c_config['src']
+                dest_client = c_config['dest']
+                k = boto.s3.key.Key(bucket)
+                k.key = 'tiny_file'
+                k.set_contents_from_string("123456789")
+                safety_window = rgw_utils.radosgw_data_log_window(ctx, source_client)
+                time.sleep(safety_window)
+                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)
+                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+                dest_connection = boto.s3.connection.S3Connection(
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key,
+                    is_secure=False,
+                    port=dest_port,
+                    host=dest_host,
+                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+                )
+                dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
+                assert k.get_contents_as_string() == dest_k.get_contents_as_string()
+
+                # check that deleting it removes it from the dest zone
+                k.delete()
+                time.sleep(safety_window)
+                # full sync doesn't handle deleted objects yet
+                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)
+
+                dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
+                dest_k = dest_bucket.get_key('tiny_file')
+                assert dest_k is None, 'object not deleted from destination zone'
+
+        # finally we delete the bucket
+        bucket.delete()
+
+        bucket = connection.create_bucket(bucket_name + 'data2')
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            if c_config.get('metadata-only'):
+                continue
+
+            for full in (True, False):
+                source_client = c_config['src']
+                dest_client = c_config['dest']
+                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+                dest_connection = boto.s3.connection.S3Connection(
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key,
+                    is_secure=False,
+                    port=dest_port,
+                    host=dest_host,
+                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+                )
+                for i in range(20):
+                    k = boto.s3.key.Key(bucket)
+                    k.key = 'tiny_file_' + str(i)
+                    k.set_contents_from_string(str(i) * 100)
+
+                safety_window = rgw_utils.radosgw_data_log_window(ctx, source_client)
+                time.sleep(safety_window)
+                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)
+
+                for i in range(20):
+                    dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
+                    assert (str(i) * 100) == dest_k.get_contents_as_string()
+                    k = boto.s3.key.Key(bucket)
+                    k.key = 'tiny_file_' + str(i)
+                    k.delete()
+
+                # check that deleting removes the objects from the dest zone
+                time.sleep(safety_window)
+                # full sync doesn't delete deleted objects yet
+                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)
+
+                for i in range(20):
+                    dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
+                    dest_k = dest_bucket.get_key('tiny_file_' + str(i))
+                    assert dest_k is None, 'object %d not deleted from destination zone' % i
+        bucket.delete()
+
+    # end of 'if multi_region_run:'
+
+    # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+    (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+        check_status=True)
+
+    # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+    assert out['suspended']
+
+    # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+    (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
+
+    # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+    assert not out['suspended']
+
+    # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+    (err, out) = rgwadmin(ctx, client, [
+            'key', 'create', '--uid', user1,
+            '--access-key', access_key2, '--secret', secret_key2,
+            ], check_status=True)
+
+    # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
+        check_status=True)
+    assert len(out['keys']) == 2
+    assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+    assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+    # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+    (err, out) = rgwadmin(ctx, client, [
+            'key', 'rm', '--uid', user1,
+            '--access-key', access_key2,
+            ], check_status=True)
+    assert len(out['keys']) == 1
+    assert out['keys'][0]['access_key'] == access_key
+    assert out['keys'][0]['secret_key'] == secret_key
+
+    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+    subuser_access = 'full'
+    subuser_perm = 'full-control'
+
+    (err, out) = rgwadmin(ctx, client, [
+            'subuser', 'create', '--subuser', subuser1,
+            '--access', subuser_access
+            ], check_status=True)
+
+    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+    (err, out) = rgwadmin(ctx, client, [
+            'subuser', 'modify', '--subuser', subuser1,
+            '--secret', swift_secret1,
+            '--key-type', 'swift',
+            ], check_status=True)
+
+    # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+
+    assert out['subusers'][0]['permissions'] == subuser_perm
+
+    # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+    assert len(out['swift_keys']) == 1
+    assert out['swift_keys'][0]['user'] == subuser1
+    assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+    # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+    (err, out) = rgwadmin(ctx, client, [
+            'subuser', 'create', '--subuser', subuser2,
+            '--secret', swift_secret2,
+            '--key-type', 'swift',
+            ], check_status=True)
+
+    # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+    assert len(out['swift_keys']) == 2
+    assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+    assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+    # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+    (err, out) = rgwadmin(ctx, client, [
+            'key', 'rm', '--subuser', subuser1,
+            '--key-type', 'swift',
+            ], check_status=True)
+    assert len(out['swift_keys']) == 1
+
+    # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+    (err, out) = rgwadmin(ctx, client, [
+            'subuser', 'rm', '--subuser', subuser1,
+            ], check_status=True)
+    assert len(out['subusers']) == 1
+
+    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key are removed'
+    (err, out) = rgwadmin(ctx, client, [
+            'subuser', 'rm', '--subuser', subuser2,
+            '--key-type', 'swift', '--purge-keys',
+            ], check_status=True)
+    assert len(out['swift_keys']) == 0
+    assert len(out['subusers']) == 0
+
+    # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
+        check_status=True)
+    assert len(out) == 0
+
+    if multi_region_run:
+        rgw_utils.radosgw_agent_sync_all(ctx)
+
+    # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+    assert len(out) == 0
+
+    # create a first bucket
+    bucket = connection.create_bucket(bucket_name)
+
+    # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+    assert len(out) == 1
+    assert out[0] == bucket_name
+
+    # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
+    assert len(out) >= 1
+    assert bucket_name in out
+
+    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
+    bucket2 = connection.create_bucket(bucket_name + '2')
+    bucket3 = connection.create_bucket(bucket_name + '3')
+    bucket4 = connection.create_bucket(bucket_name + '4')
+    # the 5th should fail.
+    failed = False
+    try:
+        connection.create_bucket(bucket_name + '5')
+    except Exception:
+        failed = True
+    assert failed
+
+    # delete the buckets
+    bucket2.delete()
+    bucket3.delete()
+    bucket4.delete()
+
+    # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+    (err, out) = rgwadmin(ctx, client, [
+            'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+    assert out['owner'] == user1
+    bucket_id = out['id']
+
+    # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
+    assert len(out) == 1
+    assert out[0]['id'] == bucket_id    # does it return the same ID twice in a row?
+
+    # use some space
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('one')
+
+    # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+    (err, out) = rgwadmin(ctx, client, [
+            'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+    assert out['id'] == bucket_id
+    assert out['usage']['rgw.main']['num_objects'] == 1
+    assert out['usage']['rgw.main']['size_kb'] > 0
+
+    # reclaim it
+    key.delete()
+
+    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+    (err, out) = rgwadmin(ctx, client,
+        ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
+        check_status=True)
+
+    # create a second user to link the bucket to
+    (err, out) = rgwadmin(ctx, client, [
+            'user', 'create',
+            '--uid', user2,
+            '--display-name', display_name2,
+            '--access-key', access_key2,
+            '--secret', secret_key2,
+            '--max-buckets', '1',
+            ],
+            check_status=True)
+
+    # try creating an object with the first user before the bucket is relinked
+    denied = False
+    key = boto.s3.key.Key(bucket)
+
+    try:
+        key.set_contents_from_string('two')
+    except boto.exception.S3ResponseError:
+        denied = True
+
+    assert not denied
+
+    # delete the object
+    key.delete()
+
+    # link the bucket to another user
+    (err, out) = rgwadmin(ctx, client, ['metadata', 'get', 'bucket:{n}'.format(n=bucket_name)],
+        check_status=True)
+
+    bucket_data = out['data']
+    assert bucket_data['bucket']['name'] == bucket_name
+
+    bucket_id = bucket_data['bucket']['bucket_id']
+
+    # link the bucket to another user
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name, '--bucket-id', bucket_id],
+        check_status=True)
+
+    # try to remove user, should fail (has a linked bucket)
+    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
+    assert err
+
+    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+    (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
+        check_status=True)
+
+    # relink the bucket to the first user and delete the second user
+    (err, out) = rgwadmin(ctx, client,
+        ['bucket', 'link', '--uid', user1, '--bucket', bucket_name, '--bucket-id', bucket_id],
+        check_status=True)
+
+    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
+        check_status=True)
+
+    # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+    # upload an object
+    object_name = 'four'
+    key = boto.s3.key.Key(bucket, object_name)
+    key.set_contents_from_string(object_name)
+
+    # now delete it
+    (err, out) = rgwadmin(ctx, client,
+        ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
+        check_status=True)
+
+    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+    (err, out) = rgwadmin(ctx, client, [
+            'bucket', 'stats', '--bucket', bucket_name],
+            check_status=True)
+    assert out['id'] == bucket_id
+    assert out['usage']['rgw.main']['num_objects'] == 0
+
+    # list log objects
+    # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
+    (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
+    assert len(out) > 0
+
+    for obj in out:
+        # TESTCASE 'log-show','log','show','after activity','returns expected info'
+        if obj[:4] == 'meta' or obj[:4] == 'data' or obj[:18] == 'obj_delete_at_hint':
+            continue
+
+        (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
+            check_status=True)
+        assert len(rgwlog) > 0
+
+        # exempt bucket_name2 from checking as it was only used for multi-region tests
+        assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
+        assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
+        assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
+        for entry in rgwlog['log_entries']:
+            log.debug('checking log entry: %s', entry)
+            assert entry['bucket'] == rgwlog['bucket']
+            possible_buckets = [bucket_name + '5', bucket_name2]
+            user = entry['user']
+            assert user == user1 or user.endswith('system-user') or \
+                rgwlog['bucket'] in possible_buckets
+
+        # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
+        (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
+            check_status=True)
+
+    # TODO: show log by bucket+date
+
+    # need to wait for all usage data to get flushed, should take up to 30 seconds
+    timestamp = time.time()
+    while time.time() - timestamp <= (20 * 60):      # wait up to 20 minutes
+        (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj'])  # last operation we did is delete obj, wait for it to flush
+        if get_user_successful_ops(out, user1) > 0:
+            break
+        time.sleep(1)
+
+    assert time.time() - timestamp <= (20 * 60)
+
+    # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+    (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
+    assert len(out['entries']) > 0
+    assert len(out['summary']) > 0
+
+    user_summary = get_user_summary(out, user1)
+
+    total = user_summary['total']
+    assert total['successful_ops'] > 0
+
+    # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+    (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+        check_status=True)
+    assert len(out['entries']) > 0
+    assert len(out['summary']) > 0
+    user_summary = out['summary'][0]
+    for entry in user_summary['categories']:
+        assert entry['successful_ops'] > 0
+    assert user_summary['user'] == user1
+
+    # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+    test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+    for cat in test_categories:
+        (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
+            check_status=True)
+        assert len(out['summary']) > 0
+        user_summary = out['summary'][0]
+        assert user_summary['user'] == user1
+        assert len(user_summary['categories']) == 1
+        entry = user_summary['categories'][0]
+        assert entry['category'] == cat
+        assert entry['successful_ops'] > 0
+
+    # the usage flush interval is 30 seconds; wait that much and then some
+    # to make sure everything has been flushed
+    time.sleep(35)
+
+    # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+    (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
+        check_status=True)
+    (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+        check_status=True)
+    assert len(out['entries']) == 0
+    assert len(out['summary']) == 0
+
+    # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+    (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+        check_status=True)
+
+    # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+    try:
+        key = boto.s3.key.Key(bucket)
+        key.set_contents_from_string('five')
+    except boto.exception.S3ResponseError as e:
+        assert e.status == 403
+
+    # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+    (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
+        check_status=True)
+
+    # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('six')
+
+    # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
+
+    # create an object large enough to be split into multiple parts
+    test_string = 'foo'*10000000
+
+    big_key = boto.s3.key.Key(bucket)
+    big_key.set_contents_from_string(test_string)
+
+    # now delete the head
+    big_key.delete()
+
+    # wait a bit to give the garbage collector time to cycle
+    time.sleep(15)
+
+    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+    assert len(out) > 0
+
+    # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
+    (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
+
+    #confirm
+    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+    assert len(out) == 0
+
+    # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+    assert err
+
+    # delete should fail because ``key`` still exists
+    try:
+        bucket.delete()
+    except boto.exception.S3ResponseError as e:
+        assert e.status == 409
+
+    key.delete()
+    bucket.delete()
+
+    # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+    bucket = connection.create_bucket(bucket_name)
+
+    # create an object
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('seven')
+
+    # should be private already but guarantee it
+    key.set_acl('private')
+
+    (err, out) = rgwadmin(ctx, client,
+        ['policy', '--bucket', bucket.name, '--object', key.key],
+        check_status=True, format='xml')
+
+    acl = get_acl(key)
+
+    assert acl == out.strip('\n')
+
+    # add another grantee by making the object public read
+    key.set_acl('public-read')
+
+    (err, out) = rgwadmin(ctx, client,
+        ['policy', '--bucket', bucket.name, '--object', key.key],
+        check_status=True, format='xml')
+
+    acl = get_acl(key)
+
+    assert acl == out.strip('\n')
+
+    # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+    bucket = connection.create_bucket(bucket_name)
+    key_name = ['eight', 'nine', 'ten', 'eleven']
+    for i in range(4):
+        key = boto.s3.key.Key(bucket)
+        key.set_contents_from_string(key_name[i])
+
+    (err, out) = rgwadmin(ctx, client,
+        ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
+        check_status=True)
+
+    # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+    caps='user=read'
+    (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
+
+    assert out['caps'][0]['perm'] == 'read'
+
+    # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+    (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
+
+    assert not out['caps']
+
+    # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+    bucket = connection.create_bucket(bucket_name)
+    key = boto.s3.key.Key(bucket)
+
+    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+    assert err
+
+    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+    bucket = connection.create_bucket(bucket_name)
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('twelve')
+
+    (err, out) = rgwadmin(ctx, client,
+        ['user', 'rm', '--uid', user1, '--purge-data' ],
+        check_status=True)
+
+    # TESTCASE 'rm-user3','user','rm','deleted user','fails'
+    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+    assert err
+
+    # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
+    #
+
+    if realm is None:
+        (err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default'])
+    else:
+        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+    orig_placement_pools = len(out['placement_pools'])
+
+    # removed this test, it is not correct to assume that zone has default placement, it really
+    # depends on how we set it up before
+    #
+    # assert len(out) > 0
+    # assert len(out['placement_pools']) == 1
+
+    # default_rule = out['placement_pools'][0]
+    # assert default_rule['key'] == 'default-placement'
+
+    rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
+
+    out['placement_pools'].append(rule)
+
+    (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
+        stdin=StringIO(json.dumps(out)),
+        check_status=True)
+
+    if realm is None:
+        (err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default'])
+    else:
+        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+    assert len(out) > 0
+    assert len(out['placement_pools']) == orig_placement_pools + 1
diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py
new file mode 100644
index 0000000..7bd72d1
--- /dev/null
+++ b/qa/tasks/radosgw_admin_rest.py
@@ -0,0 +1,668 @@
+"""
+Run a series of rgw admin commands through the rest interface.
+
+The test cases in this file have been annotated for inventory.
+To extract the inventory (in csv format) use the command:
+
+   grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+
+"""
+from cStringIO import StringIO
+import logging
+import json
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import requests
+import time
+
+from boto.connection import AWSAuthConnection
+from teuthology import misc as teuthology
+from util.rgw import get_user_summary, get_user_successful_ops
+
+log = logging.getLogger(__name__)
+
+def rgwadmin(ctx, client, cmd):
+    """
+    Perform rgw admin command
+
+    :param client: client
+    :param cmd: command to execute.
+    :return: command exit status, json result.
+    """
+    log.info('radosgw-admin: %s' % cmd)
+    testdir = teuthology.get_testdir(ctx)
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'radosgw-admin',
+        '--log-to-stderr',
+        '--format', 'json',
+        ]
+    pre.extend(cmd)
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    proc = remote.run(
+        args=pre,
+        check_status=False,
+        stdout=StringIO(),
+        stderr=StringIO(),
+        )
+    r = proc.exitstatus
+    out = proc.stdout.getvalue()
+    j = None
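+    # On success, try to decode the output as JSON, falling back to the raw
+    # string when the command prints something that is not valid JSON.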
+    if not r and out != '':
+        try:
+            j = json.loads(out)
+            log.info(' json result: %s' % j)
+        except ValueError:
+            j = out
+            log.info(' raw result: %s' % j)
+    return (r, j)
+
+
+def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
+    """
+    Perform a radosgw-admin command through the REST admin interface.
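+
+    For example (illustrative values, mirroring how the helper is used by
+    the test cases below)::
+
+        (status, out) = rgwadmin_rest(conn, ['user', 'info'], {'uid': 'foo'})
+
+    The return value is the HTTP status code and the decoded JSON body.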
+    """
+    log.info('radosgw-admin-rest: %s %s' % (cmd, params))
+    put_cmds = ['create', 'link', 'add']
+    post_cmds = ['unlink', 'modify']
+    delete_cmds = ['trim', 'rm', 'process']
+    get_cmds = ['check', 'info', 'show', 'list']
+
+    bucket_sub_resources = ['object', 'policy', 'index']
+    user_sub_resources = ['subuser', 'key', 'caps']
+    zone_sub_resources = ['pool', 'log', 'garbage']
+
+    def get_cmd_method_and_handler(cmd):
+        """
+        Get the rest command and handler from information in cmd and
+        from the imported requests object.
+        """
+        if cmd[1] in put_cmds:
+            return 'PUT', requests.put
+        elif cmd[1] in delete_cmds:
+            return 'DELETE', requests.delete
+        elif cmd[1] in post_cmds:
+            return 'POST', requests.post
+        elif cmd[1] in get_cmds:
+            return 'GET', requests.get
+
+    def get_resource(cmd):
+        """
+        Get the name of the resource from information in cmd.
+        """
+        if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
+            if cmd[0] == 'bucket':
+                return 'bucket', ''
+            else:
+                return 'bucket', cmd[0]
+        elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
+            if cmd[0] == 'user':
+                return 'user', ''
+            else:
+                return 'user', cmd[0]
+        elif cmd[0] == 'usage':
+            return 'usage', ''
+        elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
+            if cmd[0] == 'zone':
+                return 'zone', ''
+            else:
+                return 'zone', cmd[0]
+
+    def build_admin_request(conn, method, resource = '', headers=None, data='',
+            query_args=None, params=None):
+        """
+        Build an administrative request adapted from the build_request()
+        method of boto.connection
+        """
+
+        path = conn.calling_format.build_path_base('admin', resource)
+        auth_path = conn.calling_format.build_auth_path('admin', resource)
+        host = conn.calling_format.build_host(conn.server_name(), 'admin')
+        if query_args:
+            path += '?' + query_args
+            boto.log.debug('path=%s' % path)
+            auth_path += '?' + query_args
+            boto.log.debug('auth_path=%s' % auth_path)
+        return AWSAuthConnection.build_base_http_request(conn, method, path,
+                auth_path, params, headers, data, host)
+
+    method, handler = get_cmd_method_and_handler(cmd)
+    resource, query_args = get_resource(cmd)
+    request = build_admin_request(connection, method, resource,
+            query_args=query_args, headers=headers)
+
+    url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
+            host=request.host, path=request.path)
+
+    request.authorize(connection=connection)
+    result = handler(url, params=params, headers=request.headers)
+
+    if raw:
+        log.info(' text result: %s' % result.text)
+        return result.status_code, result.text
+    else:
+        log.info(' json result: %s' % result.json())
+        return result.status_code, result.json()
+
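+# A brief sketch of how the dispatch above works (values are illustrative,
+# assuming ``admin_conn`` is an S3Connection whose user holds admin caps):
+# the first element of cmd selects the /admin resource and the second selects
+# the HTTP verb, e.g.
+#
+#     rgwadmin_rest(admin_conn, ['user', 'create'], {'uid': 'foo'})  # PUT    /admin/user
+#     rgwadmin_rest(admin_conn, ['user', 'info'],   {'uid': 'foo'})  # GET    /admin/user
+#     rgwadmin_rest(admin_conn, ['key', 'rm'],      {'uid': 'foo'})  # DELETE /admin/user?key
+#
+# and each call returns (http_status_code, decoded_json_body).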
+
+def task(ctx, config):
+    """
+    Test radosgw-admin functionality through the RESTful interface
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    # just use the first client...
+    client = clients[0]
+
+    ##
+    admin_user = 'ada'
+    admin_display_name = 'Ms. Admin User'
+    admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
+    admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
+    admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
+
+    user1 = 'foo'
+    user2 = 'fud'
+    subuser1 = 'foo:foo1'
+    subuser2 = 'foo:foo2'
+    display_name1 = 'Foo'
+    display_name2 = 'Fud'
+    email = 'foo@foo.com'
+    access_key = '9te6NH5mcdcq0Tc5i8i1'
+    secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+    access_key2 = 'p5YnriCv1nAtykxBrupQ'
+    secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+    swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+    swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+    bucket_name = 'myfoo'
+
+    # legend (test cases can be easily grep-ed out)
+    # TESTCASE 'testname','object','method','operation','assertion'
+    # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
+    (err, out) = rgwadmin(ctx, client, [
+            'user', 'create',
+            '--uid', admin_user,
+            '--display-name', admin_display_name,
+            '--access-key', admin_access_key,
+            '--secret', admin_secret_key,
+            '--max-buckets', '0',
+            '--caps', admin_caps
+            ])
+    logging.error(out)
+    logging.error(err)
+    assert not err
+
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    remote_host = remote.name.split('@')[1]
+    admin_conn = boto.s3.connection.S3Connection(
+        aws_access_key_id=admin_access_key,
+        aws_secret_access_key=admin_secret_key,
+        is_secure=False,
+        port=7280,
+        host=remote_host,
+        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+        )
+
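+    # Note: the admin REST calls below hit the same radosgw endpoint as normal
+    # S3 traffic (port 7280 in this task) but under the /admin path, signed
+    # with the admin user's S3 credentials.  Roughly, a call such as
+    #     rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
+    # becomes a signed GET to http://<rgw-host>:7280/admin/user?uid=foo
+    # (the literal URL is illustrative).
+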
+    # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
+    assert ret == 404
+
+    # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['user', 'create'],
+            {'uid' : user1,
+             'display-name' :  display_name1,
+             'email' : email,
+             'access-key' : access_key,
+             'secret-key' : secret_key,
+             'max-buckets' : '4'
+            })
+
+    assert ret == 200
+
+    # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+    assert out['user_id'] == user1
+    assert out['email'] == email
+    assert out['display_name'] == display_name1
+    assert len(out['keys']) == 1
+    assert out['keys'][0]['access_key'] == access_key
+    assert out['keys'][0]['secret_key'] == secret_key
+    assert not out['suspended']
+
+    # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+    assert ret == 200
+
+    # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+    assert ret == 200
+    assert out['suspended']
+
+    # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+    assert ret == 200
+
+    # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+    assert ret == 200
+    assert not out['suspended']
+
+    # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['key', 'create'],
+            {'uid' : user1,
+             'access-key' : access_key2,
+             'secret-key' : secret_key2
+            })
+
+
+    assert ret == 200
+
+    # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+    assert ret == 200
+    assert len(out['keys']) == 2
+    assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+    assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+    # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['key', 'rm'],
+            {'uid' : user1,
+             'access-key' : access_key2
+            })
+
+    assert ret == 200
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+    assert len(out['keys']) == 1
+    assert out['keys'][0]['access_key'] == access_key
+    assert out['keys'][0]['secret_key'] == secret_key
+
+    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['subuser', 'create'],
+            {'subuser' : subuser1,
+             'secret-key' : swift_secret1,
+             'key-type' : 'swift'
+            })
+
+    assert ret == 200
+
+    # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+    assert ret == 200
+    assert len(out['swift_keys']) == 1
+    assert out['swift_keys'][0]['user'] == subuser1
+    assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+    # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['subuser', 'create'],
+            {'subuser' : subuser2,
+             'secret-key' : swift_secret2,
+             'key-type' : 'swift'
+            })
+
+    assert ret == 200
+
+    # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' :  user1})
+    assert ret == 200
+    assert len(out['swift_keys']) == 2
+    assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+    assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+    # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['key', 'rm'],
+            {'subuser' : subuser1,
+             'key-type' :'swift'
+            })
+
+    assert ret == 200
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' :  user1})
+    assert len(out['swift_keys']) == 1
+
+    # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['subuser', 'rm'],
+            {'subuser' : subuser1
+            })
+
+    assert ret == 200
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' :  user1})
+    assert len(out['subusers']) == 1
+
+    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key are removed'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['subuser', 'rm'],
+            {'subuser' : subuser2,
+             'key-type' : 'swift',
+             'purge-keys' : True
+            })
+
+    assert ret == 200
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' :  user1})
+    assert len(out['swift_keys']) == 0
+    assert len(out['subusers']) == 0
+
+    # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' :  user1})
+    assert ret == 200
+    assert len(out) == 0
+
+    # connect to rgw
+    connection = boto.s3.connection.S3Connection(
+        aws_access_key_id=access_key,
+        aws_secret_access_key=secret_key,
+        is_secure=False,
+        port=7280,
+        host=remote_host,
+        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+        )
+
+    # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+    assert ret == 200
+    assert len(out) == 0
+
+    # create a first bucket
+    bucket = connection.create_bucket(bucket_name)
+
+    # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+    assert ret == 200
+    assert len(out) == 1
+    assert out[0] == bucket_name
+
+    # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+
+    assert ret == 200
+    assert out['owner'] == user1
+    bucket_id = out['id']
+
+    # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+    assert ret == 200
+    assert len(out) == 1
+    assert out[0]['id'] == bucket_id    # does it return the same ID twice in a row?
+
+    # use some space
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('one')
+
+    # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+    assert ret == 200
+    assert out['id'] == bucket_id
+    assert out['usage']['rgw.main']['num_objects'] == 1
+    assert out['usage']['rgw.main']['size_kb'] > 0
+
+    # reclaim it
+    key.delete()
+
+    # TESTCASE 'bucket-unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
+
+    assert ret == 200
+
+    # create a second user to link the bucket to
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['user', 'create'],
+            {'uid' : user2,
+            'display-name' :  display_name2,
+            'access-key' : access_key2,
+            'secret-key' : secret_key2,
+            'max-buckets' : '1',
+            })
+
+    assert ret == 200
+
+    # try creating an object with the first user before the bucket is relinked
+    denied = False
+    key = boto.s3.key.Key(bucket)
+
+    try:
+        key.set_contents_from_string('two')
+    except boto.exception.S3ResponseError:
+        denied = True
+
+    assert not denied
+
+    # delete the object
+    key.delete()
+
+    # link the bucket to another user
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
+
+    assert ret == 200
+
+    # try creating an object with the first user which should cause an error
+    key = boto.s3.key.Key(bucket)
+
+    try:
+        key.set_contents_from_string('three')
+    except boto.exception.S3ResponseError:
+        denied = True
+
+    assert denied
+
+    # relink the bucket to the first user and delete the second user
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
+    assert ret == 200
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
+    assert ret == 200
+
+    # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+    # upload an object
+    object_name = 'four'
+    key = boto.s3.key.Key(bucket, object_name)
+    key.set_contents_from_string(object_name)
+
+    # now delete it
+    (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
+    assert ret == 200
+
+    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+    assert ret == 200
+    assert out['id'] == bucket_id
+    assert out['usage']['rgw.main']['num_objects'] == 0
+
+    # create a bucket for deletion stats
+    useless_bucket = connection.create_bucket('useless_bucket')
+    useless_key = useless_bucket.new_key('useless_key')
+    useless_key.set_contents_from_string('useless string')
+
+    # delete it
+    useless_key.delete()
+    useless_bucket.delete()
+
+    # wait for the statistics to flush
+    time.sleep(60)
+
+    # need to wait for all usage data to get flushed; poll for up to 20 minutes
+    timestamp = time.time()
+    while time.time() - timestamp <= (20 * 60):      # wait up to 20 minutes
+        (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'})  # last operation we did is delete obj, wait for it to flush
+
+        if get_user_successful_ops(out, user1) > 0:
+            break
+        time.sleep(1)
+
+    assert time.time() - timestamp <= (20 * 60)
+
+    # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
+    assert ret == 200
+    assert len(out['entries']) > 0
+    assert len(out['summary']) > 0
+    user_summary = get_user_summary(out, user1)
+    total = user_summary['total']
+    assert total['successful_ops'] > 0
+
+    # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+    assert ret == 200
+    assert len(out['entries']) > 0
+    assert len(out['summary']) > 0
+    user_summary = out['summary'][0]
+    for entry in user_summary['categories']:
+        assert entry['successful_ops'] > 0
+    assert user_summary['user'] == user1
+
+    # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+    test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+    for cat in test_categories:
+        (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
+        assert ret == 200
+        assert len(out['summary']) > 0
+        user_summary = out['summary'][0]
+        assert user_summary['user'] == user1
+        assert len(user_summary['categories']) == 1
+        entry = user_summary['categories'][0]
+        assert entry['category'] == cat
+        assert entry['successful_ops'] > 0
+
+    # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
+    assert ret == 200
+    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+    assert ret == 200
+    assert len(out['entries']) == 0
+    assert len(out['summary']) == 0
+
+    # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+    assert ret == 200
+
+    # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+    denied = False
+    try:
+        key = boto.s3.key.Key(bucket)
+        key.set_contents_from_string('five')
+    except boto.exception.S3ResponseError as e:
+        denied = True
+        assert e.status == 403
+    assert denied
+
+    # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' :  user1, 'suspended' : 'false'})
+    assert ret == 200
+
+    # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('six')
+
+    # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
+
+    # create an object large enough to be split into multiple parts
+    test_string = 'foo'*10000000
+
+    big_key = boto.s3.key.Key(bucket)
+    big_key.set_contents_from_string(test_string)
+
+    # now delete the head
+    big_key.delete()
+
+    # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+    assert ret == 409
+
+    # delete should fail because ``key`` still exists
+    try:
+        bucket.delete()
+    except boto.exception.S3ResponseError as e:
+        assert e.status == 409
+
+    key.delete()
+    bucket.delete()
+
+    # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+    bucket = connection.create_bucket(bucket_name)
+
+    # create an object
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('seven')
+
+    # should be private already but guarantee it
+    key.set_acl('private')
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+    assert ret == 200
+
+    acl = key.get_xml_acl()
+    assert acl == out.strip('\n')
+
+    # add another grantee by making the object public read
+    key.set_acl('public-read')
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+    assert ret == 200
+
+    acl = key.get_xml_acl()
+    assert acl == out.strip('\n')
+
+    # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+    bucket = connection.create_bucket(bucket_name)
+    key_name = ['eight', 'nine', 'ten', 'eleven']
+    for i in range(4):
+        key = boto.s3.key.Key(bucket)
+        key.set_contents_from_string(key_name[i])
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
+    assert ret == 200
+
+    # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+    caps = 'usage=read'
+    (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' :  user1, 'user-caps' : caps})
+    assert ret == 200
+    assert out[0]['perm'] == 'read'
+
+    # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+    (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' :  user1, 'user-caps' : caps})
+    assert ret == 200
+    assert not out
+
+    # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+    bucket = connection.create_bucket(bucket_name)
+    key = boto.s3.key.Key(bucket)
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+    assert ret == 409
+
+    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+    bucket = connection.create_bucket(bucket_name)
+    key = boto.s3.key.Key(bucket)
+    key.set_contents_from_string('twelve')
+
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
+    assert ret == 200
+
+    # TESTCASE 'rm-user3','user','info','deleted user','fails'
+    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' :  user1})
+    assert ret == 404
+
diff --git a/qa/tasks/radosgw_agent.py b/qa/tasks/radosgw_agent.py
new file mode 100644
index 0000000..0254805
--- /dev/null
+++ b/qa/tasks/radosgw_agent.py
@@ -0,0 +1,211 @@
+"""
+Run rados gateway agent in test mode
+"""
+import contextlib
+import logging
+import argparse
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+import util.rgw as rgw_utils
+
+log = logging.getLogger(__name__)
+
+def run_radosgw_agent(ctx, config):
+    """
+    Run a single radosgw-agent. See task() for config format.
+    """
+    return_list = list()
+    for (client, cconf) in config.items():
+        # don't process entries that are not clients
+        if not client.startswith('client.'):
+            log.debug('key {data} does not start with \'client.\', moving on'.format(
+                      data=client))
+            continue
+
+        src_client = cconf['src']
+        dest_client = cconf['dest']
+
+        src_zone = rgw_utils.zone_for_client(ctx, src_client)
+        dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
+
+        log.info("source is %s", src_zone)
+        log.info("dest is %s", dest_zone)
+
+        testdir = teuthology.get_testdir(ctx)
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        # figure out which branch to pull from
+        branch = cconf.get('force-branch', None)
+        if not branch:
+            branch = cconf.get('branch', 'master')
+        sha1 = cconf.get('sha1')
+        remote.run(
+            args=[
+                'cd', testdir, run.Raw('&&'),
+                'git', 'clone',
+                '-b', branch,
+#                'https://github.com/ceph/radosgw-agent.git',
+                'git://git.ceph.com/radosgw-agent.git',
+                'radosgw-agent.{client}'.format(client=client),
+                ]
+            )
+        if sha1 is not None:
+            remote.run(
+                args=[
+                    'cd', testdir, run.Raw('&&'),
+                    'cd', 'radosgw-agent.{client}'.format(client=client),
+                    run.Raw('&&'),
+                    'git', 'reset', '--hard', sha1,
+                ]
+            )
+        remote.run(
+            args=[
+                'cd', testdir, run.Raw('&&'),
+                'cd', 'radosgw-agent.{client}'.format(client=client),
+                run.Raw('&&'),
+                './bootstrap',
+                ]
+            )
+
+        src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
+                                                              src_zone)
+        dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
+                                                                 dest_zone)
+        src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
+                                                               src_zone)
+        dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
+                                                                 dest_zone)
+        sync_scope = cconf.get('sync-scope', None)
+        port = cconf.get('port', 8000)
+        daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
+        in_args=[
+            'daemon-helper',
+            'kill',
+            '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
+                                                                 client=client),
+            '-v',
+            '--src-access-key', src_access,
+            '--src-secret-key', src_secret,
+            '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port),
+            '--dest-access-key', dest_access,
+            '--dest-secret-key', dest_secret,
+            '--max-entries', str(cconf.get('max-entries', 1000)),
+            '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
+                tdir=testdir,
+                client=client),
+            '--object-sync-timeout', '30',
+            ]
+
+        if cconf.get('metadata-only', False):
+            in_args.append('--metadata-only')
+
+        # the test server and full/incremental flags are mutually exclusive
+        if sync_scope is None:
+            in_args.append('--test-server-host')
+            in_args.append('0.0.0.0')
+            in_args.append('--test-server-port')
+            in_args.append(str(port))
+            log.debug('Starting a sync test server on {client}'.format(client=client))
+            # Stash the radosgw-agent server / port # for use by subsequent tasks
+            ctx.radosgw_agent.endpoint = (client, str(port))
+        else:
+            in_args.append('--sync-scope')
+            in_args.append(sync_scope)
+            log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
+
+        # positional arg for destination must come last
+        in_args.append("http://{addr}:{port}".format(addr=dest_host,
+                                                     port=dest_port))
+
+        return_list.append((client, remote.run(
+            args=in_args,
+            wait=False,
+            stdin=run.PIPE,
+            logger=log.getChild(daemon_name),
+            )))
+    return return_list
+
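+# For reference, the in_args list assembled above boils down to a command line
+# along these lines (paths, keys and hosts are illustrative, not literal):
+#
+#   daemon-helper kill <testdir>/radosgw-agent.<client>/radosgw-agent -v \
+#       --src-access-key <key> --src-secret-key <secret> \
+#       --source http://<src-host>:<src-port> \
+#       --dest-access-key <key> --dest-secret-key <secret> \
+#       --max-entries 1000 \
+#       --log-file <testdir>/archive/rgw_sync_agent.<client>.log \
+#       --object-sync-timeout 30 [--metadata-only] \
+#       [--test-server-host 0.0.0.0 --test-server-port <port> | --sync-scope <scope>] \
+#       http://<dest-host>:<dest-port>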
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run radosgw-agents in test mode.
+
+    Configuration is clients to run the agents on, with settings for
+    source client, destination client, and port to listen on.  Binds
+    to 0.0.0.0. Port defaults to 8000. This must be run on clients
+    that have the correct zone root pools and rgw zone set in
+    ceph.conf, or the task cannot read the region information from the
+    cluster.
+
+    By default, this task will start an HTTP server that will trigger full
+    or incremental syncs based on requests made to it.
+    Alternatively, a single full sync can be triggered by
+    specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
+    by specifying 'sync-scope: incremental' (the loop will sleep
+    '--incremental-sync-delay' seconds between each sync, default is 30 seconds).
+
+    By default, both data and metadata are synced. To only sync
+    metadata, for example because you want to sync between regions,
+    set metadata-only: true.
+
+    An example::
+
+      tasks:
+      - ceph:
+          conf:
+            client.0:
+              rgw zone = foo
+              rgw zone root pool = .root.pool
+            client.1:
+              rgw zone = bar
+              rgw zone root pool = .root.pool2
+      - rgw: # region configuration omitted for brevity
+      - radosgw-agent:
+          client.0:
+            branch: wip-next-feature-branch
+            src: client.0
+            dest: client.1
+            sync-scope: full
+            metadata-only: true
+            # port: 8000 (default)
+          client.1:
+            src: client.1
+            dest: client.0
+            port: 8001
+    """
+    assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
+    log.debug("config is %s", config)
+
+    overrides = ctx.config.get('overrides', {})
+    # merge each client section, but only if it exists in config since there isn't
+    # a sensible default action for this task
+    for client in config.iterkeys():
+        if config[client]:
+            log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
+            teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
+
+    ctx.radosgw_agent = argparse.Namespace()
+    ctx.radosgw_agent.config = config
+
+    procs = run_radosgw_agent(ctx, config)
+
+    ctx.radosgw_agent.procs = procs
+
+    try:
+        yield
+    finally:
+        testdir = teuthology.get_testdir(ctx)
+        try:
+            for client, proc in procs:
+                log.info("shutting down sync agent on %s", client)
+                proc.stdin.close()
+                proc.wait()
+        finally:
+            for client, proc in procs:
+                ctx.cluster.only(client).run(
+                    args=[
+                        'rm', '-rf',
+                        '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
+                                                               client=client)
+                        ]
+                    )
diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py
new file mode 100644
index 0000000..7b3a8ef
--- /dev/null
+++ b/qa/tasks/rbd.py
@@ -0,0 +1,598 @@
+"""
+Rbd testing task
+"""
+import contextlib
+import logging
+import os
+
+from cStringIO import StringIO
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.parallel import parallel
+from teuthology.task.common_fs_utils import generic_mkfs
+from teuthology.task.common_fs_utils import generic_mount
+from teuthology.task.common_fs_utils import default_image_name
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def create_image(ctx, config):
+    """
+    Create an rbd image.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rbd.create_image:
+            client.0:
+                image_name: testimage
+                image_size: 100
+                image_format: 1
+            client.1:
+
+    Image size is expressed as a number of megabytes; default value
+    is 10240.
+
+    Image format value must be either 1 or 2; default value is 1.
+
+    """
+    assert isinstance(config, dict) or isinstance(config, list), \
+        "task create_image only supports a list or dictionary for configuration"
+
+    if isinstance(config, dict):
+        images = config.items()
+    else:
+        images = [(role, None) for role in config]
+
+    testdir = teuthology.get_testdir(ctx)
+    for role, properties in images:
+        if properties is None:
+            properties = {}
+        name = properties.get('image_name', default_image_name(role))
+        size = properties.get('image_size', 10240)
+        fmt = properties.get('image_format', 1)
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+        log.info('Creating image {name} with size {size}'.format(name=name,
+                                                                 size=size))
+        args = [
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'rbd',
+                '-p', 'rbd',
+                'create',
+                '--size', str(size),
+                name,
+            ]
+        # omit format option if using the default (format 1)
+        # since old versions of rbd don't support it
+        if int(fmt) != 1:
+            args += ['--image-format', str(fmt)]
+        remote.run(args=args)
+    try:
+        yield
+    finally:
+        log.info('Deleting rbd images...')
+        for role, properties in images:
+            if properties is None:
+                properties = {}
+            name = properties.get('image_name', default_image_name(role))
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            remote.run(
+                args=[
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    'rbd',
+                    '-p', 'rbd',
+                    'rm',
+                    name,
+                    ],
+                )
+
+@contextlib.contextmanager
+def clone_image(ctx, config):
+    """
+    Clones a parent image.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rbd.clone_image:
+            client.0:
+                parent_name: testimage
+                image_name: cloneimage
+    """
+    assert isinstance(config, dict) or isinstance(config, list), \
+        "task clone_image only supports a list or dictionary for configuration"
+
+    if isinstance(config, dict):
+        images = config.items()
+    else:
+        images = [(role, None) for role in config]
+
+    testdir = teuthology.get_testdir(ctx)
+    for role, properties in images:
+        if properties is None:
+            properties = {}
+
+        name = properties.get('image_name', default_image_name(role))
+        parent_name = properties.get('parent_name')
+        assert parent_name is not None, \
+            "parent_name is required"
+        parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name)
+
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+        log.info('Clone image {parent} to {child}'.format(parent=parent_name,
+                                                          child=name))
+        for cmd in [('snap', 'create', parent_spec),
+                    ('snap', 'protect', parent_spec),
+                    ('clone', parent_spec, name)]:
+            args = [
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    'rbd', '-p', 'rbd'
+                    ]
+            args.extend(cmd)
+            remote.run(args=args)
+
+    try:
+        yield
+    finally:
+        log.info('Deleting rbd clones...')
+        for role, properties in images:
+            if properties is None:
+                properties = {}
+            name = properties.get('image_name', default_image_name(role))
+            parent_name = properties.get('parent_name')
+            parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name)
+
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+
+            for cmd in [('rm', name),
+                        ('snap', 'unprotect', parent_spec),
+                        ('snap', 'rm', parent_spec)]:
+                args = [
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'rbd', '-p', 'rbd'
+                        ]
+                args.extend(cmd)
+                remote.run(args=args)
+
+@contextlib.contextmanager
+def modprobe(ctx, config):
+    """
+    Load the rbd kernel module.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rbd.create_image: [client.0]
+        - rbd.modprobe: [client.0]
+    """
+    log.info('Loading rbd kernel module...')
+    for role in config:
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+        remote.run(
+            args=[
+                'sudo',
+                'modprobe',
+                'rbd',
+                ],
+            )
+    try:
+        yield
+    finally:
+        log.info('Unloading rbd kernel module...')
+        for role in config:
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            remote.run(
+                args=[
+                    'sudo',
+                    'modprobe',
+                    '-r',
+                    'rbd',
+                    # force errors to be ignored; necessary if more
+                    # than one device was created, which may mean
+                    # the module isn't quite ready to go the first
+                    # time through.
+                    run.Raw('||'),
+                    'true',
+                    ],
+                )
+
+@contextlib.contextmanager
+def dev_create(ctx, config):
+    """
+    Map block devices to rbd images.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rbd.create_image: [client.0]
+        - rbd.modprobe: [client.0]
+        - rbd.dev_create:
+            client.0: testimage.client.0
+    """
+    assert isinstance(config, dict) or isinstance(config, list), \
+        "task dev_create only supports a list or dictionary for configuration"
+
+    if isinstance(config, dict):
+        role_images = config.items()
+    else:
+        role_images = [(role, None) for role in config]
+
+    log.info('Creating rbd block devices...')
+
+    testdir = teuthology.get_testdir(ctx)
+
+    for role, image in role_images:
+        if image is None:
+            image = default_image_name(role)
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+
+        remote.run(
+            args=[
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'rbd',
+                '--user', role.rsplit('.')[-1],
+                '-p', 'rbd',
+                'map',
+                image,
+                run.Raw('&&'),
+                # wait for the symlink to be created by udev
+                'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do',
+                'sleep', '1', run.Raw(';'),
+                'done',
+                ],
+            )
+    try:
+        yield
+    finally:
+        log.info('Unmapping rbd devices...')
+        for role, image in role_images:
+            if image is None:
+                image = default_image_name(role)
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            remote.run(
+                args=[
+                    'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                    'sudo',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    'rbd',
+                    '-p', 'rbd',
+                    'unmap',
+                    '/dev/rbd/rbd/{imgname}'.format(imgname=image),
+                    run.Raw('&&'),
+                    # wait for the symlink to be deleted by udev
+                    'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image),
+                    run.Raw(';'),
+                    'do',
+                    'sleep', '1', run.Raw(';'),
+                    'done',
+                    ],
+                )
+
+
+def rbd_devname_rtn(ctx, image):
+    return '/dev/rbd/rbd/{image}'.format(image=image)    
+
+def canonical_path(ctx, role, path):
+    """
+    Determine the canonical path for a given path on the host
+    representing the given role.  A canonical path contains no
+    . or .. components, and includes no symbolic links.
+    """
+    version_fp = StringIO()
+    ctx.cluster.only(role).run(
+        args=[ 'readlink', '-f', path ],
+        stdout=version_fp,
+        )
+    canonical_path = version_fp.getvalue().rstrip('\n')
+    version_fp.close()
+    return canonical_path
+
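+# For example, canonical_path(ctx, 'client.0', '/dev/rbd/rbd/testimage.client.0')
+# would resolve the udev symlink to something like '/dev/rbd0' (device name is
+# illustrative); run_xfstests relies on this so that the device paths it passes
+# match what the kernel reports.
+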
+@contextlib.contextmanager
+def run_xfstests(ctx, config):
+    """
+    Run xfstests over specified devices.
+
+    Warning: both the test and scratch devices specified will be
+    overwritten.  Normally xfstests modifies (but does not destroy)
+    the test device, but for now the run script used here re-makes
+    both filesystems.
+
+    Note: Only one instance of xfstests can run on a single host at
+    a time, although this is not enforced.
+
+    This task in its current form needs some improvement.  For
+    example, it assumes all roles provided in the config are
+    clients, and that the config provided is a list of key/value
+    pairs.  For now please use the xfstests() interface, below.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - rbd.run_xfstests:
+            client.0:
+                count: 2
+                test_dev: 'test_dev'
+                scratch_dev: 'scratch_dev'
+                fs_type: 'xfs'
+                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
+                randomize: true
+    """
+    with parallel() as p:
+        for role, properties in config.items():
+            p.spawn(run_xfstests_one_client, ctx, role, properties)
+    yield
+
+def run_xfstests_one_client(ctx, role, properties):
+    """
+    Spawned routine to handle xfs tests for a single client
+    """
+    testdir = teuthology.get_testdir(ctx)
+    try:
+        count = properties.get('count')
+        test_dev = properties.get('test_dev')
+        assert test_dev is not None, \
+            "task run_xfstests requires test_dev to be defined"
+        test_dev = canonical_path(ctx, role, test_dev)
+
+        scratch_dev = properties.get('scratch_dev')
+        assert scratch_dev is not None, \
+            "task run_xfstests requires scratch_dev to be defined"
+        scratch_dev = canonical_path(ctx, role, scratch_dev)
+
+        fs_type = properties.get('fs_type')
+        tests = properties.get('tests')
+        randomize = properties.get('randomize')
+
+
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+
+        # Fetch the test script
+        test_root = teuthology.get_testdir(ctx)
+        test_script = 'run_xfstests_krbd.sh'
+        test_path = os.path.join(test_root, test_script)
+
+        xfstests_url = properties.get('xfstests_url')
+        assert xfstests_url is not None, \
+            "task run_xfstests requires xfstests_url to be defined"
+
+        xfstests_krbd_url = xfstests_url + '/' + test_script
+
+        log.info('Fetching {script} for {role} from {url}'.format(
+            script=test_script,
+            role=role,
+            url=xfstests_krbd_url))
+
+        args = [ 'wget', '-O', test_path, '--', xfstests_krbd_url ]
+        remote.run(args=args)
+
+        log.info('Running xfstests on {role}:'.format(role=role))
+        log.info('   iteration count: {count}:'.format(count=count))
+        log.info('       test device: {dev}'.format(dev=test_dev))
+        log.info('    scratch device: {dev}'.format(dev=scratch_dev))
+        log.info('     using fs_type: {fs_type}'.format(fs_type=fs_type))
+        log.info('      tests to run: {tests}'.format(tests=tests))
+        log.info('         randomize: {randomize}'.format(randomize=randomize))
+
+        # Note that the device paths are interpreted using
+        # readlink -f <path> in order to get their canonical
+        # pathname (so it matches what the kernel remembers).
+        args = [
+            '/usr/bin/sudo',
+            'TESTDIR={tdir}'.format(tdir=testdir),
+            'URL_BASE={url}'.format(url=xfstests_url),
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            '/bin/bash',
+            test_path,
+            '-c', str(count),
+            '-f', fs_type,
+            '-t', test_dev,
+            '-s', scratch_dev,
+            ]
+        if randomize:
+            args.append('-r')
+        if tests:
+            args.extend(['--', tests])
+        remote.run(args=args, logger=log.getChild(role))
+    finally:
+        log.info('Removing {script} on {role}'.format(script=test_script,
+                                                      role=role))
+        remote.run(args=['rm', '-f', test_path])
+
+@contextlib.contextmanager
+def xfstests(ctx, config):
+    """
+    Run xfstests over rbd devices.  This interface sets up all
+    required configuration automatically if not otherwise specified.
+    Note that only one instance of xfstests can run on a single host
+    at a time.  By default, the set of tests specified is run once.
+    If a (non-zero) count value is supplied, the complete set of
+    tests will be run that number of times.
+
+    For example::
+
+        tasks:
+        - ceph:
+        # Image sizes are in MB
+        - rbd.xfstests:
+            client.0:
+                count: 3
+                test_image: 'test_image'
+                test_size: 250
+                test_format: 2
+                scratch_image: 'scratch_image'
+                scratch_size: 250
+                scratch_format: 1
+                fs_type: 'xfs'
+                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
+                randomize: true
+                xfstests_branch: master
+                xfstests_url: 'https://raw.github.com/ceph/ceph/master/qa'
+    """
+    if config is None:
+        config = { 'all': None }
+    assert isinstance(config, dict) or isinstance(config, list), \
+        "task xfstests only supports a list or dictionary for configuration"
+    if isinstance(config, dict):
+        config = teuthology.replace_all_with_clients(ctx.cluster, config)
+        runs = config.items()
+    else:
+        runs = [(role, None) for role in config]
+
+    running_xfstests = {}
+    for role, properties in runs:
+        assert role.startswith('client.'), \
+            "task xfstests can only run on client nodes"
+        for host, roles_for_host in ctx.cluster.remotes.items():
+            if role in roles_for_host:
+                assert host not in running_xfstests, \
+                    "task xfstests allows only one instance at a time per host"
+                running_xfstests[host] = True
+
+    images_config = {}
+    scratch_config = {}
+    modprobe_config = {}
+    image_map_config = {}
+    scratch_map_config = {}
+    xfstests_config = {}
+    for role, properties in runs:
+        if properties is None:
+            properties = {}
+
+        test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
+        test_size = properties.get('test_size', 10000) # 10G
+        test_fmt = properties.get('test_format', 1)
+        scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
+        scratch_size = properties.get('scratch_size', 10000) # 10G
+        scratch_fmt = properties.get('scratch_format', 1)
+
+        images_config[role] = dict(
+            image_name=test_image,
+            image_size=test_size,
+            image_format=test_fmt,
+            )
+
+        scratch_config[role] = dict(
+            image_name=scratch_image,
+            image_size=scratch_size,
+            image_format=scratch_fmt,
+            )
+
+        xfstests_branch = properties.get('xfstests_branch', 'master')
+        xfstests_url = properties.get('xfstests_url', 'https://raw.github.com/ceph/ceph/{branch}/qa'.format(branch=xfstests_branch))
+
+        xfstests_config[role] = dict(
+            count=properties.get('count', 1),
+            test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
+            scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
+            fs_type=properties.get('fs_type', 'xfs'),
+            randomize=properties.get('randomize', False),
+            tests=properties.get('tests'),
+            xfstests_url=xfstests_url,
+            )
+
+        log.info('Setting up xfstests using RBD images:')
+        log.info('      test ({size} MB): {image}'.format(size=test_size,
+                                                        image=test_image))
+        log.info('   scratch ({size} MB): {image}'.format(size=scratch_size,
+                                                        image=scratch_image))
+        modprobe_config[role] = None
+        image_map_config[role] = test_image
+        scratch_map_config[role] = scratch_image
+
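+    # contextutil.nested enters these context managers in order (create the
+    # test and scratch images, load the krbd module, map both images, run
+    # xfstests) and unwinds them in reverse on exit, so devices are unmapped
+    # and images removed even if the tests fail.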
+    with contextutil.nested(
+        lambda: create_image(ctx=ctx, config=images_config),
+        lambda: create_image(ctx=ctx, config=scratch_config),
+        lambda: modprobe(ctx=ctx, config=modprobe_config),
+        lambda: dev_create(ctx=ctx, config=image_map_config),
+        lambda: dev_create(ctx=ctx, config=scratch_map_config),
+        lambda: run_xfstests(ctx=ctx, config=xfstests_config),
+        ):
+        yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Create and mount an rbd image.
+
+    For example, you can specify which clients to run on::
+
+        tasks:
+        - ceph:
+        - rbd: [client.0, client.1]
+
+    There are a few image options::
+
+        tasks:
+        - ceph:
+        - rbd:
+            client.0: # uses defaults
+            client.1:
+                image_name: foo
+                image_size: 2048
+                image_format: 2
+                fs_type: xfs
+
+    To use default options on all clients::
+
+        tasks:
+        - ceph:
+        - rbd:
+            all:
+
+    To create 20GiB images and format them with xfs on all clients::
+
+        tasks:
+        - ceph:
+        - rbd:
+            all:
+              image_size: 20480
+              fs_type: xfs
+    """
+    if config is None:
+        config = { 'all': None }
+    norm_config = config
+    if isinstance(config, dict):
+        norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
+    if isinstance(norm_config, dict):
+        role_images = {}
+        for role, properties in norm_config.iteritems():
+            if properties is None:
+                properties = {}
+            role_images[role] = properties.get('image_name')
+    else:
+        role_images = norm_config
+
+    log.debug('rbd config is: %s', norm_config)
+
+    with contextutil.nested(
+        lambda: create_image(ctx=ctx, config=norm_config),
+        lambda: modprobe(ctx=ctx, config=norm_config),
+        lambda: dev_create(ctx=ctx, config=role_images),
+        lambda: generic_mkfs(ctx=ctx, config=norm_config,
+                devname_rtn=rbd_devname_rtn),
+        lambda: generic_mount(ctx=ctx, config=role_images,
+                devname_rtn=rbd_devname_rtn),
+        ):
+        yield
diff --git a/qa/tasks/rbd_fio.py b/qa/tasks/rbd_fio.py
new file mode 100644
index 0000000..4bd2fd3
--- /dev/null
+++ b/qa/tasks/rbd_fio.py
@@ -0,0 +1,214 @@
+"""
+ Long running fio tests on rbd mapped devices for format/features provided in config
+ Many fio parameters can be configured so that this task can be used along with thrash/power-cut tests
+ and exercise IO on full disk for all format/features
+  - This test should not be run on VM due to heavy use of resource
+
+"""
+import contextlib
+import json
+import logging
+import StringIO
+import re
+
+from teuthology.parallel import parallel
+from teuthology import misc as teuthology
+from tempfile import NamedTemporaryFile
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    client.0:
+       fio-io-size: 100g or 80% or 100m
+       fio-version: 2.2.9
+       formats: [2]
+       features: [[layering],[striping],[layering,exclusive-lock,object-map]]
+       test-clone-io: 1  # remove this option to skip creating an rbd clone and running io on it
+       io-engine: "sync or rbd or any io-engine"
+       rw: randrw
+    client.1:
+       fio-io-size: 100g
+       fio-version: 2.2.9
+       rw: read
+       image_size: 20480
+
+or
+    all:
+       fio-io-size: 400g
+       rw: randrw
+       formats: [2]
+       features: [[layering],[striping]]
+       io-engine: libaio
+
+    Create an rbd image + device and exercise IO for each format/feature combination provided in the config file.
+    The config can be per client, or one config can be used for all clients; fio jobs are run in parallel for the
+    clients provided.
+
+    """
+    if config.get('all'):
+        client_config = config['all']
+    clients = ctx.cluster.only(teuthology.is_type('client'))
+    rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
+    for remote, role in clients.remotes.iteritems():
+        if 'client_config' in locals():
+            with parallel() as p:
+                p.spawn(run_fio, remote, client_config, rbd_test_dir)
+        else:
+            for client_config in config:
+                if client_config in role:
+                    with parallel() as p:
+                        p.spawn(run_fio, remote, config[client_config], rbd_test_dir)
+
+    yield
+
+
+def run_fio(remote, config, rbd_test_dir):
+    """
+    create fio config file with options based on above config
+    get the fio from github, generate binary, and use it to run on
+    the generated fio config file
+    """
+    fio_config = NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/', delete=False)
+    fio_config.write('[global]\n')
+    # always bind ioengine (default 'sync'); it is checked again further down,
+    # so leaving it unset when 'io-engine' is absent would raise a NameError
+    ioengine = config.get('io-engine', 'sync')
+    fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
+    if config.get('bs'):
+        bs=config['bs']
+        fio_config.write('bs={bs}\n'.format(bs=bs))
+    else:
+        fio_config.write('bs=4k\n')
+    fio_config.write('iodepth=2\n')
+    if config.get('fio-io-size'):
+        size=config['fio-io-size']
+        fio_config.write('size={size}\n'.format(size=size))
+    else:
+        fio_config.write('size=100m\n')
+
+    fio_config.write('time_based\n')
+    if config.get('runtime'):
+        runtime=config['runtime']
+        fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
+    else:
+        fio_config.write('runtime=1800\n')
+    fio_config.write('allow_file_create=0\n')
+    image_size=10240
+    if config.get('image_size'):
+        image_size=config['image_size']
+
+    formats=[1,2]
+    features=[['layering'],['striping'],['exclusive-lock','object-map']]
+    fio_version='2.7'
+    if config.get('formats'):
+        formats=config['formats']
+    if config.get('features'):
+        features=config['features']
+    if config.get('fio-version'):
+        fio_version=config['fio-version']
+
+    fio_config.write('norandommap\n')
+    if ioengine == 'rbd':
+        fio_config.write('invalidate=0\n')
+    #handle package required for librbd engine
+    sn=remote.shortname
+    system_type= teuthology.get_system_type(remote)
+    if system_type == 'rpm' and ioengine == 'rbd':
+        log.info("Installing librbd1 devel package on {sn}".format(sn=sn))
+        remote.run(args=['sudo', 'yum' , 'install', 'librbd1-devel', '-y'])
+    elif ioengine == 'rbd':
+        log.info("Installing librbd devel package on {sn}".format(sn=sn))
+        remote.run(args=['sudo', 'apt-get', '-y',
+                         '--force-yes',
+                         'install', 'librbd-dev'])
+    if ioengine == 'rbd':
+        fio_config.write('clientname=admin\n')
+        fio_config.write('pool=rbd\n')
+    for frmt in formats:
+        for feature in features:
+           log.info("Creating rbd images on {sn}".format(sn=sn))
+           feature_name = '-'.join(feature)
+           rbd_name = 'i{i}f{f}{sn}'.format(i=frmt,f=feature_name,sn=sn)
+           rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(i=frmt,f=feature_name,sn=sn)
+           rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt,f=feature_name,sn=sn)
+           create_args=['rbd', 'create',
+                        '--size', '{size}'.format(size=image_size),
+                        '--image', rbd_name,
+                        '--image-format', '{f}'.format(f=frmt)]
+           for f in feature:
+               create_args.extend(['--image-feature', f])
+           remote.run(args=create_args)
+           remote.run(args=['rbd', 'info', rbd_name])
+           if ioengine != 'rbd':
+               out=StringIO.StringIO()
+               remote.run(args=['sudo', 'rbd', 'map', rbd_name ],stdout=out)
+               dev=re.search(r'(/dev/rbd\d+)',out.getvalue())
+               rbd_dev=dev.group(1)
+               if config.get('test-clone-io'):
+                    log.info("Testing clones using fio")
+                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
+                    remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name])
+                    remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
+                    # use a fresh buffer so we pick up the clone's device
+                    # rather than re-matching the parent image's device
+                    clone_out=StringIO.StringIO()
+                    remote.run(args=['sudo', 'rbd', 'map', rbd_clone_name], stdout=clone_out)
+                    dev=re.search(r'(/dev/rbd\d+)',clone_out.getvalue())
+                    rbd_clone_dev=dev.group(1)
+               fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
+               if config.get('rw'):
+                   rw=config['rw']
+                   fio_config.write('rw={rw}\n'.format(rw=rw))
+               else:
+                   rw='randrw'
+                   fio_config.write('rw=randrw\n')
+               fio_config.write('filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
+               if config.get('test-clone-io'):
+                   fio_config.write('[{rbd_clone_dev}]\n'.format(rbd_clone_dev=rbd_clone_dev))
+                   fio_config.write('rw={rw}\n'.format(rw=rw))
+                   fio_config.write('filename={rbd_clone_dev}\n'.format(rbd_clone_dev=rbd_clone_dev))
+           else:
+               if config.get('test-clone-io'):
+                    log.info("Testing clones using fio")
+                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
+                    remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name])
+                    remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
+               fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
+               if config.get('rw'):
+                   rw=config['rw']
+                   fio_config.write('rw={rw}\n'.format(rw=rw))
+               else:
+                   rw='randrw'
+                   fio_config.write('rw=randrw\n')
+               fio_config.write('rbdname={img_name}\n'.format(img_name=rbd_name))
+               if config.get('test-clone-io'):
+                   fio_config.write('[{clone_img_name}]\n'.format(clone_img_name=rbd_clone_name))
+                   fio_config.write('rw={rw}\n'.format(rw=rw))
+                   fio_config.write('rbdname={clone_img_name}\n'.format(clone_img_name=rbd_clone_name))
+
+
+    fio_config.close()
+    remote.put_file(fio_config.name,fio_config.name)
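+    # fetch the requested fio release from GitHub, build it on the remote and
+    # run it against the job file generated above; cleanup happens in 'finally'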
+    try:
+        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
+        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
+        remote.run(args=['mkdir', run.Raw(rbd_test_dir),])
+        remote.run(args=['cd', run.Raw(rbd_test_dir),
+                         run.Raw(';'), 'wget', fio, run.Raw(';'), run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
+                         run.Raw('cd fio-fio*'), run.Raw(';'), './configure', run.Raw(';'), 'make'])
+        remote.run(args=['ceph', '-s'])
+        remote.run(args=['sudo', run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
+        remote.run(args=['ceph', '-s'])
+    finally:
+        out=StringIO.StringIO()
+        remote.run(args=['rbd','showmapped', '--format=json'], stdout=out)
+        mapped_images = json.loads(out.getvalue())
+        if mapped_images:
+            log.info("Unmapping rbd images on {sn}".format(sn=sn))
+            for image in mapped_images.itervalues():
+                remote.run(args=['sudo', 'rbd', 'unmap', str(image['device'])])
+        log.info("Cleaning up fio install")
+        remote.run(args=['rm','-rf', run.Raw(rbd_test_dir)])
+        if system_type == 'rpm' and ioengine == 'rbd':
+            log.info("Uninstall librbd1 devel package on {sn}".format(sn=sn))
+            remote.run(args=['sudo', 'yum' , 'remove', 'librbd1-devel', '-y'])
+        elif ioengine == 'rbd':
+            log.info("Uninstall librbd devel package on {sn}".format(sn=sn))
+            remote.run(args=['sudo', 'apt-get', '-y', 'remove', 'librbd-dev'])
diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py
new file mode 100644
index 0000000..ab1a47f
--- /dev/null
+++ b/qa/tasks/rbd_fsx.py
@@ -0,0 +1,102 @@
+"""
+Run fsx on an rbd image
+"""
+import contextlib
+import logging
+
+from teuthology.parallel import parallel
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run fsx on an rbd image.
+
+    Currently this requires running as client.admin
+    to create a pool.
+
+    Specify which clients to run on as a list::
+
+      tasks:
+        ceph:
+        rbd_fsx:
+          clients: [client.0, client.1]
+
+    You can optionally change some properties of fsx:
+
+      tasks:
+        ceph:
+        rbd_fsx:
+          clients: <list of clients>
+          seed: <random seed number, or 0 to use the time>
+          ops: <number of operations to do>
+          size: <maximum image size in bytes>
+          valgrind: [--tool=<valgrind tool>]
+    """
+    log.info('starting rbd_fsx...')
+    with parallel() as p:
+        for role in config['clients']:
+            p.spawn(_run_one_client, ctx, config, role)
+    yield
+
+def _run_one_client(ctx, config, role):
+    """Spawned task that runs the client"""
+    krbd = config.get('krbd', False)
+    nbd = config.get('nbd', False)
+    testdir = teuthology.get_testdir(ctx)
+    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+
+    args = []
+    if krbd or nbd:
+        args.append('sudo') # rbd(-nbd) map/unmap need privileges
+    args.extend([
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir)
+    ])
+
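+    # merge any 'rbd_fsx' overrides from the teuthology job config into this
+    # task's config before reading the individual fsx options below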
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('rbd_fsx', {}))
+
+    if config.get('valgrind'):
+        args = teuthology.get_valgrind_args(
+            testdir,
+            'fsx_{id}'.format(id=role),
+            args,
+            config.get('valgrind')
+        )
+
+    args.extend([
+        'ceph_test_librbd_fsx',
+        '-d', # debug output for all operations
+        '-W', '-R', # mmap doesn't work with rbd
+        '-p', str(config.get('progress_interval', 100)), # show progress
+        '-P', '{tdir}/archive'.format(tdir=testdir),
+        '-r', str(config.get('readbdy',1)),
+        '-w', str(config.get('writebdy',1)),
+        '-t', str(config.get('truncbdy',1)),
+        '-h', str(config.get('holebdy',1)),
+        '-l', str(config.get('size', 250000000)),
+        '-S', str(config.get('seed', 0)),
+        '-N', str(config.get('ops', 1000)),
+    ])
+    if krbd:
+        args.append('-K') # -K enables krbd mode
+    if nbd:
+        args.append('-M') # -M enables nbd mode
+    if config.get('direct_io', False):
+        args.append('-Z') # -Z use direct IO
+    if not config.get('randomized_striping', True):
+        args.append('-U') # -U disables randomized striping
+    if not config.get('punch_holes', True):
+        args.append('-H') # -H disables discard ops
+    if config.get('journal_replay', False):
+        args.append('-j') # -j replay all IO events from journal
+    args.extend([
+        'pool_{pool}'.format(pool=role),
+        'image_{image}'.format(image=role),
+    ])
+
+    remote.run(args=args)
diff --git a/qa/tasks/rbd_mirror.py b/qa/tasks/rbd_mirror.py
new file mode 100644
index 0000000..7e0bdb7
--- /dev/null
+++ b/qa/tasks/rbd_mirror.py
@@ -0,0 +1,117 @@
+"""
+Task for running rbd mirroring daemons and configuring mirroring
+"""
+
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc
+from teuthology.exceptions import ConfigError
+from teuthology.task import Task
+from util import get_remote_for_role
+
+log = logging.getLogger(__name__)
+
+
+class RBDMirror(Task):
+    """
+    Run an rbd-mirror daemon to sync rbd images between clusters.
+
+    This requires two clients (one from each cluster) on the same host
+    to connect with. The pool configuration should be adjusted by later
+    test scripts to include the remote client and cluster name. This task
+    just needs to know how to connect to the local cluster.
+
+    For example:
+
+        roles:
+        - [primary.mon.a, primary.osd.0, primary.osd.1, primary.osd.2]
+        - [secondary.mon.a, secondary.osd.0, secondary.osd.1, secondary.osd.2]
+        - [primary.client.mirror, secondary.client.mirror]
+        tasks:
+        - ceph:
+            cluster: primary
+        - ceph:
+            cluster: secondary
+        - rbd-mirror:
+            client: primary.client.mirror
+
+    To mirror back to the primary cluster as well, add another
+    rbd_mirror instance:
+
+        - rbd-mirror:
+            client: secondary.client.mirror
+
+    Possible options for this task are:
+
+        client: role - ceph client to connect as
+        valgrind: [--tool=<valgrind tool>] - none by default
+        coverage: bool - whether this run may be collecting coverage data
+    """
+    def __init__(self, ctx, config):
+        super(RBDMirror, self).__init__(ctx, config)
+        self.log = log
+
+    def setup(self):
+        super(RBDMirror, self).setup()
+        try:
+            self.client = self.config['client']
+        except KeyError:
+            raise ConfigError('rbd-mirror requires a client to connect with')
+
+        self.cluster_name, type_, self.client_id = misc.split_role(self.client)
+
+        if type_ != 'client':
+            msg = 'client role ({0}) must be a client'.format(self.client)
+            raise ConfigError(msg)
+
+        self.remote = get_remote_for_role(self.ctx, self.client)
+
+    def begin(self):
+        super(RBDMirror, self).begin()
+        testdir = misc.get_testdir(self.ctx)
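+        # use SIGTERM instead of SIGKILL when running under valgrind or
+        # coverage so the daemon can exit cleanly and flush its data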
+        daemon_signal = 'kill'
+        if 'coverage' in self.config or 'valgrind' in self.config:
+            daemon_signal = 'term'
+
+        args = [
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'daemon-helper',
+            daemon_signal,
+            ]
+
+        if 'valgrind' in self.config:
+            args = misc.get_valgrind_args(
+                testdir,
+                'rbd-mirror-{id}'.format(id=self.client),
+                args,
+                self.config.get('valgrind')
+            )
+
+        args.extend([
+            'rbd-mirror',
+            '--cluster',
+            self.cluster_name,
+            '--id',
+            self.client_id,
+            ])
+
+        self.ctx.daemons.add_daemon(
+            self.remote, 'rbd-mirror', self.client,
+            cluster=self.cluster_name,
+            args=args,
+            logger=self.log.getChild(self.client),
+            stdin=run.PIPE,
+            wait=False,
+        )
+
+    def end(self):
+        mirror_daemon = self.ctx.daemons.get_daemon('rbd-mirror',
+                                                    self.client,
+                                                    self.cluster_name)
+        mirror_daemon.stop()
+        super(RBDMirror, self).end()
+
+task = RBDMirror
diff --git a/qa/tasks/recovery_bench.py b/qa/tasks/recovery_bench.py
new file mode 100644
index 0000000..5eb9fd2
--- /dev/null
+++ b/qa/tasks/recovery_bench.py
@@ -0,0 +1,208 @@
+"""
+Recovery system benchmarking
+"""
+from cStringIO import StringIO
+
+import contextlib
+import gevent
+import json
+import logging
+import random
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Benchmark the recovery system.
+
+    Generates objects with smalliobench, runs it normally to get a
+    baseline performance measurement, then marks an OSD out and reruns
+    to measure performance during recovery.
+
+    The config should be as follows:
+
+    recovery_bench:
+        duration: <seconds for each measurement run>
+        num_objects: <number of objects>
+        io_size: <io size in bytes>
+
+    example:
+
+    tasks:
+    - ceph:
+    - recovery_bench:
+        duration: 60
+        num_objects: 500
+        io_size: 4096
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'recovery_bench task only accepts a dict for configuration'
+
+    log.info('Beginning recovery bench...')
+
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    while len(manager.get_osd_status()['up']) < num_osds:
+        time.sleep(10)
+
+    bench_proc = RecoveryBencher(
+        manager,
+        config,
+        )
+    try:
+        yield
+    finally:
+        log.info('joining recovery bencher')
+        bench_proc.do_join()
+
+class RecoveryBencher:
+    """
+    RecoveryBencher
+    """
+    def __init__(self, manager, config):
+        self.ceph_manager = manager
+        self.ceph_manager.wait_for_clean()
+
+        osd_status = self.ceph_manager.get_osd_status()
+        self.osds = osd_status['up']
+
+        self.config = config
+        if self.config is None:
+            self.config = dict()
+
+        else:
+            def tmp(x):
+                """
+                Local wrapper to print value.
+                """
+                print x
+            self.log = tmp
+
+        log.info("spawning thread")
+
+        self.thread = gevent.spawn(self.do_bench)
+
+    def do_join(self):
+        """
+        Join the recovery bencher.  This is called after the main
+        task exits.
+        """
+        self.thread.get()
+
+    def do_bench(self):
+        """
+        Do the benchmarking.
+        """
+        duration = self.config.get("duration", 60)
+        num_objects = self.config.get("num_objects", 500)
+        io_size = self.config.get("io_size", 4096)
+
+        osd = str(random.choice(self.osds))
+        (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys()
+
+        testdir = teuthology.get_testdir(self.ceph_manager.ctx)
+
+        # create the objects
+        osd_remote.run(
+            args=[
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'smalliobench',
+                '--use-prefix', 'recovery_bench',
+                '--init-only', '1',
+                '--num-objects', str(num_objects),
+                '--io-size', str(io_size),
+                ],
+            wait=True,
+        )
+
+        # baseline bench
+        log.info('non-recovery (baseline)')
+        p = osd_remote.run(
+            args=[
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'smalliobench',
+                '--use-prefix', 'recovery_bench',
+                '--do-not-init', '1',
+                '--duration', str(duration),
+                '--io-size', str(io_size),
+                ],
+            stdout=StringIO(),
+            stderr=StringIO(),
+            wait=True,
+        )
+        self.process_samples(p.stderr.getvalue())
+
+        self.ceph_manager.raw_cluster_cmd('osd', 'out', osd)
+        time.sleep(5)
+
+        # recovery bench
+        log.info('recovery active')
+        p = osd_remote.run(
+            args=[
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'smalliobench',
+                '--use-prefix', 'recovery_bench',
+                '--do-not-init', '1',
+                '--duration', str(duration),
+                '--io-size', str(io_size),
+                ],
+            stdout=StringIO(),
+            stderr=StringIO(),
+            wait=True,
+        )
+        self.process_samples(p.stderr.getvalue())
+
+        self.ceph_manager.raw_cluster_cmd('osd', 'in', osd)
+
+    def process_samples(self, input):
+        """
+        Extract samples from the input and process the results
+
+        :param input: input lines in JSON format
+        """
+        lat = {}
+        for line in input.split('\n'):
+            try:
+                sample = json.loads(line)
+                samples = lat.setdefault(sample['type'], [])
+                samples.append(float(sample['latency']))
+            except Exception:
+                pass
+
+        for type in lat:
+            samples = lat[type]
+            samples.sort()
+
+            num = len(samples)
+
+            # median
+            if num & 1 == 1: # odd number of samples
+                median = samples[num / 2]
+            else:
+                median = (samples[num / 2] + samples[num / 2 - 1]) / 2
+
+            # 99%
+            ninety_nine = samples[int(num * 0.99)]
+
+            log.info("%s: median %f, 99%% %f" % (type, median, ninety_nine))
diff --git a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py
new file mode 100644
index 0000000..016057d
--- /dev/null
+++ b/qa/tasks/reg11184.py
@@ -0,0 +1,242 @@
+"""
+Special regression test for tracker #11184
+
+Synopsis: osd/SnapMapper.cc: 282: FAILED assert(check(oid))
+
+This is accomplished by moving a pg that wasn't part of a split and still includes
+divergent priors.
+"""
+import logging
+import time
+from cStringIO import StringIO
+
+from teuthology import misc as teuthology
+from util.rados import rados
+import os
+
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+    """
+    Test handling of divergent entries during export / import
+    to regression test tracker #11184
+
+    overrides:
+      ceph:
+        conf:
+          osd:
+            debug osd: 5
+
+    Requires 3 osds on a single test node.
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'reg11184 task only accepts a dict for configuration'
+
+    manager = ctx.managers['ceph']
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'noin')
+    manager.raw_cluster_cmd('osd', 'set', 'nodown')
+    manager.wait_for_clean()
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+    dummyfile2 = '/etc/resolv.conf'
+    testdir = teuthology.get_testdir(ctx)
+
+    # create 1 pg pool
+    log.info('creating foo')
+    manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
+
+    osds = [0, 1, 2]
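+    # clamp the pg log length on all osds so the logs are trimmed aggressively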
+    for i in osds:
+        manager.set_config(i, osd_min_pg_log_entries=10)
+        manager.set_config(i, osd_max_pg_log_entries=10)
+        manager.set_config(i, osd_pg_log_trim_min=5)
+
+    # determine primary
+    divergent = manager.get_pg_primary('foo', 0)
+    log.info("primary and soon to be divergent is %d", divergent)
+    non_divergent = list(osds)
+    non_divergent.remove(divergent)
+
+    log.info('writing initial objects')
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    # write 100 objects
+    for i in range(100):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+
+    manager.wait_for_clean()
+
+    # blackhole non_divergent
+    log.info("blackholing osds %s", str(non_divergent))
+    for i in non_divergent:
+        manager.set_config(i, filestore_blackhole=1)
+
+    DIVERGENT_WRITE = 5
+    DIVERGENT_REMOVE = 5
+    # Write some soon to be divergent
+    log.info('writing divergent objects')
+    for i in range(DIVERGENT_WRITE):
+        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
+                         dummyfile2], wait=False)
+    # Remove some soon to be divergent
+    log.info('remove divergent objects')
+    for i in range(DIVERGENT_REMOVE):
+        rados(ctx, mon, ['-p', 'foo', 'rm',
+                         'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
+    time.sleep(10)
+    mon.run(
+        args=['killall', '-9', 'rados'],
+        wait=True,
+        check_status=False)
+
+    # kill all the osds but leave divergent in
+    log.info('killing all the osds')
+    for i in osds:
+        manager.kill_osd(i)
+    for i in osds:
+        manager.mark_down_osd(i)
+    for i in non_divergent:
+        manager.mark_out_osd(i)
+
+    # bring up non-divergent
+    log.info("bringing up non_divergent %s", str(non_divergent))
+    for i in non_divergent:
+        manager.revive_osd(i)
+    for i in non_divergent:
+        manager.mark_in_osd(i)
+
+    # write 1 non-divergent object (ensure that old divergent one is divergent)
+    objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    log.info('writing non-divergent object ' + objname)
+    rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
+
+    manager.wait_for_recovery()
+
+    # ensure no recovery of up osds first
+    log.info('delay recovery')
+    for i in non_divergent:
+        manager.wait_run_admin_socket(
+            'osd', i, ['set_recovery_delay', '100000'])
+
+    # bring in our divergent friend
+    log.info("revive divergent %d", divergent)
+    manager.raw_cluster_cmd('osd', 'set', 'noup')
+    manager.revive_osd(divergent)
+
+    log.info('delay recovery divergent')
+    manager.wait_run_admin_socket(
+        'osd', divergent, ['set_recovery_delay', '100000'])
+
+    manager.raw_cluster_cmd('osd', 'unset', 'noup')
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+
+    log.info('wait for peering')
+    rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+
+    # At this point the divergent_priors should have been detected
+
+    log.info("killing divergent %d", divergent)
+    manager.kill_osd(divergent)
+
+    # Split pgs for pool foo
+    manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'pg_num', '2')
+    time.sleep(5)
+
+    # Export a pg
+    (exp_remote,) = ctx.\
+        cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+    FSPATH = manager.get_filepath()
+    JPATH = os.path.join(FSPATH, "journal")
+    prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
+              "--data-path {fpath} --journal-path {jpath} "
+              "--log-file="
+              "/var/log/ceph/objectstore_tool.$$.log ".
+              format(fpath=FSPATH, jpath=JPATH))
+    pid = os.getpid()
+    expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid))
+    cmd = ((prefix + "--op export --pgid 1.0 --file {file}").
+           format(id=divergent, file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    # Remove the same pg that was exported
+    cmd = ((prefix + "--op remove --pgid 1.0").
+           format(id=divergent, file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    # Kill one of non-divergent OSDs
+    log.info('killing osd.%d' % non_divergent[1])
+    manager.kill_osd(non_divergent[1])
+    manager.mark_down_osd(non_divergent[1])
+    # manager.mark_out_osd(non_divergent[1])
+
+    cmd = ((prefix + "--op import --file {file}").
+           format(id=non_divergent[1], file=expfile))
+    proc = exp_remote.run(args=cmd, wait=True,
+                          check_status=False, stdout=StringIO())
+    assert proc.exitstatus == 0
+
+    # bring in our divergent friend and other node
+    log.info("revive divergent %d", divergent)
+    manager.revive_osd(divergent)
+    manager.mark_in_osd(divergent)
+    log.info("revive %d", non_divergent[1])
+    manager.revive_osd(non_divergent[1])
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+
+    log.info('delay recovery divergent')
+    manager.set_config(divergent, osd_recovery_delay_start=100000)
+    log.info('mark divergent in')
+    manager.mark_in_osd(divergent)
+
+    log.info('wait for peering')
+    rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+
+    log.info("killing divergent %d", divergent)
+    manager.kill_osd(divergent)
+    log.info("reviving divergent %d", divergent)
+    manager.revive_osd(divergent)
+    time.sleep(3)
+
+    log.info('allowing recovery')
+    # Set osd_recovery_delay_start back to 0 and kick the queue
+    for i in osds:
+        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
+                                'kick_recovery_wq', '0')
+
+    log.info('reading divergent objects')
+    for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
+        exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
+                                       '/tmp/existing'])
+        assert exit_status == 0
+
+    (remote,) = ctx.\
+        cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+    msg = "dirty_divergent_priors: true, divergent_priors: %d" \
+          % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
+    cmd = 'grep "{msg}" /var/log/ceph/ceph-osd.{osd}.log'\
+          .format(msg=msg, osd=divergent)
+    proc = remote.run(args=cmd, wait=True, check_status=False)
+    assert proc.exitstatus == 0
+
+    cmd = 'rm {file}'.format(file=expfile)
+    remote.run(args=cmd, wait=True)
+    log.info("success")
diff --git a/qa/tasks/rep_lost_unfound_delete.py b/qa/tasks/rep_lost_unfound_delete.py
new file mode 100644
index 0000000..b0ba3dc
--- /dev/null
+++ b/qa/tasks/rep_lost_unfound_delete.py
@@ -0,0 +1,184 @@
+"""
+Lost_unfound
+"""
+import logging
+from teuthology.orchestra import run
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    """
+    Test handling of lost objects.
+
+    A pretty rigid cluster is brought up and tested by this task.
+    """
+    POOL = 'unfounddel_pool'
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'lost_unfound task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    manager.create_pool(POOL)
+
+    # something that is always there
+    dummyfile = '/etc/fstab'
+
+    # take an osd out until the very end
+    manager.kill_osd(2)
+    manager.mark_down_osd(2)
+    manager.mark_out_osd(2)
+
+    # kludge to make sure they get a map
+    rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # create old objects
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f])
+
+    # delay recovery, and make the pg log very long (to prevent backfill)
+    manager.raw_cluster_cmd(
+            'tell', 'osd.1',
+            'injectargs',
+            '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+            )
+
+    manager.kill_osd(0)
+    manager.mark_down_osd(0)
+    
+    for f in range(1, 10):
+        rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
+        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
+
+    # bring osd.0 back up, let it peer, but don't replicate the new
+    # objects...
+    log.info('osd.0 command_args is %s' % 'foo')
+    log.info(ctx.daemons.get_daemon('osd', 0).command_args)
+    ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
+            '--osd-recovery-delay-start', '1000'
+            ])
+    manager.revive_osd(0)
+    manager.mark_in_osd(0)
+    manager.wait_till_osd_is_up(0)
+
+    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.wait_till_active()
+
+    # take out osd.1 and the only copy of those objects.
+    manager.kill_osd(1)
+    manager.mark_down_osd(1)
+    manager.mark_out_osd(1)
+    manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+
+    # bring up osd.2 so that things would otherwise, in theory, recover fully
+    manager.revive_osd(2)
+    manager.mark_in_osd(2)
+    manager.wait_till_osd_is_up(2)
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_till_active()
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+    # verify that there are unfound objects
+    unfound = manager.get_num_unfound_objects()
+    log.info("there are %d unfound objects" % unfound)
+    assert unfound
+
+    testdir = teuthology.get_testdir(ctx)
+    procs = []
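+    # unless disabled, keep a rados bench write running in the background
+    # while the unfound objects are marked lost below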
+    if config.get('parallel_bench', True):
+        procs.append(mon.run(
+            args=[
+                "/bin/sh", "-c",
+                " ".join(['adjust-ulimits',
+                          'ceph-coverage',
+                          '{tdir}/archive/coverage',
+                          'rados',
+                          '--no-log-to-stderr',
+                          '--name', 'client.admin',
+                          '-b', str(4<<10),
+                          '-p' , POOL,
+                          '-t', '20',
+                          'bench', '240', 'write',
+                      ]).format(tdir=testdir),
+            ],
+            logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
+            stdin=run.PIPE,
+            wait=False
+        ))
+    time.sleep(10)
+
+    # mark stuff lost
+    pgs = manager.get_pg_stats()
+    for pg in pgs:
+        if pg['stat_sum']['num_objects_unfound'] > 0:
+            primary = 'osd.%d' % pg['acting'][0]
+
+            # verify that we can list them directly from the osd
+            log.info('listing missing/lost in %s state %s', pg['pgid'],
+                     pg['state'])
+            m = manager.list_pg_missing(pg['pgid'])
+            #log.info('%s' % m)
+            assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+            num_unfound=0
+            for o in m['objects']:
+                if len(o['locations']) == 0:
+                    num_unfound += 1
+            assert m['num_unfound'] == num_unfound
+
+            log.info("reverting unfound in %s on %s", pg['pgid'], primary)
+            manager.raw_cluster_cmd('pg', pg['pgid'],
+                                    'mark_unfound_lost', 'delete')
+        else:
+            log.info("no unfound in %s", pg['pgid'])
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+    manager.wait_for_recovery()
+
+    # verify result
+    for f in range(1, 10):
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-'])
+        assert err
+        err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-'])
+        assert err
+
+    # see if osd.1 can cope
+    manager.revive_osd(1)
+    manager.mark_in_osd(1)
+    manager.wait_till_osd_is_up(1)
+    manager.wait_for_clean()
+    run.wait(procs)
+
diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py
new file mode 100644
index 0000000..a547c76
--- /dev/null
+++ b/qa/tasks/repair_test.py
@@ -0,0 +1,304 @@
+"""
+Test pool repairing after objects are damaged.
+"""
+import logging
+import time
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+
+def choose_primary(manager, pool, num):
+    """
+    Return primary to test on.
+    """
+    log.info("Choosing primary")
+    return manager.get_pg_primary(pool, num)
+
+
+def choose_replica(manager, pool, num):
+    """
+    Return replica to test on.
+    """
+    log.info("Choosing replica")
+    return manager.get_pg_replica(pool, num)
+
+
+def trunc(manager, osd, pool, obj):
+    """
+    truncate an object
+    """
+    log.info("truncating object")
+    return manager.osd_admin_socket(
+        osd,
+        ['truncobj', pool, obj, '1'])
+
+
+def dataerr(manager, osd, pool, obj):
+    """
+    cause an error in the data
+    """
+    log.info("injecting data err on object")
+    return manager.osd_admin_socket(
+        osd,
+        ['injectdataerr', pool, obj])
+
+
+def mdataerr(manager, osd, pool, obj):
+    """
+    cause an error in the mdata
+    """
+    log.info("injecting mdata err on object")
+    return manager.osd_admin_socket(
+        osd,
+        ['injectmdataerr', pool, obj])
+
+
+def omaperr(manager, osd, pool, obj):
+    """
+    Cause an omap error.
+    """
+    log.info("injecting omap err on object")
+    return manager.osd_admin_socket(osd, ['setomapval', pool, obj,
+                                              'badkey', 'badval'])
+
+
+def repair_test_1(manager, corrupter, chooser, scrub_type):
+    """
+    Creates an object in the pool, corrupts it,
+    scrubs it, and verifies that the pool is inconsistent.  It then repairs
+    the pool, rescrubs it, and verifies that the pool is consistent
+
+    :param corrupter: error generating function (truncate, data-error, or
+     meta-data error, for example).
+    :param chooser: osd type chooser (primary or replica)
+    :param scrub_type: regular scrub or deep-scrub
+    """
+    pool = "repair_pool_1"
+    manager.wait_for_clean()
+    with manager.pool(pool, 1):
+
+        log.info("starting repair test type 1")
+        victim_osd = chooser(manager, pool, 0)
+
+        # create object
+        log.info("doing put")
+        manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
+
+        # corrupt object
+        log.info("corrupting object")
+        corrupter(manager, victim_osd, pool, 'repair_test_obj')
+
+        # verify inconsistent
+        log.info("scrubbing")
+        manager.do_pg_scrub(pool, 0, scrub_type)
+
+        assert manager.pg_inconsistent(pool, 0)
+
+        # repair
+        log.info("repairing")
+        manager.do_pg_scrub(pool, 0, "repair")
+
+        log.info("re-scrubbing")
+        manager.do_pg_scrub(pool, 0, scrub_type)
+
+        # verify consistent
+        assert not manager.pg_inconsistent(pool, 0)
+        log.info("done")
+
+
+def repair_test_2(ctx, manager, config, chooser):
+    """
+    First creates a set of objects and
+    sets the omap value.  It then corrupts an object, does both a scrub
+    and a deep-scrub, and then corrupts more objects.  After that, it
+    repairs the pool and makes sure that the pool is consistent some
+    time after a deep-scrub.
+
+    :param chooser: primary or replica selection routine.
+    """
+    pool = "repair_pool_2"
+    manager.wait_for_clean()
+    with manager.pool(pool, 1):
+        log.info("starting repair test type 2")
+        victim_osd = chooser(manager, pool, 0)
+        first_mon = teuthology.get_first_mon(ctx, config)
+        (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+        # create object
+        log.info("doing put and setomapval")
+        manager.do_put(pool, 'file1', '/etc/hosts')
+        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1',
+                                   'key', 'val'])
+        manager.do_put(pool, 'file2', '/etc/hosts')
+        manager.do_put(pool, 'file3', '/etc/hosts')
+        manager.do_put(pool, 'file4', '/etc/hosts')
+        manager.do_put(pool, 'file5', '/etc/hosts')
+        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5',
+                                   'key', 'val'])
+        manager.do_put(pool, 'file6', '/etc/hosts')
+
+        # corrupt object
+        log.info("corrupting object")
+        omaperr(manager, victim_osd, pool, 'file1')
+
+        # verify inconsistent
+        log.info("scrubbing")
+        manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+        assert manager.pg_inconsistent(pool, 0)
+
+        # Regression test for bug #4778, should still
+        # be inconsistent after scrub
+        manager.do_pg_scrub(pool, 0, 'scrub')
+
+        assert manager.pg_inconsistent(pool, 0)
+
+        # Additional corruptions including 2 types for file1
+        log.info("corrupting more objects")
+        dataerr(manager, victim_osd, pool, 'file1')
+        mdataerr(manager, victim_osd, pool, 'file2')
+        trunc(manager, victim_osd, pool, 'file3')
+        omaperr(manager, victim_osd, pool, 'file6')
+
+        # see still inconsistent
+        log.info("scrubbing")
+        manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+        assert manager.pg_inconsistent(pool, 0)
+
+        # repair
+        log.info("repairing")
+        manager.do_pg_scrub(pool, 0, "repair")
+
+        # Let repair clear inconsistent flag
+        time.sleep(10)
+
+        # verify consistent
+        assert not manager.pg_inconsistent(pool, 0)
+
+        # In the future repair might determine state of
+        # inconsistency itself, verify with a deep-scrub
+        log.info("scrubbing")
+        manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+        # verify consistent
+        assert not manager.pg_inconsistent(pool, 0)
+
+        log.info("done")
+
+
+def hinfoerr(manager, victim, pool, obj):
+    """
+    cause an error in the hinfo_key
+    """
+    log.info("remove the hinfo_key")
+    manager.objectstore_tool(pool,
+                             options='',
+                             args='rm-attr hinfo_key',
+                             object_name=obj,
+                             osd=victim)
+
+
+def repair_test_erasure_code(manager, corrupter, victim, scrub_type):
+    """
+    Creates an object in the pool, corrupts it,
+    scrubs it, and verifies that the pool is inconsistent.  It then repairs
+    the pool, rescrubs it, and verifies that the pool is consistent
+
+    :param corrupter: error generating function.
+    :param victim: osd to corrupt ('primary' or 'replica')
+    :param scrub_type: regular scrub or deep-scrub
+    """
+    pool = "repair_pool_3"
+    manager.wait_for_clean()
+    with manager.pool(pool_name=pool, pg_num=1,
+                          erasure_code_profile_name='default'):
+
+        log.info("starting repair test for erasure code")
+
+        # create object
+        log.info("doing put")
+        manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
+
+        # corrupt object
+        log.info("corrupting object")
+        corrupter(manager, victim, pool, 'repair_test_obj')
+
+        # verify inconsistent
+        log.info("scrubbing")
+        manager.do_pg_scrub(pool, 0, scrub_type)
+
+        assert manager.pg_inconsistent(pool, 0)
+
+        # repair
+        log.info("repairing")
+        manager.do_pg_scrub(pool, 0, "repair")
+
+        log.info("re-scrubbing")
+        manager.do_pg_scrub(pool, 0, scrub_type)
+
+        # verify consistent
+        assert not manager.pg_inconsistent(pool, 0)
+        log.info("done")
+
+
+def task(ctx, config):
+    """
+    Test [deep] repair in several situations:
+      Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica]
+
+    The config should be as follows:
+
+      Must include the log-whitelist below
+      Must enable filestore_debug_inject_read_err config
+
+    example:
+
+    tasks:
+    - chef:
+    - install:
+    - ceph:
+        log-whitelist:
+          - 'candidate had a stat error'
+          - 'candidate had a read error'
+          - 'deep-scrub 0 missing, 1 inconsistent objects'
+          - 'deep-scrub 0 missing, 4 inconsistent objects'
+          - 'deep-scrub 1 errors'
+          - 'deep-scrub 4 errors'
+          - '!= known omap_digest'
+          - 'repair 0 missing, 1 inconsistent objects'
+          - 'repair 0 missing, 4 inconsistent objects'
+          - 'repair 1 errors, 1 fixed'
+          - 'repair 4 errors, 4 fixed'
+          - 'scrub 0 missing, 1 inconsistent'
+          - 'scrub 1 errors'
+          - 'size 1 != known size'
+        conf:
+          osd:
+            filestore debug inject read err: true
+    - repair_test:
+
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'repair_test task only accepts a dict for config'
+
+    manager = ctx.managers['ceph']
+    manager.wait_for_all_up()
+
+    manager.raw_cluster_cmd('osd', 'set', 'noscrub')
+    manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub')
+
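+    # exercise every corruption type on both the primary and a replica,
+    # then the multi-object scenario and the erasure-coded hinfo_key case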
+    repair_test_1(manager, mdataerr, choose_primary, "scrub")
+    repair_test_1(manager, mdataerr, choose_replica, "scrub")
+    repair_test_1(manager, dataerr, choose_primary, "deep-scrub")
+    repair_test_1(manager, dataerr, choose_replica, "deep-scrub")
+    repair_test_1(manager, trunc, choose_primary, "scrub")
+    repair_test_1(manager, trunc, choose_replica, "scrub")
+    repair_test_2(ctx, manager, config, choose_primary)
+    repair_test_2(ctx, manager, config, choose_replica)
+
+    repair_test_erasure_code(manager, hinfoerr, 'primary', "deep-scrub")
diff --git a/qa/tasks/rest_api.py b/qa/tasks/rest_api.py
new file mode 100644
index 0000000..e86f77e
--- /dev/null
+++ b/qa/tasks/rest_api.py
@@ -0,0 +1,184 @@
+"""
+Rest Api
+"""
+import logging
+import contextlib
+import time
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra import run
+from teuthology.orchestra.daemon import DaemonGroup
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def run_rest_api_daemon(ctx, api_clients):
+    """
+    Wrapper starts the rest api daemons
+    """
+    if not hasattr(ctx, 'daemons'):
+        ctx.daemons = DaemonGroup()
+    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+    for rems, roles in remotes.iteritems():
+        for whole_id_ in roles:
+            if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+                run_cmd = [
+                    'sudo',
+                    'daemon-helper',
+                    'kill',
+                    'ceph-rest-api',
+                    '-n',
+                    'client.rest{id}'.format(id=id_), ]
+                cl_rest_id = 'client.rest{id}'.format(id=id_)
+                ctx.daemons.add_daemon(rems, 'restapi',
+                    cl_rest_id,
+                    args=run_cmd,
+                    logger=log.getChild(cl_rest_id),
+                    stdin=run.PIPE,
+                    wait=False,
+                    )
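+                # wait for the daemon to answer on its status endpoint,
+                # retrying up to 11 times with 5 seconds between attempts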
+                for i in range(1, 12):
+                    log.info('testing for ceph-rest-api try {0}'.format(i))
+                    run_cmd = [
+                        'wget',
+                        '-O',
+                        '/dev/null',
+                        '-q',
+                        'http://localhost:5000/api/v0.1/status'
+                    ]
+                    proc = rems.run(
+                        args=run_cmd,
+                        check_status=False
+                    )
+                    if proc.exitstatus == 0:
+                        break
+                    time.sleep(5)
+                if proc.exitstatus != 0:
+                    raise RuntimeError('Cannot contact ceph-rest-api')
+    try:
+        yield
+
+    finally:
+        """
+        TO DO: destroy daemons started -- modify iter_daemons_of_role
+        """
+        teuthology.stop_daemons_of_type(ctx, 'restapi')
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Start up rest-api.
+
+    To start on all clients::
+
+        tasks:
+        - ceph:
+        - rest-api:
+
+    To only run on certain clients::
+
+        tasks:
+        - ceph:
+        - rest-api: [client.0, client.3]
+
+    or
+
+        tasks:
+        - ceph:
+        - rest-api:
+            client.0:
+            client.3:
+
+    The general flow of things here is:
+        1. Find clients on which rest-api is supposed to run (api_clients)
+        2. Generate keyring values
+        3. Start up ceph-rest-api daemons
+    On cleanup:
+        4. Stop the daemons
+        5. Delete keyring value files.
+    """
+    api_clients = []
+    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+    log.info(remotes)
+    if config is None:
+        api_clients = ['client.{id}'.format(id=id_)
+            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    else:
+        api_clients = config
+    log.info(api_clients)
+    testdir = teuthology.get_testdir(ctx)
+    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+    for rems, roles in remotes.iteritems():
+        for whole_id_ in roles:
+            if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+                keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
+                        id=id_)
+                rems.run(
+                    args=[
+                        'sudo',
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        coverage_dir,
+                        'ceph-authtool',
+                        '--create-keyring',
+                        '--gen-key',
+                        '--name=client.rest{id}'.format(id=id_),
+                        '--set-uid=0',
+                        '--cap', 'mon', 'allow *',
+                        '--cap', 'osd', 'allow *',
+                        '--cap', 'mds', 'allow',
+                        keyring,
+                        run.Raw('&&'),
+                        'sudo',
+                        'chmod',
+                        '0644',
+                        keyring,
+                        ],
+                    )
+                rems.run(
+                    args=[
+                        'sudo',
+                        'sh',
+                        '-c',
+                        run.Raw("'"),
+                        "echo",
+                        '[client.rest{id}]'.format(id=id_),
+                        run.Raw('>>'),
+                        "/etc/ceph/ceph.conf",
+                        run.Raw("'")
+                        ]
+                    )
+                rems.run(
+                    args=[
+                        'sudo',
+                        'sh',
+                        '-c',
+                        run.Raw("'"),
+                        'echo',
+                        'restapi',
+                        'keyring',
+                        '=',
+                        '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+                        run.Raw('>>'),
+                        '/etc/ceph/ceph.conf',
+                        run.Raw("'"),
+                        ]
+                    )
+                rems.run(
+                    args=[
+                        'sudo',
+                        'ceph',
+                        'auth',
+                        'import',
+                        '-i',
+                        '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+                    ]
+                )
+    with contextutil.nested(
+            lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
+        yield
+
diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py
new file mode 100644
index 0000000..697345a
--- /dev/null
+++ b/qa/tasks/restart.py
@@ -0,0 +1,163 @@
+"""
+Daemon restart
+"""
+import logging
+import pipes
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run as tor
+
+from teuthology.orchestra import run
+log = logging.getLogger(__name__)
+
+def restart_daemon(ctx, config, role, id_, *args):
+    """
+    Handle restart (including the execution of the command parameters passed)
+    """
+    log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_))
+    daemon = ctx.daemons.get_daemon(role, id_)
+    log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_))
+    try:
+        daemon.wait_for_exit()
+    except tor.CommandFailedError as e:
+        log.debug('Command Failed: {e}'.format(e=e))
+    if len(args) > 0:
+        confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])]
+        log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs))
+        daemon.restart_with_args(confargs)
+    else:
+        log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_))
+        daemon.restart()
+
+def get_tests(ctx, config, role, remote, testdir):
+    """Download restart tests"""
+    srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role)
+
+    refspec = config.get('branch')
+    if refspec is None:
+        refspec = config.get('sha1')
+    if refspec is None:
+        refspec = config.get('tag')
+    if refspec is None:
+        refspec = 'HEAD'
+    log.info('Pulling restart qa/workunits from ref %s', refspec)
+
+    remote.run(
+        logger=log.getChild(role),
+        args=[
+            'mkdir', '--', srcdir,
+            run.Raw('&&'),
+            'git',
+            'archive',
+            '--remote=git://git.ceph.com/ceph.git',
+            '%s:qa/workunits' % refspec,
+            run.Raw('|'),
+            'tar',
+            '-C', srcdir,
+            '-x',
+            '-f-',
+            run.Raw('&&'),
+            'cd', '--', srcdir,
+            run.Raw('&&'),
+            'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
+            run.Raw('&&'),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
+            run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)),
+            ],
+        )
+    restarts = sorted(teuthology.get_file(
+                        remote,
+                        '{tdir}/restarts.list'.format(tdir=testdir)).split('\0'))
+    return (srcdir, restarts)
+
+def task(ctx, config):
+    """
+    Execute commands and allow daemon restart with config options.
+    Each process executed can output to stdout restart commands of the form:
+        restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
+    This will restart the daemon <role>.<id> with the specified config values once
+    by modifying the conf file with those values, and then replacing the old conf file
+    once the daemon is restarted.
+    This task does not kill a running daemon; it assumes the daemon will abort on an
+    assert specified in the config.
+
+        tasks:
+        - install:
+        - ceph:
+        - restart:
+            exec:
+              client.0:
+                - test_backtraces.py
+
+    """
+    assert isinstance(config, dict), "task kill got invalid config"
+
+    testdir = teuthology.get_testdir(ctx)
+
+    try:
+        assert 'exec' in config, "config requires exec key with <role>: <command> entries"
+        for role, task in config['exec'].iteritems():
+            log.info('restart for role {r}'.format(r=role))
+            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+            srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
+            log.info('Running command on role %s host %s', role, remote.name)
+            spec = '{spec}'.format(spec=task[0])
+            log.info('Restarts list: %s', restarts)
+            log.info('Spec is %s', spec)
+            to_run = [w for w in restarts if w == task or w.find(spec) != -1]
+            log.info('To run: %s', to_run)
+            for c in to_run:
+                log.info('Running restart script %s...', c)
+                args = [
+                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
+                    ]
+                env = config.get('env')
+                if env is not None:
+                    for var, val in env.iteritems():
+                        quoted_val = pipes.quote(val)
+                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
+                        args.append(run.Raw(env_arg))
+                args.extend([
+                            'adjust-ulimits',
+                            'ceph-coverage',
+                            '{tdir}/archive/coverage'.format(tdir=testdir),
+                            '{srcdir}/{c}'.format(
+                                srcdir=srcdir,
+                                c=c,
+                                ),
+                            ])
+                proc = remote.run(
+                    args=args,
+                    stdout=tor.PIPE,
+                    stdin=tor.PIPE,
+                    stderr=log,
+                    wait=False,
+                    )
+                log.info('waiting for a command from script')
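+                # consume restart requests from the script's stdout and
+                # acknowledge each one so the script can continue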
+                while True:
+                    l = proc.stdout.readline()
+                    if not l or l == '':
+                        break
+                    log.debug('script command: {c}'.format(c=l))
+                    ll = l.strip()
+                    cmd = ll.split(' ')
+                    if cmd[0] == "done":
+                        break
+                    assert cmd[0] == 'restart', "script sent invalid command request to kill task"
+                    # cmd should be: restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
+                    # or to clear, just: restart <role> <id>
+                    restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:])
+                    proc.stdin.writelines(['restarted\n'])
+                    proc.stdin.flush()
+                try:
+                    proc.wait()
+                except tor.CommandFailedError:
+                    raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
+    finally:
+        log.info('Finishing %s on %s...', task, role)
+        remote.run(
+            logger=log.getChild(role),
+            args=[
+                'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir,
+                ],
+            )
diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py
new file mode 100644
index 0000000..3c8d8de
--- /dev/null
+++ b/qa/tasks/rgw.py
@@ -0,0 +1,1377 @@
+"""
+rgw routines
+"""
+import argparse
+import contextlib
+import json
+import logging
+import os
+import errno
+import util.rgw as rgw_utils
+
+from requests.packages.urllib3 import PoolManager
+from requests.packages.urllib3.util import Retry
+
+from cStringIO import StringIO
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra.run import CommandFailedError
+from util.rgw import rgwadmin
+from util.rados import (rados, create_ec_pool,
+                                        create_replicated_pool,
+                                        create_cache_pool)
+
+log = logging.getLogger(__name__)
+
+def get_config_master_client(ctx, config, regions):
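+    """
+    Return the client role whose zone and zonegroup match the master zone of
+    the master zonegroup, or None if no such client is configured.
+    """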
+
+    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
+                       for client, c_config in config.iteritems()])
+    log.debug('roles_zones = %r', role_zones)
+    region_info = dict([
+        (region_name, extract_region_info(region_name, r_config))
+        for region_name, r_config in regions.iteritems()])
+
+     # read master zonegroup and master_zone
+    for zonegroup, zg_info in region_info.iteritems():
+        if zg_info['is_master']:
+            master_zonegroup = zonegroup
+            master_zone = zg_info['master_zone']
+            break
+
+    for client in config.iterkeys():
+        (zonegroup, zone, zone_info) = role_zones[client]
+        if zonegroup == master_zonegroup and zone == master_zone:
+            return client
+
+    return None
+
+@contextlib.contextmanager
+def create_apache_dirs(ctx, config, on_client = None, except_client = None):
+    """
+    Remotely create apache directories.  Delete when finished.
+    """
+    log.info('Creating apache directories...')
+    log.debug('client is %r', on_client)
+    testdir = teuthology.get_testdir(ctx)
+    clients_to_create_as = [on_client]
+    if on_client is None:
+        clients_to_create_as = config.keys()
+    for client in clients_to_create_as:
+        if client == except_client:
+            continue
+        ctx.cluster.only(client).run(
+            args=[
+                'mkdir',
+                '-p',
+                '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
+                                                       client=client),
+                '{tdir}/apache/tmp.{client}/fastcgi_sock'.format(
+                    tdir=testdir,
+                    client=client),
+                run.Raw('&&'),
+                'mkdir',
+                '{tdir}/archive/apache.{client}'.format(tdir=testdir,
+                                                        client=client),
+                ],
+            )
+    try:
+        yield
+    finally:
+        log.info('Cleaning up apache directories...')
+        for client in clients_to_create_as:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-rf',
+                    '{tdir}/apache/tmp.{client}'.format(tdir=testdir,
+                                                        client=client),
+                    run.Raw('&&'),
+                    'rmdir',
+                    '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
+                                                           client=client),
+                    ],
+                )
+        for client in clients_to_create_as:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rmdir',
+                    '{tdir}/apache'.format(tdir=testdir),
+                    ],
+                check_status=False,  # only need to remove once per host
+                )
+
+
+def _use_uds_with_fcgi(remote):
+    """
+    Returns true if this node supports the usage of
+    unix domain sockets with mod_proxy_fcgi.
+
+    FIXME: returns False always for now until we know for
+    sure what distros will support UDS. RHEL 7.0 is the only one
+    currently I know of, but we can't install that version of apache
+    yet in the labs.
+    """
+    return False
+
+
+@contextlib.contextmanager
+def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
+                        except_client = None):
+    """
+    Ship apache config and rgw.fcgi to all clients.  Clean up on termination.
+    """
+    assert isinstance(config, dict)
+    assert isinstance(role_endpoints, dict)
+    testdir = teuthology.get_testdir(ctx)
+    log.info('Shipping apache config and rgw.fcgi...')
+    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
+    clients_to_create_as = [on_client]
+    if on_client is None:
+        clients_to_create_as = config.keys()
+    for client in clients_to_create_as:
+        if client == except_client:
+            continue
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        system_type = teuthology.get_system_type(remote)
+        conf = config.get(client)
+        if not conf:
+            conf = {}
+        idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout)
+        if system_type == 'deb':
+            mod_path = '/usr/lib/apache2/modules'
+            print_continue = 'on'
+            user = 'www-data'
+            group = 'www-data'
+            apache24_modconfig = '''
+  IncludeOptional /etc/apache2/mods-available/mpm_event.conf
+  IncludeOptional /etc/apache2/mods-available/mpm_event.load
+'''
+        else:
+            mod_path = '/usr/lib64/httpd/modules'
+            print_continue = 'off'
+            user = 'apache'
+            group = 'apache'
+            apache24_modconfig = \
+                'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf'
+        host, port = role_endpoints[client]
+
+        # decide if we want to use mod_fastcgi or mod_proxy_fcgi
+        template_dir = os.path.dirname(__file__)
+        fcgi_config = os.path.join(template_dir,
+                                   'mod_proxy_fcgi.tcp.conf.template')
+        if ctx.rgw.use_fastcgi:
+            log.info("Apache is configured to use mod_fastcgi")
+            fcgi_config = os.path.join(template_dir,
+                                       'mod_fastcgi.conf.template')
+        elif _use_uds_with_fcgi(remote):
+            log.info("Apache is configured to use mod_proxy_fcgi with UDS")
+            fcgi_config = os.path.join(template_dir,
+                                       'mod_proxy_fcgi.uds.conf.template')
+        else:
+            log.info("Apache is configured to use mod_proxy_fcgi with TCP")
+
+        with file(fcgi_config, 'rb') as f:
+            fcgi_config = f.read()
+        with file(src, 'rb') as f:
+            conf = f.read() + fcgi_config
+            conf = conf.format(
+                testdir=testdir,
+                mod_path=mod_path,
+                print_continue=print_continue,
+                host=host,
+                port=port,
+                client=client,
+                idle_timeout=idle_timeout,
+                user=user,
+                group=group,
+                apache24_modconfig=apache24_modconfig,
+                )
+            teuthology.write_file(
+                remote=remote,
+                path='{tdir}/apache/apache.{client}.conf'.format(
+                    tdir=testdir,
+                    client=client),
+                data=conf,
+                )
+        rgw_options = []
+        if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
+            rgw_options = [
+                '--rgw-socket-path',
+                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+                    tdir=testdir,
+                    client=client
+                ),
+                '--rgw-frontends',
+                'fastcgi',
+            ]
+        else:
+            rgw_options = [
+                '--rgw-socket-path', '""',
+                '--rgw-print-continue', 'false',
+                '--rgw-frontends',
+                'fastcgi socket_port=9000 socket_host=0.0.0.0',
+            ]
+
+        teuthology.write_file(
+            remote=remote,
+            path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+                tdir=testdir,
+                client=client),
+            data="""#!/bin/sh
+ulimit -c unlimited
+exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring {rgw_options}
+
+""".format(tdir=testdir, client=client, rgw_options=" ".join(rgw_options))
+            )
+        remote.run(
+            args=[
+                'chmod',
+                'a=rx',
+                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
+                                                                client=client),
+                ],
+            )
+    try:
+        yield
+    finally:
+        log.info('Removing apache config...')
+        for client in clients_to_create_as:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-f',
+                    '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
+                                                                client=client),
+                    run.Raw('&&'),
+                    'rm',
+                    '-f',
+                    '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+                        tdir=testdir,
+                        client=client),
+                    ],
+                )
+
+
+@contextlib.contextmanager
+def start_rgw(ctx, config, on_client = None, except_client = None):
+    """
+    Start rgw on remote sites.
+    """
+    log.info('Starting rgw...')
+    log.debug('client %r', on_client)
+    clients_to_run = [on_client]
+    if on_client is None:
+        clients_to_run = config.keys()
+    testdir = teuthology.get_testdir(ctx)
+    for client in clients_to_run:
+        if client == except_client:
+            continue
+        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        zone = rgw_utils.zone_for_client(ctx, client)
+        log.debug('zone %s', zone)
+        client_config = config.get(client)
+        if client_config is None:
+            client_config = {}
+        log.info("rgw %s config is %s", client, client_config)
+        id_ = client.split('.', 1)[1]
+        log.info('client {client} is id {id}'.format(client=client, id=id_))
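+        # wrap radosgw with adjust-ulimits, ceph-coverage and daemon-helper so
+        # coverage output lands in the archive and the daemon can be torn down
+        # cleanly when the run finishes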
+        cmd_prefix = [
+            'sudo',
+            'adjust-ulimits',
+            'ceph-coverage',
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            'daemon-helper',
+            'term',
+            ]
+
+        rgw_cmd = ['radosgw']
+
+        if ctx.rgw.frontend == 'apache':
+            if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
+                rgw_cmd.extend([
+                    '--rgw-socket-path',
+                    '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+                        tdir=testdir,
+                        client=client,
+                    ),
+                    '--rgw-frontends',
+                    'fastcgi',
+                ])
+            else:
+                # for mod_proxy_fcgi, using tcp
+                rgw_cmd.extend([
+                    '--rgw-socket-path', '',
+                    '--rgw-print-continue', 'false',
+                    '--rgw-frontends',
+                    'fastcgi socket_port=9000 socket_host=0.0.0.0',
+                ])
+
+        elif ctx.rgw.frontend == 'civetweb':
+            host, port = ctx.rgw.role_endpoints[client]
+            rgw_cmd.extend([
+                '--rgw-frontends',
+                'civetweb port={port}'.format(port=port),
+            ])
+
+        if zone is not None:
+            rgw_cmd.extend(['--rgw-zone', zone])
+
+        rgw_cmd.extend([
+            '-n', client,
+            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
+            '--log-file',
+            '/var/log/ceph/rgw.{client}.log'.format(client=client),
+            '--rgw_ops_log_socket_path',
+            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
+                                                     client=client),
+            '--foreground',
+            run.Raw('|'),
+            'sudo',
+            'tee',
+            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
+                                                       client=client),
+            run.Raw('2>&1'),
+            ])
+
+        if client_config.get('valgrind'):
+            cmd_prefix = teuthology.get_valgrind_args(
+                testdir,
+                client,
+                cmd_prefix,
+                client_config.get('valgrind')
+                )
+
+        run_cmd = list(cmd_prefix)
+        run_cmd.extend(rgw_cmd)
+
+        ctx.daemons.add_daemon(
+            remote, 'rgw', client,
+            args=run_cmd,
+            logger=log.getChild(client),
+            stdin=run.PIPE,
+            wait=False,
+            )
+
+    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
+    # use a connection pool with retry/backoff to poll each gateway until it starts listening
+    http = PoolManager(retries=Retry(connect=8, backoff_factor=1))
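+    # Retry(connect=8, backoff_factor=1) makes each GET below retry refused
+    # connections with exponential backoff rather than failing immediately
+    # while the gateway is still starting up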
+    for client in clients_to_run:
+        if client == except_client:
+            continue
+        host, port = ctx.rgw.role_endpoints[client]
+        endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
+        log.info('Polling {client} until it starts accepting connections on {endpoint}'.format(client=client, endpoint=endpoint))
+        http.request('GET', endpoint)
+
+    try:
+        yield
+    finally:
+        teuthology.stop_daemons_of_type(ctx, 'rgw')
+        for client in config.iterkeys():
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-f',
+                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
+                                                             client=client),
+                    ],
+                )
+
+
+@contextlib.contextmanager
+def start_apache(ctx, config, on_client = None, except_client = None):
+    """
+    Start apache on remote sites.
+    """
+    log.info('Starting apache...')
+    testdir = teuthology.get_testdir(ctx)
+    apaches = {}
+    clients_to_run = [on_client]
+    if on_client is None:
+        clients_to_run = config.keys()
+    for client in clients_to_run:
+        if client == except_client:
+            continue
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        system_type = teuthology.get_system_type(remote)
+        if system_type == 'deb':
+            apache_name = 'apache2'
+        else:
+            try:
+                remote.run(
+                    args=[
+                        'stat',
+                        '/usr/sbin/httpd.worker',
+                    ],
+                )
+                apache_name = '/usr/sbin/httpd.worker'
+            except CommandFailedError:
+                apache_name = '/usr/sbin/httpd'
+
+        proc = remote.run(
+            args=[
+                'adjust-ulimits',
+                'daemon-helper',
+                'kill',
+                apache_name,
+                '-X',
+                '-f',
+                '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
+                                                            client=client),
+                ],
+            logger=log.getChild(client),
+            stdin=run.PIPE,
+            wait=False,
+            )
+        apaches[client] = proc
+
+    try:
+        yield
+    finally:
+        log.info('Stopping apache...')
+        for client, proc in apaches.iteritems():
+            proc.stdin.close()
+
+        run.wait(apaches.itervalues())
+
+
+def extract_user_info(client_config):
+    """
+    Extract user info from the client config specified.  Returns a dict
+    that includes system key information.
+    """
+    # if there is no system user, or no name for that user, return None
+    if ('system user' not in client_config or
+            'name' not in client_config['system user']):
+        return None
+
+    user_info = dict()
+    user_info['system_key'] = dict(
+        user=client_config['system user']['name'],
+        access_key=client_config['system user']['access key'],
+        secret_key=client_config['system user']['secret key'],
+        )
+    return user_info
+
+
+def extract_zone_info(ctx, client, client_config):
+    """
+    Get zone information.
+    :param client: dictionary of client information
+    :param client_config: dictionary of client configuration information
+    :returns: zone extracted from client and client_config information
+    """
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
+        assert key in ceph_config, \
+            'ceph conf must contain {key} for {client}'.format(key=key,
+                                                               client=client)
+    region = ceph_config['rgw region']
+    zone = ceph_config['rgw zone']
+    zone_info = dict()
+    for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool',
+                'rgw intent log pool', 'rgw usage log pool',
+                'rgw user keys pool', 'rgw user email pool',
+                'rgw user swift pool', 'rgw user uid pool',
+                'rgw domain root']:
+        new_key = key.split(' ', 1)[1]
+        new_key = new_key.replace(' ', '_')
+
+        if key in ceph_config:
+            value = ceph_config[key]
+            log.debug('{key} specified in ceph_config ({val})'.format(
+                key=key, val=value))
+            zone_info[new_key] = value
+        else:
+            zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key
+
+    index_pool = '.' + region + '.' + zone + '.' + 'index_pool'
+    data_pool = '.' + region + '.' + zone + '.' + 'data_pool'
+    data_extra_pool = '.' + region + '.' + zone + '.' + 'data_extra_pool'
+
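+    # default placement target for the zone: separate per-zone pools for the
+    # bucket index, object data and extra data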
+    zone_info['placement_pools'] = [{'key': 'default_placement',
+                                     'val': {'index_pool': index_pool,
+                                             'data_pool': data_pool,
+                                             'data_extra_pool': data_extra_pool}
+                                     }]
+
+    # these keys are meant for the zones argument in the region info.  We
+    # insert them into zone_info with a different format and then remove them
+    # in the fill_in_endpoints() method
+    for key in ['rgw log meta', 'rgw log data']:
+        if key in ceph_config:
+            zone_info[key] = ceph_config[key]
+
+    return region, zone, zone_info
+
+
+def extract_region_info(region, region_info):
+    """
+    Extract region information from the region_info parameter, using get
+    to set default values.
+
+    :param region: name of the region
+    :param region_info: region information (in dictionary form).
+    :returns: dictionary of region information set from region_info, using
+            default values for missing fields.
+    """
+    assert isinstance(region_info['zones'], list) and region_info['zones'], \
+        'zones must be a non-empty list'
+    return dict(
+        name=region,
+        api_name=region_info.get('api name', region),
+        is_master=region_info.get('is master', False),
+        log_meta=region_info.get('log meta', False),
+        log_data=region_info.get('log data', False),
+        master_zone=region_info.get('master zone', region_info['zones'][0]),
+        placement_targets=region_info.get('placement targets',
+                                          [{'name': 'default_placement',
+                                            'tags': []}]),
+        default_placement=region_info.get('default placement',
+                                          'default_placement'),
+        )
+
+
+def assign_ports(ctx, config):
+    """
+    Assign port numbers starting with port 7280.
+    """
+    port = 7280
+    role_endpoints = {}
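+    # walk the remotes and hand out one port per rgw client role, keyed by
+    # role name and starting at 7280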
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for role in roles_for_host:
+            if role in config:
+                role_endpoints[role] = (remote.name.split('@')[1], port)
+                port += 1
+
+    return role_endpoints
+
+
+def fill_in_endpoints(region_info, role_zones, role_endpoints):
+    """
+    Iterate through the list of role_endpoints, filling in zone information
+
+    :param region_info: region data
+    :param role_zones: region and zone information.
+    :param role_endpoints: endpoints being used
+    """
+    for role, (host, port) in role_endpoints.iteritems():
+        region, zone, zone_info, _ = role_zones[role]
+        host, port = role_endpoints[role]
+        endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
+        # check if the region specified under client actually exists
+        # in region_info (it should, if properly configured).
+        # If not, throw a reasonable error
+        if region not in region_info:
+            raise Exception(
+                'Region: {region} was specified but no corresponding'
+                ' entry was found under \'regions\''.format(region=region))
+
+        region_conf = region_info[region]
+        region_conf.setdefault('endpoints', [])
+        region_conf['endpoints'].append(endpoint)
+
+        # this is the payload for the 'zones' field in the region field
+        zone_payload = dict()
+        zone_payload['endpoints'] = [endpoint]
+        zone_payload['name'] = zone
+
+        # Pull the log meta and log data settings out of zone_info, if they
+        # exist, then pop them as they don't actually belong in the zone info
+        for key in ['rgw log meta', 'rgw log data']:
+            new_key = key.split(' ', 1)[1]
+            new_key = new_key.replace(' ', '_')
+
+            if key in zone_info:
+                value = zone_info.pop(key)
+            else:
+                value = 'false'
+
+            zone_payload[new_key] = value
+
+        region_conf.setdefault('zones', [])
+        region_conf['zones'].append(zone_payload)
+
+
+@contextlib.contextmanager
+def configure_users_for_client(ctx, config, client, everywhere=False):
+    """
+    Create users by remotely running rgwadmin commands using extracted
+    user information.
+    """
+    log.info('Configuring users...')
+    log.info('for client %s', client)
+    log.info('everywhere %s', everywhere)
+
+    # For data sync the master zones and regions must have the
+    # system users of the secondary zones. To keep this simple,
+    # just create the system users on every client if regions are
+    # configured.
+    clients_to_create_as = [client]
+    if everywhere:
+        clients_to_create_as = config.keys()
+
+    # extract the user info and append it to the payload tuple for the given
+    # client
+    for client, c_config in config.iteritems():
+        if not c_config:
+            continue
+        user_info = extract_user_info(c_config)
+        if not user_info:
+            continue
+
+        for client_name in clients_to_create_as:
+            log.debug('Creating user {user} on {client}'.format(
+                user=user_info['system_key']['user'], client=client_name))
+            rgwadmin(ctx, client_name,
+                     cmd=[
+                         'user', 'create',
+                         '--uid', user_info['system_key']['user'],
+                         '--access-key', user_info['system_key']['access_key'],
+                         '--secret', user_info['system_key']['secret_key'],
+                         '--display-name', user_info['system_key']['user'],
+                         '--system',
+                     ],
+                     check_status=True,
+            )
+    yield
+
+@contextlib.contextmanager
+def configure_users(ctx, config,  everywhere=False):
+    """
+    Create users by remotely running rgwadmin commands using extracted
+    user information.
+    """
+    log.info('Configuring users...')
+
+    # extract the user info and append it to the payload tuple for the given
+    # client
+    for client, c_config in config.iteritems():
+        if not c_config:
+            continue
+        user_info = extract_user_info(c_config)
+        if not user_info:
+            continue
+
+        # For data sync the master zones and regions must have the
+        # system users of the secondary zones. To keep this simple,
+        # just create the system users on every client if regions are
+        # configured.
+        clients_to_create_as = [client]
+        if everywhere:
+            clients_to_create_as = config.keys()
+        for client_name in clients_to_create_as:
+            log.debug('Creating user {user} on {client}'.format(
+                      user=user_info['system_key']['user'], client=client))
+            rgwadmin(ctx, client_name,
+                     cmd=[
+                         'user', 'create',
+                         '--uid', user_info['system_key']['user'],
+                         '--access-key', user_info['system_key']['access_key'],
+                         '--secret', user_info['system_key']['secret_key'],
+                         '--display-name', user_info['system_key']['user'],
+                         '--system',
+                     ],
+                     check_status=True,
+                     )
+
+    yield
+
+@contextlib.contextmanager
+def create_nonregion_pools(ctx, config, regions):
+    """Create replicated or erasure coded data pools for rgw."""
+    if regions:
+        yield
+        return
+
+    log.info('creating data pools')
+    for client in config.keys():
+        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        data_pool = '.rgw.buckets'
+        if ctx.rgw.ec_data_pool:
+            create_ec_pool(remote, data_pool, client, 64,
+                           ctx.rgw.erasure_code_profile)
+        else:
+            create_replicated_pool(remote, data_pool, 64)
+        if ctx.rgw.cache_pools:
+            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
+                              64*1024*1024)
+    yield
+
+@contextlib.contextmanager
+def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints, realm, master_client):
+    """
+    Configure multisite regions and zones from rados and rgw.
+    """
+    if not regions:
+        log.debug(
+            'In rgw.configure_multisite_regions_and_zones() and regions is None. '
+            'Bailing')
+        yield
+        return
+
+    if not realm:
+        log.debug(
+            'In rgw.configure_multisite_regions_and_zones() and realm is None. '
+            'Bailing')
+        yield
+        return
+
+    log.info('Configuring multisite regions and zones...')
+
+    log.debug('config is %r', config)
+    log.debug('regions are %r', regions)
+    log.debug('role_endpoints = %r', role_endpoints)
+    log.debug('realm is %r', realm)
+    # extract the zone info
+    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
+                       for client, c_config in config.iteritems()])
+    log.debug('role_zones = %r', role_zones)
+
+    # extract the user info and append it to the payload tuple for the given
+    # client
+    for client, c_config in config.iteritems():
+        if not c_config:
+            user_info = None
+        else:
+            user_info = extract_user_info(c_config)
+
+        (region, zone, zone_info) = role_zones[client]
+        role_zones[client] = (region, zone, zone_info, user_info)
+
+    region_info = dict([
+        (region_name, extract_region_info(region_name, r_config))
+        for region_name, r_config in regions.iteritems()])
+
+    fill_in_endpoints(region_info, role_zones, role_endpoints)
+
+    # clear out the old defaults
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    # read master zonegroup and master_zone
+    for zonegroup, zg_info in region_info.iteritems():
+        if zg_info['is_master']:
+            master_zonegroup = zonegroup
+            master_zone = zg_info['master_zone']
+            break
+
+    log.debug('master zonegroup =%r', master_zonegroup)
+    log.debug('master zone = %r', master_zone)
+    log.debug('master client = %r', master_client)
+
+    rgwadmin(ctx, master_client,
+             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
+             check_status=True)
+
+    for region, info in region_info.iteritems():
+        region_json = json.dumps(info)
+        log.debug('region info is: %s', region_json)
+        rgwadmin(ctx, master_client,
+                 cmd=['zonegroup', 'set'],
+                 stdin=StringIO(region_json),
+                 check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
+             check_status=True)
+
+    for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
+        (remote,) = ctx.cluster.only(role).remotes.keys()
+        for pool_info in zone_info['placement_pools']:
+            remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
+                             pool_info['val']['index_pool'], '64', '64'])
+            if ctx.rgw.ec_data_pool:
+                create_ec_pool(remote, pool_info['val']['data_pool'],
+                               zone, 64, ctx.rgw.erasure_code_profile)
+            else:
+                create_replicated_pool(remote, pool_info['val']['data_pool'], 64)
+
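+    # merge the master zone's pool layout with its system user's keys into a
+    # single zone config document and push it via 'zone set'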
+    (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
+    zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
+    log.debug("zone info is: %r", zone_json)
+    rgwadmin(ctx, master_client,
+             cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
+                  '--rgw-zone', zone],
+             stdin=StringIO(zone_json),
+             check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['-n', master_client, 'zone', 'default', zone],
+             check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['-n', master_client, 'period', 'update', '--commit'],
+             check_status=True)
+
+    yield
+
+@contextlib.contextmanager
+def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
+    """
+    Configure regions and zones from rados and rgw.
+    """
+    if not regions:
+        log.debug(
+            'In rgw.configure_regions_and_zones() and regions is None. '
+            'Bailing')
+        yield
+        return
+
+    if not realm:
+        log.debug(
+            'In rgw.configure_regions_and_zones() and realm is None. '
+            'Bailing')
+        yield
+        return
+
+    log.info('Configuring regions and zones...')
+
+    log.debug('config is %r', config)
+    log.debug('regions are %r', regions)
+    log.debug('role_endpoints = %r', role_endpoints)
+    log.debug('realm is %r', realm)
+    # extract the zone info
+    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
+                       for client, c_config in config.iteritems()])
+    log.debug('role_zones = %r', role_zones)
+
+    # extract the user info and append it to the payload tuple for the given
+    # client
+    for client, c_config in config.iteritems():
+        if not c_config:
+            user_info = None
+        else:
+            user_info = extract_user_info(c_config)
+
+        (region, zone, zone_info) = role_zones[client]
+        role_zones[client] = (region, zone, zone_info, user_info)
+
+    region_info = dict([
+        (region_name, extract_region_info(region_name, r_config))
+        for region_name, r_config in regions.iteritems()])
+
+    fill_in_endpoints(region_info, role_zones, role_endpoints)
+
+    # clear out the old defaults
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+    # removing these objects from .rgw.root and the per-zone root pools
+    # may or may not matter
+    rados(ctx, mon,
+          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
+    rados(ctx, mon,
+          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
+
+    # read master zonegroup and master_zone
+    for zonegroup, zg_info in region_info.iteritems():
+        if zg_info['is_master']:
+            master_zonegroup = zonegroup
+            master_zone = zg_info['master_zone']
+            break
+
+    for client in config.iterkeys():
+        (zonegroup, zone, zone_info, user_info) = role_zones[client]
+        if zonegroup == master_zonegroup and zone == master_zone:
+            master_client = client
+            break
+
+    log.debug('master zonegroup =%r', master_zonegroup)
+    log.debug('master zone = %r', master_zone)
+    log.debug('master client = %r', master_client)
+    log.debug('config %r ', config)
+
+    (ret, out)=rgwadmin(ctx, master_client,
+                        cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
+    log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
+    assert ret == 0 or ret == -errno.EEXIST
+    if ret == -errno.EEXIST:
+        log.debug('realm %r exists', realm)
+
+    for client in config.iterkeys():
+        for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
+            rados(ctx, mon,
+                  cmd=['-p', zone_info['domain_root'],
+                       'rm', 'region_info.default'])
+            rados(ctx, mon,
+                  cmd=['-p', zone_info['domain_root'],
+                       'rm', 'zone_info.default'])
+
+            (remote,) = ctx.cluster.only(role).remotes.keys()
+            for pool_info in zone_info['placement_pools']:
+                remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
+                                 pool_info['val']['index_pool'], '64', '64'])
+                if ctx.rgw.ec_data_pool:
+                    create_ec_pool(remote, pool_info['val']['data_pool'],
+                                   zone, 64, ctx.rgw.erasure_code_profile)
+                else:
+                    create_replicated_pool(
+                        remote, pool_info['val']['data_pool'],
+                        64)
+            zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
+            log.debug('zone info is: %r', zone_json)
+            rgwadmin(ctx, client,
+                 cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
+                      '--rgw-zone', zone],
+                 stdin=StringIO(zone_json),
+                 check_status=True)
+
+        for region, info in region_info.iteritems():
+            region_json = json.dumps(info)
+            log.debug('region info is: %s', region_json)
+            rgwadmin(ctx, client,
+                     cmd=['zonegroup', 'set'],
+                     stdin=StringIO(region_json),
+                     check_status=True)
+            if info['is_master']:
+                rgwadmin(ctx, client,
+                         cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
+                         check_status=True)
+
+        (zonegroup, zone, zone_info, user_info) = role_zones[client]
+        rgwadmin(ctx, client,
+                 cmd=['zone', 'default', zone],
+                 check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['-n', master_client, 'period', 'update', '--commit'],
+             check_status=True)
+
+    yield
+
+@contextlib.contextmanager
+def pull_configuration(ctx, config, regions, role_endpoints, realm, master_client):
+    """
+    Pull the realm and period from the master client and configure zones on the remaining clients.
+    """
+    if not regions:
+        log.debug(
+            'In rgw.pull_configuration() and regions is None. '
+            'Bailing')
+        yield
+        return
+
+    if not realm:
+        log.debug(
+            'In rgw.pull_configuration() and realm is None. '
+            'Bailing')
+        yield
+        return
+
+    log.info('Pulling configuration...')
+
+    log.debug('config is %r', config)
+    log.debug('regions are %r', regions)
+    log.debug('role_endpoints = %r', role_endpoints)
+    log.debug('realm is %r', realm)
+    log.debug('master client = %r', master_client)
+
+    # extract the zone info
+    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
+                       for client, c_config in config.iteritems()])
+    log.debug('role_zones = %r', role_zones)
+
+    # extract the user info and append it to the payload tuple for the given
+    # client
+    for client, c_config in config.iteritems():
+        if not c_config:
+            user_info = None
+        else:
+            user_info = extract_user_info(c_config)
+
+        (region, zone, zone_info) = role_zones[client]
+        role_zones[client] = (region, zone, zone_info, user_info)
+
+    region_info = dict([
+        (region_name, extract_region_info(region_name, r_config))
+        for region_name, r_config in regions.iteritems()])
+
+    fill_in_endpoints(region_info, role_zones, role_endpoints)
+
+    for client in config.iterkeys():
+        if client != master_client:
+            host, port = role_endpoints[master_client]
+            endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
+            log.debug("endpoint: %s", endpoint)
+            rgwadmin(ctx, client,
+                cmd=['-n', client, 'realm', 'pull', '--rgw-realm', realm, '--default', '--url',
+                     endpoint, '--access_key',
+                     user_info['system_key']['access_key'], '--secret',
+                     user_info['system_key']['secret_key']],
+                     check_status=True)
+
+            (zonegroup, zone, zone_info, zone_user_info) = role_zones[client]
+            zone_json = json.dumps(dict(zone_info.items() + zone_user_info.items()))
+            log.debug("zone info is: %r", zone_json)
+            rgwadmin(ctx, client,
+                     cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
+                          '--rgw-zone', zone],
+                     stdin=StringIO(zone_json),
+                     check_status=True)
+
+            rgwadmin(ctx, client,
+                     cmd=['period', 'update', '--commit', '--url',
+                          endpoint, '--access_key',
+                          user_info['system_key']['access_key'], '--secret',
+                          user_info['system_key']['secret_key']],
+                     check_status=True)
+
+    yield
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Either configure apache to run a rados gateway, or use the built-in
+    civetweb server.
+    Only one should be run per machine, since it uses a hard-coded port for
+    now.
+
+    For example, to run rgw on all clients::
+
+        tasks:
+        - ceph:
+        - rgw:
+
+    To only run on certain clients::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0, client.3]
+
+    or
+
+        tasks:
+        - ceph:
+        - rgw:
+            client.0:
+            client.3:
+
+    You can adjust the idle timeout for fastcgi (default is 30 seconds):
+
+        tasks:
+        - ceph:
+        - rgw:
+            client.0:
+              idle_timeout: 90
+
+    To run radosgw through valgrind:
+
+        tasks:
+        - ceph:
+        - rgw:
+            client.0:
+              valgrind: [--tool=memcheck]
+            client.3:
+              valgrind: [--tool=memcheck]
+
+    To use civetweb instead of apache:
+
+        tasks:
+        - ceph:
+        - rgw:
+          - client.0
+        overrides:
+          rgw:
+            frontend: civetweb
+
+    Note that without a modified fastcgi module e.g. with the default
+    one on CentOS, you must have rgw print continue = false in ceph.conf::
+
+        tasks:
+        - ceph:
+            conf:
+              global:
+                rgw print continue: false
+        - rgw: [client.0]
+
+    To use mod_proxy_fcgi instead of mod_fastcgi:
+
+        overrides:
+          rgw:
+            use_fcgi: true
+
+    To run rgws for multiple regions or zones, describe the regions
+    and their zones in a regions section. The endpoints will be
+    generated by this task. Each client must have a region, zone,
+    and pools assigned in ceph.conf::
+
+        tasks:
+        - install:
+        - ceph:
+            conf:
+              client.0:
+                rgw region: foo
+                rgw zone: foo-1
+                rgw region root pool: .rgw.rroot.foo
+                rgw zone root pool: .rgw.zroot.foo
+                rgw log meta: true
+                rgw log data: true
+              client.1:
+                rgw region: bar
+                rgw zone: bar-master
+                rgw region root pool: .rgw.rroot.bar
+                rgw zone root pool: .rgw.zroot.bar
+                rgw log meta: true
+                rgw log data: true
+              client.2:
+                rgw region: bar
+                rgw zone: bar-secondary
+                rgw region root pool: .rgw.rroot.bar
+                rgw zone root pool: .rgw.zroot.bar-secondary
+        - rgw:
+            default_idle_timeout: 30
+            ec-data-pool: true
+            erasure_code_profile:
+              k: 2
+              m: 1
+              ruleset-failure-domain: osd
+            realm: foo
+            regions:
+              foo:
+                api name: api_name # default: region name
+                is master: true    # default: false
+                master zone: foo-1 # default: first zone
+                zones: [foo-1]
+                log meta: true
+                log data: true
+                placement targets: [target1, target2] # default: []
+                default placement: target2            # default: ''
+              bar:
+                api name: bar-api
+                zones: [bar-master, bar-secondary]
+            client.0:
+              system user:
+                name: foo-system
+                access key: X2IYPSTY1072DDY1SJMC
+                secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+            client.1:
+              system user:
+                name: bar1
+                access key: Y2IYPSTY1072DDY1SJMC
+                secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+            client.2:
+              system user:
+                name: bar2
+                access key: Z2IYPSTY1072DDY1SJMC
+                secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+    """
+    if config is None:
+        config = dict(('client.{id}'.format(id=id_), None)
+                      for id_ in teuthology.all_roles_of_type(
+                          ctx.cluster, 'client'))
+    elif isinstance(config, list):
+        config = dict((name, None) for name in config)
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('rgw', {}))
+
+    regions = {}
+    if 'regions' in config:
+        # separate region info so only clients are keys in config
+        regions = config['regions']
+        del config['regions']
+
+    role_endpoints = assign_ports(ctx, config)
+    ctx.rgw = argparse.Namespace()
+    ctx.rgw.role_endpoints = role_endpoints
+    # stash the region info for later, since it was deleted from the config
+    # structure
+    ctx.rgw.regions = regions
+
+    realm = None
+    if 'realm' in config:
+        # separate region info so only clients are keys in config
+        realm = config['realm']
+        del config['realm']
+    ctx.rgw.realm = realm
+
+    ctx.rgw.ec_data_pool = False
+    if 'ec-data-pool' in config:
+        ctx.rgw.ec_data_pool = bool(config['ec-data-pool'])
+        del config['ec-data-pool']
+    ctx.rgw.erasure_code_profile = {}
+    if 'erasure_code_profile' in config:
+        ctx.rgw.erasure_code_profile = config['erasure_code_profile']
+        del config['erasure_code_profile']
+    ctx.rgw.default_idle_timeout = 30
+    if 'default_idle_timeout' in config:
+        ctx.rgw.default_idle_timeout = int(config['default_idle_timeout'])
+        del config['default_idle_timeout']
+    ctx.rgw.cache_pools = False
+    if 'cache-pools' in config:
+        ctx.rgw.cache_pools = bool(config['cache-pools'])
+        del config['cache-pools']
+
+    ctx.rgw.frontend = 'civetweb'
+    if 'frontend' in config:
+        ctx.rgw.frontend = config['frontend']
+        del config['frontend']
+
+    ctx.rgw.use_fastcgi = True
+    if "use_fcgi" in config:
+        ctx.rgw.use_fastcgi = False
+        log.info("Using mod_proxy_fcgi instead of mod_fastcgi...")
+        del config['use_fcgi']
+
+    subtasks = [
+        lambda: create_nonregion_pools(
+            ctx=ctx, config=config, regions=regions),
+        ]
+
+    multisite = len(regions) > 1
+
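+    # a single region spanning more than one zone is also a multisite setup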
+    if not multisite:
+        for zonegroup, zonegroup_info in regions.iteritems():
+            log.debug("zonegroup_info =%r", zonegroup_info)
+            if len(zonegroup_info['zones']) > 1:
+                multisite = True
+                break
+
+    log.debug('multisite %s', multisite)
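+    # multisite spread across more than one roles entry means the secondary
+    # clients must pull the realm and period from the master client below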
+    multi_cluster = multisite and len(ctx.config['roles']) > 1
+    log.debug('multi_cluster %s', multi_cluster)
+    master_client = None
+
+    if multi_cluster:
+        log.debug('multi cluster run')
+
+        master_client = get_config_master_client(ctx=ctx,
+                                                 config=config,
+                                                 regions=regions)
+        log.debug('master_client %r', master_client)
+        subtasks.extend([
+            lambda: configure_multisite_regions_and_zones(
+                ctx=ctx,
+                config=config,
+                regions=regions,
+                role_endpoints=role_endpoints,
+                realm=realm,
+                master_client = master_client,
+            )
+        ])
+
+        subtasks.extend([
+            lambda: configure_users_for_client(
+                ctx=ctx,
+                config=config,
+                client=master_client,
+                everywhere=False,
+            ),
+        ])
+
+        if ctx.rgw.frontend == 'apache':
+            subtasks.insert(0,
+                            lambda: create_apache_dirs(ctx=ctx, config=config,
+                                                       on_client=master_client))
+            subtasks.extend([
+                lambda: ship_apache_configs(ctx=ctx, config=config,
+                                            role_endpoints=role_endpoints, on_client=master_client),
+                lambda: start_rgw(ctx=ctx, config=config, on_client=master_client),
+                lambda: start_apache(ctx=ctx, config=config, on_client=master_client),
+            ])
+        elif ctx.rgw.frontend == 'civetweb':
+            subtasks.extend([
+                lambda: start_rgw(ctx=ctx, config=config, on_client=master_client),
+            ])
+        else:
+            raise ValueError("frontend must be 'apache' or 'civetweb'")
+
+        subtasks.extend([
+            lambda: pull_configuration(ctx=ctx,
+                                       config=config,
+                                       regions=regions,
+                                       role_endpoints=role_endpoints,
+                                       realm=realm,
+                                       master_client=master_client
+            ),
+        ])
+
+        subtasks.extend([
+            lambda: configure_users_for_client(
+                ctx=ctx,
+                config=config,
+                client=master_client,
+                everywhere=True
+            ),
+        ])
+
+        if ctx.rgw.frontend == 'apache':
+            subtasks.insert(0,
+                            lambda: create_apache_dirs(ctx=ctx, config=config,
+                                                       on_client=None,
+                                                       except_client = master_client))
+            subtasks.extend([
+                lambda: ship_apache_configs(ctx=ctx, config=config,
+                                            role_endpoints=role_endpoints,
+                                            on_client=None,
+                                            except_client = master_client,
+                ),
+                lambda: start_rgw(ctx=ctx,
+                                  config=config,
+                                  on_client=None,
+                                  except_client = master_client),
+                lambda: start_apache(ctx=ctx,
+                                     config = config,
+                                     on_client=None,
+                                     except_client = master_client,
+                ),
+            ])
+        elif ctx.rgw.frontend == 'civetweb':
+            subtasks.extend([
+                lambda: start_rgw(ctx=ctx,
+                                  config=config,
+                                  on_client=None,
+                                  except_client = master_client),
+            ])
+        else:
+            raise ValueError("frontend must be 'apache' or 'civetweb'")
+
+    else:
+        log.debug('single cluster run')
+        subtasks.extend([
+            lambda: configure_regions_and_zones(
+                ctx=ctx,
+                config=config,
+                regions=regions,
+                role_endpoints=role_endpoints,
+                realm=realm,
+            ),
+            lambda: configure_users(
+                ctx=ctx,
+                config=config,
+                everywhere=True,
+            ),
+        ])
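+        # apache needs its directories in place before anything else runs, so
+        # that subtask goes at the front of the list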
+        if ctx.rgw.frontend == 'apache':
+            subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config))
+            subtasks.extend([
+                lambda: ship_apache_configs(ctx=ctx, config=config,
+                                            role_endpoints=role_endpoints),
+                lambda: start_rgw(ctx=ctx,
+                                  config=config),
+                lambda: start_apache(ctx=ctx, config=config),
+                ])
+        elif ctx.rgw.frontend == 'civetweb':
+            subtasks.extend([
+                lambda: start_rgw(ctx=ctx,
+                                  config=config),
+            ])
+        else:
+            raise ValueError("frontend must be 'apache' or 'civetweb'")
+
+    log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
+    with contextutil.nested(*subtasks):
+        yield
diff --git a/qa/tasks/rgw_logsocket.py b/qa/tasks/rgw_logsocket.py
new file mode 100644
index 0000000..6f49b00
--- /dev/null
+++ b/qa/tasks/rgw_logsocket.py
@@ -0,0 +1,161 @@
+"""
+rgw s3tests logging wrappers
+"""
+from cStringIO import StringIO
+from configobj import ConfigObj
+import contextlib
+import logging
+import s3tests
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+    """
+    Run s3tests download function
+    """
+    return s3tests.download(ctx, config)
+
+def _config_user(s3tests_conf, section, user):
+    """
+    Run s3tests user config function
+    """
+    return s3tests._config_user(s3tests_conf, section, user)
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+    """
+    Run s3tests user create function
+    """
+    return s3tests.create_users(ctx, config)
+
+@contextlib.contextmanager
+def configure(ctx, config):
+    """
+    Run s3tests user configure function
+    """
+    return s3tests.configure(ctx, config)
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+    """
+    Run remote netcat tests
+    """
+    assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        client_config['extra_args'] = [
+            's3tests.functional.test_s3:test_bucket_list_return_data',
+        ]
+#        args = [
+#                'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+#                '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+#                '-w',
+#                '{tdir}/s3-tests'.format(tdir=testdir),
+#                '-v',
+#		's3tests.functional.test_s3:test_bucket_list_return_data',
+#                ]
+#        if client_config is not None and 'extra_args' in client_config:
+#            args.extend(client_config['extra_args'])
+#
+#        ctx.cluster.only(client).run(
+#            args=args,
+#            )
+
+    s3tests.run_tests(ctx, config)
+
+    netcat_out = StringIO()
+
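+    # read back whatever the rgw ops log socket emitted during the test run;
+    # a near-empty payload means opslog entries were not written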
+    for client, client_config in config.iteritems():
+        ctx.cluster.only(client).run(
+            args = [
+                'netcat',
+                '-w', '5',
+                '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+                ],
+             stdout = netcat_out,
+        )
+
+        out = netcat_out.getvalue()
+
+        assert len(out) > 100
+
+        log.info('Received %s', out)
+
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run some s3-tests suite against rgw, verify opslog socket returns data
+
+    Must restrict testing to a particular client::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3tests: [client.0]
+
+    To pass extra arguments to nose (e.g. to run a certain test)::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3tests:
+            client.0:
+              extra_args: ['test_s3:test_object_acl_grand_public_read']
+            client.1:
+              extra_args: ['--exclude', 'test_100_continue']
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    overrides = ctx.config.get('overrides', {})
+    # merge each client section, not the top level.
+    for (client, cconf) in config.iteritems():
+        teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
+
+    log.debug('config is %s', config)
+
+    s3tests_conf = {}
+    for client in clients:
+        s3tests_conf[client] = ConfigObj(
+            indent_type='',
+            infile={
+                'DEFAULT':
+                    {
+                    'port'      : 7280,
+                    'is_secure' : 'no',
+                    },
+                'fixtures' : {},
+                's3 main'  : {},
+                's3 alt'   : {},
+                }
+            )
+
+    with contextutil.nested(
+        lambda: download(ctx=ctx, config=config),
+        lambda: create_users(ctx=ctx, config=dict(
+                clients=clients,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: configure(ctx=ctx, config=dict(
+                clients=config,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: run_tests(ctx=ctx, config=config),
+        ):
+        yield
diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py
new file mode 100644
index 0000000..9f1507e
--- /dev/null
+++ b/qa/tasks/s3readwrite.py
@@ -0,0 +1,346 @@
+"""
+Run rgw s3 readwrite tests
+"""
+from cStringIO import StringIO
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+import yaml
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+    """
+    Download the s3 tests from the git builder.
+    Remove the downloaded s3-tests repository upon exit.
+    
+    The context passed in should be identical to the context
+    passed in to the main task.
+    """
+    assert isinstance(config, dict)
+    log.info('Downloading s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
+    for (client, cconf) in config.items():
+        branch = cconf.get('force-branch', None)
+        if not branch:
+            branch = cconf.get('branch', 'master')
+        sha1 = cconf.get('sha1')
+        ctx.cluster.only(client).run(
+            args=[
+                'git', 'clone',
+                '-b', branch,
+                teuth_config.ceph_git_base_url + 's3-tests.git',
+                '{tdir}/s3-tests'.format(tdir=testdir),
+                ],
+            )
+        if sha1 is not None:
+            ctx.cluster.only(client).run(
+                args=[
+                    'cd', '{tdir}/s3-tests'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'git', 'reset', '--hard', sha1,
+                    ],
+                )
+    try:
+        yield
+    finally:
+        log.info('Removing s3-tests...')
+        testdir = teuthology.get_testdir(ctx)
+        for client in config:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-rf',
+                    '{tdir}/s3-tests'.format(tdir=testdir),
+                    ],
+                )
+
+
+def _config_user(s3tests_conf, section, user):
+    """
+    Configure users for this section by stashing away keys, ids, and
+    email addresses.
+    """
+    s3tests_conf[section].setdefault('user_id', user)
+    s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+    s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
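
_config_user only fills in values that the job yaml did not already provide; the
generated defaults look roughly like the sketch below (shown with the portable
spellings string.ascii_uppercase and range; the task itself uses the Python 2 names
string.uppercase and xrange)::

    import base64
    import os
    import random
    import string

    user = 'foo.client.0'
    creds = {
        'user_id': user,
        'email': '{user}+test@test.test'.format(user=user),
        'display_name': 'Mr. {user}'.format(user=user),
        # 20 random uppercase ASCII letters
        'access_key': ''.join(random.choice(string.ascii_uppercase) for _ in range(20)),
        # 40 random bytes, base64-encoded
        'secret_key': base64.b64encode(os.urandom(40)),
    }
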
+@contextlib.contextmanager
+def create_users(ctx, config):
+    """
+    Create a default s3 user.
+    """
+    assert isinstance(config, dict)
+    log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
+    users = {'s3': 'foo'}
+    cached_client_user_names = dict()
+    for client in config['clients']:
+        cached_client_user_names[client] = dict()
+        s3tests_conf = config['s3tests_conf'][client]
+        s3tests_conf.setdefault('readwrite', {})
+        s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
+        s3tests_conf['readwrite'].setdefault('readers', 10)
+        s3tests_conf['readwrite'].setdefault('writers', 3)
+        s3tests_conf['readwrite'].setdefault('duration', 300)
+        s3tests_conf['readwrite'].setdefault('files', {})
+        rwconf = s3tests_conf['readwrite']
+        rwconf['files'].setdefault('num', 10)
+        rwconf['files'].setdefault('size', 2000)
+        rwconf['files'].setdefault('stddev', 500)
+        for section, user in users.iteritems():
+            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+            log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
+                                                                client=client))
+
+            # stash the 'delete_user' flag along with user name for easier cleanup
+            delete_this_user = True
+            if 'delete_user' in s3tests_conf['s3']:
+                delete_this_user = s3tests_conf['s3']['delete_user']
+                log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client))
+            cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)
+
+            # skip actual user creation if the create_user flag is set to false for this client
+            if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False:
+                log.debug('create_user set to False, skipping user creation for {client}'.format(client=client))
+                continue
+            else:
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client,
+                        'user', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--display-name', s3tests_conf[section]['display_name'],
+                        '--access-key', s3tests_conf[section]['access_key'],
+                        '--secret', s3tests_conf[section]['secret_key'],
+                        '--email', s3tests_conf[section]['email'],
+                    ],
+                )
+    try:
+        yield
+    finally:
+        for client in config['clients']:
+            for section, user in users.iteritems():
+                #uid = '{user}.{client}'.format(user=user, client=client)
+                real_uid, delete_this_user  = cached_client_user_names[client][section+user]
+                if delete_this_user:
+                    ctx.cluster.only(client).run(
+                        args=[
+                            'adjust-ulimits',
+                            'ceph-coverage',
+                            '{tdir}/archive/coverage'.format(tdir=testdir),
+                            'radosgw-admin',
+                            '-n', client,
+                            'user', 'rm',
+                            '--uid', real_uid,
+                            '--purge-data',
+                            ],
+                        )
+                else:
+                    log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client))
+
+@contextlib.contextmanager
+def configure(ctx, config):
+    """
+    Configure the s3-tests.  This includes the running of the
+    bootstrap code and the updating of local conf files.
+    """
+    assert isinstance(config, dict)
+    log.info('Configuring s3-readwrite-tests...')
+    for client, properties in config['clients'].iteritems():
+        s3tests_conf = config['s3tests_conf'][client]
+        if properties is not None and 'rgw_server' in properties:
+            host = None
+            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+                log.info('roles: ' + str(roles))
+                log.info('target: ' + str(target))
+                if properties['rgw_server'] in roles:
+                    _, host = split_user(target)
+            assert host is not None, "Invalid client specified as the rgw_server"
+            s3tests_conf['s3']['host'] = host
+        else:
+            s3tests_conf['s3']['host'] = 'localhost'
+
+        def_conf = s3tests_conf['DEFAULT']
+        s3tests_conf['s3'].setdefault('port', def_conf['port'])
+        s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
+
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        remote.run(
+            args=[
+                'cd',
+                '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
+                run.Raw('&&'),
+                './bootstrap',
+                ],
+            )
+        conf_fp = StringIO()
+        conf = dict(
+                        s3=s3tests_conf['s3'],
+                        readwrite=s3tests_conf['readwrite'],
+                    )
+        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
+        teuthology.write_file(
+            remote=remote,
+            path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
+            data=conf_fp.getvalue(),
+            )
+    yield
+
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+    """
+    Run the s3readwrite tests after everything is set up.
+
+    :param ctx: Context passed to task
+    :param config: specific configuration information
+    """
+    assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
+        args = [
+                '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
+                ]
+        if client_config is not None and 'extra_args' in client_config:
+            args.extend(client_config['extra_args'])
+
+        ctx.cluster.only(client).run(
+            args=args,
+            stdin=conf,
+            )
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run the s3tests-test-readwrite suite against rgw.
+
+    To run all tests on all clients::
+
+        tasks:
+        - ceph:
+        - rgw:
+        - s3readwrite:
+
+    To restrict testing to particular clients::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3readwrite: [client.0]
+
+    To run against a server on client.1::
+
+        tasks:
+        - ceph:
+        - rgw: [client.1]
+        - s3readwrite:
+            client.0:
+              rgw_server: client.1
+
+    To pass extra test arguments::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3readwrite:
+            client.0:
+              readwrite:
+                bucket: mybucket
+                readers: 10
+                writers: 3
+                duration: 600
+                files:
+                  num: 10
+                  size: 2000
+                  stddev: 500
+            client.1:
+              ...
+
+    To override s3 configuration::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3readwrite:
+            client.0:
+              s3:
+                user_id: myuserid
+                display_name: myname
+                email: my@email
+                access_key: myaccesskey
+                secret_key: mysecretkey
+
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    overrides = ctx.config.get('overrides', {})
+    # merge each client section, not the top level.
+    for client in config.iterkeys():
+        if not config[client]:
+            config[client] = {}
+        teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))
+
+    log.debug('in s3readwrite, config is %s', config)
+
+    s3tests_conf = {}
+    for client in clients:
+        if config[client] is None:
+            config[client] = {}
+        config[client].setdefault('s3', {})
+        config[client].setdefault('readwrite', {})
+
+        s3tests_conf[client] = ({
+                'DEFAULT':
+                    {
+                    'port'      : 7280,
+                    'is_secure' : False,
+                    },
+                'readwrite' : config[client]['readwrite'],
+                's3'  : config[client]['s3'],
+                })
+
+    with contextutil.nested(
+        lambda: download(ctx=ctx, config=config),
+        lambda: create_users(ctx=ctx, config=dict(
+                clients=clients,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: configure(ctx=ctx, config=dict(
+                clients=config,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: run_tests(ctx=ctx, config=config),
+        ):
+        pass
+    yield
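
Each of these tasks chains its stages together with teuthology.contextutil.nested,
passing zero-argument callables that produce context managers. Assuming it behaves like
a chained with-statement (the old contextlib.nested), setup runs top to bottom and
teardown runs in reverse, so the rgw users are removed and the s3-tests checkout is
deleted only after run_tests has finished. A small, self-contained illustration of that
ordering::

    import contextlib

    @contextlib.contextmanager
    def stage(name, order):
        order.append('setup ' + name)
        try:
            yield
        finally:
            order.append('teardown ' + name)

    order = []
    with stage('download', order), stage('create_users', order), \
         stage('configure', order), stage('run_tests', order):
        order.append('yield to the rest of the job')
    print(order)
    # setup runs download -> create_users -> configure -> run_tests,
    # then teardown runs in the reverse order after the yield.
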
diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py
new file mode 100644
index 0000000..4c17144
--- /dev/null
+++ b/qa/tasks/s3roundtrip.py
@@ -0,0 +1,302 @@
+"""
+Run rgw roundtrip message tests
+"""
+from cStringIO import StringIO
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+import yaml
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+    """
+    Download the s3 tests from the git builder.
+    Remove downloaded s3 file upon exit.
+    
+    The context passed in should be identical to the context
+    passed in to the main task.
+    """
+    assert isinstance(config, list)
+    log.info('Downloading s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
+    for client in config:
+        ctx.cluster.only(client).run(
+            args=[
+                'git', 'clone',
+                teuth_config.ceph_git_base_url + 's3-tests.git',
+                '{tdir}/s3-tests'.format(tdir=testdir),
+                ],
+            )
+    try:
+        yield
+    finally:
+        log.info('Removing s3-tests...')
+        for client in config:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-rf',
+                    '{tdir}/s3-tests'.format(tdir=testdir),
+                    ],
+                )
+
+def _config_user(s3tests_conf, section, user):
+    """
+    Configure users for this section by stashing away keys, ids, and
+    email addresses.
+    """
+    s3tests_conf[section].setdefault('user_id', user)
+    s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+    s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+    """
+    Create a default s3 user.
+    """
+    assert isinstance(config, dict)
+    log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
+    users = {'s3': 'foo'}
+    for client in config['clients']:
+        s3tests_conf = config['s3tests_conf'][client]
+        s3tests_conf.setdefault('roundtrip', {})
+        s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-')
+        s3tests_conf['roundtrip'].setdefault('readers', 10)
+        s3tests_conf['roundtrip'].setdefault('writers', 3)
+        s3tests_conf['roundtrip'].setdefault('duration', 300)
+        s3tests_conf['roundtrip'].setdefault('files', {})
+        rtconf = s3tests_conf['roundtrip']
+        rtconf['files'].setdefault('num', 10)
+        rtconf['files'].setdefault('size', 2000)
+        rtconf['files'].setdefault('stddev', 500)
+        for section, user in [('s3', 'foo')]:
+            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+            ctx.cluster.only(client).run(
+                args=[
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    'radosgw-admin',
+                    '-n', client,
+                    'user', 'create',
+                    '--uid', s3tests_conf[section]['user_id'],
+                    '--display-name', s3tests_conf[section]['display_name'],
+                    '--access-key', s3tests_conf[section]['access_key'],
+                    '--secret', s3tests_conf[section]['secret_key'],
+                    '--email', s3tests_conf[section]['email'],
+                ],
+            )
+    try:
+        yield
+    finally:
+        for client in config['clients']:
+            for user in users.itervalues():
+                uid = '{user}.{client}'.format(user=user, client=client)
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client,
+                        'user', 'rm',
+                        '--uid', uid,
+                        '--purge-data',
+                        ],
+                    )
+
+@contextlib.contextmanager
+def configure(ctx, config):
+    """
+    Configure the s3-tests.  This includes the running of the
+    bootstrap code and the updating of local conf files.
+    """
+    assert isinstance(config, dict)
+    log.info('Configuring s3-roundtrip-tests...')
+    testdir = teuthology.get_testdir(ctx)
+    for client, properties in config['clients'].iteritems():
+        s3tests_conf = config['s3tests_conf'][client]
+        if properties is not None and 'rgw_server' in properties:
+            host = None
+            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+                log.info('roles: ' + str(roles))
+                log.info('target: ' + str(target))
+                if properties['rgw_server'] in roles:
+                    _, host = split_user(target)
+            assert host is not None, "Invalid client specified as the rgw_server"
+            s3tests_conf['s3']['host'] = host
+        else:
+            s3tests_conf['s3']['host'] = 'localhost'
+
+        def_conf = s3tests_conf['DEFAULT']
+        s3tests_conf['s3'].setdefault('port', def_conf['port'])
+        s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
+
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        remote.run(
+            args=[
+                'cd',
+                '{tdir}/s3-tests'.format(tdir=testdir),
+                run.Raw('&&'),
+                './bootstrap',
+                ],
+            )
+        conf_fp = StringIO()
+        conf = dict(
+                        s3=s3tests_conf['s3'],
+                        roundtrip=s3tests_conf['roundtrip'],
+                    )
+        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
+        teuthology.write_file(
+            remote=remote,
+            path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
+            data=conf_fp.getvalue(),
+            )
+    yield
+
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+    """
+    Run the s3 roundtrip after everything is set up.
+
+    :param ctx: Context passed to task
+    :param config: specific configuration information
+    """
+    assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
+    for client, client_config in config.iteritems():
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
+        args = [
+                '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
+                ]
+        if client_config is not None and 'extra_args' in client_config:
+            args.extend(client_config['extra_args'])
+
+        ctx.cluster.only(client).run(
+            args=args,
+            stdin=conf,
+            )
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run the s3tests-test-roundtrip suite against rgw.
+
+    To run all tests on all clients::
+
+        tasks:
+        - ceph:
+        - rgw:
+        - s3roundtrip:
+
+    To restrict testing to particular clients::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3roundtrip: [client.0]
+
+    To run against a server on client.1::
+
+        tasks:
+        - ceph:
+        - rgw: [client.1]
+        - s3roundtrip:
+            client.0:
+              rgw_server: client.1
+
+    To pass extra test arguments::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3roundtrip:
+            client.0:
+              roundtrip:
+                bucket: mybucket
+                readers: 10
+                writers: 3
+                duration: 600
+                files:
+                  num: 10
+                  size: 2000
+                  stddev: 500
+            client.1:
+              ...
+
+    To override s3 configuration::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3roundtrip:
+            client.0:
+              s3:
+                user_id: myuserid
+                display_name: myname
+                email: my@email
+                access_key: myaccesskey
+                secret_key: mysecretkey
+
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    s3tests_conf = {}
+    for client in clients:
+        if config[client] is None:
+            config[client] = {}
+        config[client].setdefault('s3', {})
+        config[client].setdefault('roundtrip', {})
+
+        s3tests_conf[client] = ({
+                'DEFAULT':
+                    {
+                    'port'      : 7280,
+                    'is_secure' : False,
+                    },
+                'roundtrip' : config[client]['roundtrip'],
+                's3'  : config[client]['s3'],
+                })
+
+    with contextutil.nested(
+        lambda: download(ctx=ctx, config=clients),
+        lambda: create_users(ctx=ctx, config=dict(
+                clients=clients,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: configure(ctx=ctx, config=dict(
+                clients=config,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: run_tests(ctx=ctx, config=config),
+        ):
+        pass
+    yield
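
configure() above serializes the per-client settings with yaml.safe_dump and archives
them; run_tests() then feeds that same file to s3tests-test-roundtrip on stdin. Roughly
what a generated s3roundtrip.{client}.config.yaml contains, with placeholder
credentials::

    import yaml

    conf = {
        's3': {
            'host': 'localhost',
            'port': 7280,
            'is_secure': False,
            'user_id': 'foo.client.0',
            'display_name': 'Mr. foo.client.0',
            'email': 'foo.client.0+test@test.test',
            'access_key': 'EXAMPLEACCESSKEY0000',
            'secret_key': 'example-secret-key',
        },
        'roundtrip': {
            'bucket': 'rttest-client.0-{random}-',
            'readers': 10,
            'writers': 3,
            'duration': 300,
            'files': {'num': 10, 'size': 2000, 'stddev': 500},
        },
    }
    print(yaml.safe_dump(conf, default_flow_style=False))
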
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
new file mode 100644
index 0000000..20f328b
--- /dev/null
+++ b/qa/tasks/s3tests.py
@@ -0,0 +1,449 @@
+"""
+Run a set of s3 tests on rgw.
+"""
+from cStringIO import StringIO
+from configobj import ConfigObj
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+
+import util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+def extract_sync_client_data(ctx, client_name):
+    """
+    Extract synchronized client rgw zone and rgw region information.
+
+    :param ctx: Context passed to the s3tests task
+    :param name: Name of client that we are synching with
+    """
+    return_region_name = None
+    return_dict = None
+    client = ctx.ceph['ceph'].conf.get(client_name, None)
+    if client:
+        current_client_zone = client.get('rgw zone', None)
+        if current_client_zone:
+            (endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name, (None, None))
+            # pull out the radosgw_agent stuff
+            regions = ctx.rgw.regions
+            for region in regions:
+                log.debug('jbuck, region is {region}'.format(region=region))
+                region_data = ctx.rgw.regions[region]
+                log.debug('region data is {region}'.format(region=region_data))
+                zones = region_data['zones']
+                for zone in zones:
+                    if current_client_zone in zone:
+                        return_region_name = region
+                        return_dict = dict()
+                        return_dict['api_name'] = region_data['api name']
+                        return_dict['is_master'] = region_data['is master']
+                        return_dict['port'] = endpoint_port
+                        return_dict['host'] = endpoint_host
+
+                        # The s3tests expect the sync_agent_[addr|port] to be
+                        # set on the non-master node for some reason
+                        if not region_data['is master']:
+                            (rgwagent_host, rgwagent_port) = ctx.radosgw_agent.endpoint
+                            (return_dict['sync_agent_addr'], _) = ctx.rgw.role_endpoints[rgwagent_host]
+                            return_dict['sync_agent_port'] = rgwagent_port
+
+        else: #if client_zone:
+            log.debug('No zone info for {host}'.format(host=client_name))
+    else: # if client
+        log.debug('No ceph conf for {host}'.format(host=client_name))
+
+    return return_region_name, return_dict
+
+def update_conf_with_region_info(ctx, config, s3tests_conf):
+    """
+    Scan for a client (passed in s3tests_conf) that is an s3agent
+    with which we can sync.  Update information in local conf file
+    if such a client is found.
+    """
+    for key in s3tests_conf.keys():
+        # we'll assume that there's only one sync relationship (source / destination) with client.X
+        # as the key for now
+
+        # Iterate through all of the radosgw_agent (rgwa) configs and see if a
+        # given client is involved in a relationship.
+        # If a given client isn't, skip it
+        this_client_in_rgwa_config = False
+        for rgwa in ctx.radosgw_agent.config.keys():
+            rgwa_data = ctx.radosgw_agent.config[rgwa]
+
+            if key in rgwa_data['src'] or key in rgwa_data['dest']:
+                this_client_in_rgwa_config = True
+                log.debug('{client} is in a radosgw-agent sync relationship'.format(client=key))
+                radosgw_sync_data = ctx.radosgw_agent.config[key]
+                break
+        if not this_client_in_rgwa_config:
+            log.debug('{client} is NOT in a radosgw-agent sync relationship'.format(client=key))
+            continue
+
+        source_client = radosgw_sync_data['src']
+        dest_client = radosgw_sync_data['dest']
+
+        # Extract the pertinent info for the source side
+        source_region_name, source_region_dict = extract_sync_client_data(ctx, source_client)
+        log.debug('\t{key} source_region {source_region} source_dict {source_dict}'.format
+            (key=key,source_region=source_region_name,source_dict=source_region_dict))
+
+        # The source *should* be the master region, but test anyway and then set it as the default region
+        if source_region_dict['is_master']:
+            log.debug('Setting {region} as default_region'.format(region=source_region_name))
+            s3tests_conf[key]['fixtures'].setdefault('default_region', source_region_name)
+
+        # Extract the pertinent info for the destination side
+        dest_region_name, dest_region_dict = extract_sync_client_data(ctx, dest_client)
+        log.debug('\t{key} dest_region {dest_region} dest_dict {dest_dict}'.format
+            (key=key,dest_region=dest_region_name,dest_dict=dest_region_dict))
+
+        # now add these regions to the s3tests_conf object
+        s3tests_conf[key]['region {region_name}'.format(region_name=source_region_name)] = source_region_dict
+        s3tests_conf[key]['region {region_name}'.format(region_name=dest_region_name)] = dest_region_dict
+
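
For multi-region jobs, extract_sync_client_data() returns a region name plus a dict of
connection details, and the loop above stores one such dict per region in the client's
s3tests conf. A sketch of the resulting sections; the region names, hosts and the
sync-agent port below are placeholders::

    region_sections = {
        'fixtures': {'default_region': 'region-a'},   # the master region becomes the default
        'region region-a': {
            'api_name': 'api-a',
            'is_master': True,
            'host': 'rgw-a.example.com',
            'port': 7280,
        },
        'region region-b': {
            'api_name': 'api-b',
            'is_master': False,
            'host': 'rgw-b.example.com',
            'port': 7280,
            # only the non-master side records the radosgw-agent endpoint
            'sync_agent_addr': 'rgw-a.example.com',
            'sync_agent_port': 8000,
        },
    }
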
+@contextlib.contextmanager
+def download(ctx, config):
+    """
+    Download the s3 tests from the git builder.
+    Remove downloaded s3 file upon exit.
+
+    The context passed in should be identical to the context
+    passed in to the main task.
+    """
+    assert isinstance(config, dict)
+    log.info('Downloading s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
+    s3_branches = [ 'giant', 'firefly', 'firefly-original', 'hammer' ]
+    for (client, cconf) in config.items():
+        branch = cconf.get('force-branch', None)
+        if not branch:
+            ceph_branch = ctx.config.get('branch')
+            suite_branch = ctx.config.get('suite_branch', ceph_branch)
+            if suite_branch in s3_branches:
+                branch = cconf.get('branch', suite_branch)
+            else:
+                branch = cconf.get('branch', 'ceph-' + suite_branch)
+        if not branch:
+            raise ValueError(
+                "Could not determine what branch to use for s3tests!")
+        else:
+            log.info("Using branch '%s' for s3tests", branch)
+        sha1 = cconf.get('sha1')
+        ctx.cluster.only(client).run(
+            args=[
+                'git', 'clone',
+                '-b', branch,
+                teuth_config.ceph_git_base_url + 's3-tests.git',
+                '{tdir}/s3-tests'.format(tdir=testdir),
+                ],
+            )
+        if sha1 is not None:
+            ctx.cluster.only(client).run(
+                args=[
+                    'cd', '{tdir}/s3-tests'.format(tdir=testdir),
+                    run.Raw('&&'),
+                    'git', 'reset', '--hard', sha1,
+                    ],
+                )
+    try:
+        yield
+    finally:
+        log.info('Removing s3-tests...')
+        testdir = teuthology.get_testdir(ctx)
+        for client in config:
+            ctx.cluster.only(client).run(
+                args=[
+                    'rm',
+                    '-rf',
+                    '{tdir}/s3-tests'.format(tdir=testdir),
+                    ],
+                )
+
+
+def _config_user(s3tests_conf, section, user):
+    """
+    Configure users for this section by stashing away keys, ids, and
+    email addresses.
+    """
+    s3tests_conf[section].setdefault('user_id', user)
+    s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+    s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+    """
+    Create a main and an alternate s3 user.
+    """
+    assert isinstance(config, dict)
+    log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
+    users = {'s3 main': 'foo', 's3 alt': 'bar'}
+    for client in config['clients']:
+        s3tests_conf = config['s3tests_conf'][client]
+        s3tests_conf.setdefault('fixtures', {})
+        s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
+        for section, user in users.iteritems():
+            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+            log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
+            ctx.cluster.only(client).run(
+                args=[
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    'radosgw-admin',
+                    '-n', client,
+                    'user', 'create',
+                    '--uid', s3tests_conf[section]['user_id'],
+                    '--display-name', s3tests_conf[section]['display_name'],
+                    '--access-key', s3tests_conf[section]['access_key'],
+                    '--secret', s3tests_conf[section]['secret_key'],
+                    '--email', s3tests_conf[section]['email'],
+                ],
+            )
+    try:
+        yield
+    finally:
+        for client in config['clients']:
+            for user in users.itervalues():
+                uid = '{user}.{client}'.format(user=user, client=client)
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client,
+                        'user', 'rm',
+                        '--uid', uid,
+                        '--purge-data',
+                        ],
+                    )
+
+
+@contextlib.contextmanager
+def configure(ctx, config):
+    """
+    Configure the s3-tests.  This includes the running of the
+    bootstrap code and the updating of local conf files.
+    """
+    assert isinstance(config, dict)
+    log.info('Configuring s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
+    for client, properties in config['clients'].iteritems():
+        s3tests_conf = config['s3tests_conf'][client]
+        if properties is not None and 'rgw_server' in properties:
+            host = None
+            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+                log.info('roles: ' + str(roles))
+                log.info('target: ' + str(target))
+                if properties['rgw_server'] in roles:
+                    _, host = split_user(target)
+            assert host is not None, "Invalid client specified as the rgw_server"
+            s3tests_conf['DEFAULT']['host'] = host
+        else:
+            s3tests_conf['DEFAULT']['host'] = 'localhost'
+
+        if properties is not None and 'slow_backend' in properties:
+            s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
+
+        (remote,) = ctx.cluster.only(client).remotes.keys()
+        remote.run(
+            args=[
+                'cd',
+                '{tdir}/s3-tests'.format(tdir=testdir),
+                run.Raw('&&'),
+                './bootstrap',
+                ],
+            )
+        conf_fp = StringIO()
+        s3tests_conf.write(conf_fp)
+        teuthology.write_file(
+            remote=remote,
+            path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+            data=conf_fp.getvalue(),
+            )
+
+    log.info('Configuring boto...')
+    boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
+    for client, properties in config['clients'].iteritems():
+        with file(boto_src, 'rb') as f:
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            conf = f.read().format(
+                idle_timeout=config.get('idle_timeout', 30)
+                )
+            teuthology.write_file(
+                remote=remote,
+                path='{tdir}/boto.cfg'.format(tdir=testdir),
+                data=conf,
+                )
+
+    try:
+        yield
+
+    finally:
+        log.info('Cleaning up boto...')
+        for client, properties in config['clients'].iteritems():
+            (remote,) = ctx.cluster.only(client).remotes.keys()
+            remote.run(
+                args=[
+                    'rm',
+                    '{tdir}/boto.cfg'.format(tdir=testdir),
+                    ],
+                )
+
+@contextlib.contextmanager
+def sync_users(ctx, config):
+    """
+    Sync this user.
+    """
+    assert isinstance(config, dict)
+    # do a full sync if this is a multi-region test
+    if rgw_utils.multi_region_enabled(ctx):
+        log.debug('Doing a full sync')
+        rgw_utils.radosgw_agent_sync_all(ctx)
+    else:
+        log.debug('Not a multi-region config; skipping the metadata sync')
+
+    yield
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+    """
+    Run the s3tests after everything is set up.
+
+    :param ctx: Context passed to task
+    :param config: specific configuration information
+    """
+    assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
+    attrs = ["!fails_on_rgw"]
+    if not ctx.rgw.use_fastcgi:
+        attrs.append("!fails_on_mod_proxy_fcgi")
+    for client, client_config in config.iteritems():
+        args = [
+            'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+            'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir),
+            '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+            '-w',
+            '{tdir}/s3-tests'.format(tdir=testdir),
+            '-v',
+            '-a', ','.join(attrs),
+            ]
+        if client_config is not None and 'extra_args' in client_config:
+            args.extend(client_config['extra_args'])
+
+        ctx.cluster.only(client).run(
+            args=args,
+            label="s3 tests against rgw"
+            )
+    yield
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run the s3-tests suite against rgw.
+
+    To run all tests on all clients::
+
+        tasks:
+        - ceph:
+        - rgw:
+        - s3tests:
+
+    To restrict testing to particular clients::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3tests: [client.0]
+
+    To run against a server on client.1 and increase the boto timeout to 10m::
+
+        tasks:
+        - ceph:
+        - rgw: [client.1]
+        - s3tests:
+            client.0:
+              rgw_server: client.1
+              idle_timeout: 600
+
+    To pass extra arguments to nose (e.g. to run a certain test)::
+
+        tasks:
+        - ceph:
+        - rgw: [client.0]
+        - s3tests:
+            client.0:
+              extra_args: ['test_s3:test_object_acl_grand_public_read']
+            client.1:
+              extra_args: ['--exclude', 'test_100_continue']
+    """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task s3tests only supports a list or dictionary for configuration"
+    all_clients = ['client.{id}'.format(id=id_)
+                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    if config is None:
+        config = all_clients
+    if isinstance(config, list):
+        config = dict.fromkeys(config)
+    clients = config.keys()
+
+    overrides = ctx.config.get('overrides', {})
+    # merge each client section, not the top level.
+    for client in config.iterkeys():
+        if not config[client]:
+            config[client] = {}
+        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
+
+    log.debug('s3tests config is %s', config)
+
+    s3tests_conf = {}
+    for client in clients:
+        s3tests_conf[client] = ConfigObj(
+            indent_type='',
+            infile={
+                'DEFAULT':
+                    {
+                    'port'      : 7280,
+                    'is_secure' : 'no',
+                    },
+                'fixtures' : {},
+                's3 main'  : {},
+                's3 alt'   : {},
+                }
+            )
+
+    # Only attempt to add in the region info if there's a radosgw_agent configured
+    if hasattr(ctx, 'radosgw_agent'):
+        update_conf_with_region_info(ctx, config, s3tests_conf)
+
+    with contextutil.nested(
+        lambda: download(ctx=ctx, config=config),
+        lambda: create_users(ctx=ctx, config=dict(
+                clients=clients,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: sync_users(ctx=ctx, config=config),
+        lambda: configure(ctx=ctx, config=dict(
+                clients=config,
+                s3tests_conf=s3tests_conf,
+                )),
+        lambda: run_tests(ctx=ctx, config=config),
+        ):
+        pass
+    yield
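
Unlike the two yaml-driven tasks above, s3tests builds its per-client configuration as a
ConfigObj and writes it out as an INI-style file that the suite picks up via
S3TEST_CONF. A short sketch of that serialization, mirroring the task's own cStringIO
usage (section contents are placeholders)::

    from cStringIO import StringIO      # the task code is Python 2
    from configobj import ConfigObj

    s3conf = ConfigObj(
        indent_type='',
        infile={
            'DEFAULT': {'host': 'localhost', 'port': 7280, 'is_secure': 'no'},
            'fixtures': {'bucket prefix': 'test-client.0-{random}-'},
            's3 main': {'user_id': 'foo.client.0', 'access_key': 'EXAMPLEACCESSKEY0000'},
            's3 alt': {'user_id': 'bar.client.0'},
        })
    fp = StringIO()
    s3conf.write(fp)
    print(fp.getvalue())    # INI-style [section] headers with 'key = value' lines
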
diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py
new file mode 100644
index 0000000..8272e8b
--- /dev/null
+++ b/qa/tasks/samba.py
@@ -0,0 +1,245 @@
+"""
+Samba
+"""
+import contextlib
+import logging
+import sys
+import time
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+from teuthology.orchestra.daemon import DaemonGroup
+
+log = logging.getLogger(__name__)
+
+
+def get_sambas(ctx, roles):
+    """
+    Scan for roles that are samba.  Yield the id of the samba role
+    (samba.0, samba.1...)  and the associated remote site
+
+    :param ctx: Context
+    :param roles: roles for this test (extracted from yaml files)
+    """
+    for role in roles:
+        assert isinstance(role, basestring)
+        PREFIX = 'samba.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        yield (id_, remote)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Setup samba smbd with ceph vfs module.  This task assumes the samba
+    package has already been installed via the install task.
+
+    The config is optional and defaults to starting samba on all nodes.
+    If a config is given, it is expected to be a list of
+    samba nodes to start smbd servers on.
+
+    Example that starts smbd on all samba nodes::
+
+        tasks:
+        - install:
+        - install:
+            project: samba
+            extra_packages: ['samba']
+        - ceph:
+        - samba:
+        - interactive:
+
+    Example that starts smbd on just one of the samba nodes and cifs on the other::
+
+        tasks:
+        - samba: [samba.0]
+        - cifs: [samba.1]
+
+    An optional backend can be specified, and requires a path which smbd will
+    use as the backend storage location::
+
+        roles:
+            - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a]
+            - [client.0, samba.0]
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0]
+        - samba:
+            samba.0:
+              cephfuse: "{testdir}/mnt.0"
+
+    This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with
+    a UNC of //localhost/cephfuse.  Access through that UNC will be on
+    the ceph fuse mount point.
+
+    If no arguments are specified in the samba
+    role, the default behavior is to enable the ceph UNC //localhost/ceph
+    and use the ceph vfs module as the smbd backend.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info("Setting up smbd with ceph vfs...")
+    assert config is None or isinstance(config, list) or isinstance(config, dict), \
+        "task samba got invalid config"
+
+    if config is None:
+        config = dict(('samba.{id}'.format(id=id_), None)
+                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba'))
+    elif isinstance(config, list):
+        config = dict((name, None) for name in config)
+
+    samba_servers = list(get_sambas(ctx=ctx, roles=config.keys()))
+
+    testdir = teuthology.get_testdir(ctx)
+
+    if not hasattr(ctx, 'daemons'):
+        ctx.daemons = DaemonGroup()
+
+    for id_, remote in samba_servers:
+
+        rolestr = "samba.{id_}".format(id_=id_)
+
+        confextras = """vfs objects = ceph
+  ceph:config_file = /etc/ceph/ceph.conf"""
+
+        unc = "ceph"
+        backend = "/"
+
+        if config[rolestr] is not None:
+            # verify that there's just one parameter in role
+            if len(config[rolestr]) != 1:
+                log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_))
+                raise Exception('invalid config')
+            confextras = ""
+            (unc, backendstr) = config[rolestr].items()[0]
+            backend = backendstr.format(testdir=testdir)
+
+        # on first samba role, set ownership and permissions of ceph root
+        # so that samba tests succeed
+        if config[rolestr] is None and id_ == samba_servers[0][0]:
+            remote.run(
+                    args=[
+                        'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'),
+                        'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'),
+                        'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'),
+                        'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'),
+                        'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'),
+                        'rm', '-rf', '/tmp/cmnt',
+                        ],
+                    )
+        else:
+            remote.run(
+                    args=[
+                        'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'),
+                        'sudo', 'chmod', '1777', backend,
+                        ],
+                    )
+
+        teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """
+[global]
+  workgroup = WORKGROUP
+  netbios name = DOMAIN
+
+[{unc}]
+  path = {backend}
+  {extras}
+  writeable = yes
+  valid users = ubuntu
+""".format(extras=confextras, unc=unc, backend=backend))
+
+        # create ubuntu user
+        remote.run(
+            args=[
+                'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu',
+                run.Raw('||'),
+                'printf', run.Raw('"ubuntu\nubuntu\n"'),
+                run.Raw('|'),
+                'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu'
+            ])
+
+        smbd_cmd = [
+                'sudo',
+                'daemon-helper',
+                'term',
+                'nostdin',
+                '/usr/local/samba/sbin/smbd',
+                '-F',
+                ]
+        ctx.daemons.add_daemon(remote, 'smbd', id_,
+                               args=smbd_cmd,
+                               logger=log.getChild("smbd.{id_}".format(id_=id_)),
+                               stdin=run.PIPE,
+                               wait=False,
+                               )
+
+        # let smbd initialize, probably a better way...
+        seconds_to_sleep = 100
+        log.info('Sleeping for %s seconds...' % seconds_to_sleep)
+        time.sleep(seconds_to_sleep)
+        log.info('Sleeping stopped...')
+
+    try:
+        yield
+    finally:
+        log.info('Stopping smbd processes...')
+        exc_info = (None, None, None)
+        for d in ctx.daemons.iter_daemons_of_role('smbd'):
+            try:
+                d.stop()
+            except (run.CommandFailedError,
+                    run.CommandCrashedError,
+                    run.ConnectionLostError):
+                exc_info = sys.exc_info()
+                log.exception('Saw exception from %s.%s', d.role, d.id_)
+        if exc_info != (None, None, None):
+            raise exc_info[0], exc_info[1], exc_info[2]
+
+        for id_, remote in samba_servers:
+            remote.run(
+                args=[
+                    'sudo',
+                    'rm', '-rf',
+                    '/usr/local/samba/etc/smb.conf',
+                    '/usr/local/samba/private/*',
+                    '/usr/local/samba/var/run/',
+                    '/usr/local/samba/var/locks',
+                    '/usr/local/samba/var/lock',
+                    ],
+                )
+            # make sure daemons are gone
+            try:
+                remote.run(
+                    args=[
+                        'while',
+                        'sudo', 'killall', '-9', 'smbd',
+                        run.Raw(';'),
+                        'do', 'sleep', '1',
+                        run.Raw(';'),
+                        'done',
+                        ],
+                    )
+
+                remote.run(
+                    args=[
+                        'sudo',
+                        'lsof',
+                        backend,
+                        ],
+                    check_status=False
+                    )
+                remote.run(
+                    args=[
+                        'sudo',
+                        'fuser',
+                        '-M',
+                        backend,
+                        ],
+                    check_status=False
+                    )
+            except Exception:
+                log.exception("Saw exception")
+                pass
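
The fixed 100-second sleep above (flagged in the code as "probably a better way...")
could in principle be replaced by polling until smbd accepts connections. A hedged
sketch, assuming smbd listens on the standard SMB TCP port 445 and that the polling node
can reach it::

    import socket
    import time

    def wait_for_smbd(host, port=445, timeout=120, interval=1):
        """Return True once something accepts TCP connections on the SMB port."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                sock = socket.create_connection((host, port), interval)
                sock.close()
                return True
            except socket.error:
                time.sleep(interval)
        return False
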
diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py
new file mode 100644
index 0000000..9800d1e
--- /dev/null
+++ b/qa/tasks/scrub.py
@@ -0,0 +1,117 @@
+"""
+Scrub osds
+"""
+import contextlib
+import gevent
+import logging
+import random
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run scrub periodically. Randomly chooses an OSD to scrub.
+
+    The config should be as follows:
+
+    scrub:
+        frequency: <seconds between scrubs>
+        deep: <bool for deepness>
+
+    example:
+
+    tasks:
+    - ceph:
+    - scrub:
+        frequency: 30
+        deep: 0
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'scrub task only accepts a dict for configuration'
+
+    log.info('Beginning scrub...')
+
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    while len(manager.get_osd_status()['up']) < num_osds:
+        time.sleep(10)
+
+    scrub_proc = Scrubber(
+        manager,
+        config,
+        )
+    try:
+        yield
+    finally:
+        log.info('joining scrub')
+        scrub_proc.do_join()
+
+class Scrubber:
+    """
+    Scrubbing is actually performed during initialization
+    """
+    def __init__(self, manager, config):
+        """
+        Spawn scrubbing thread upon completion.
+        """
+        self.ceph_manager = manager
+        self.ceph_manager.wait_for_clean()
+
+        osd_status = self.ceph_manager.get_osd_status()
+        self.osds = osd_status['up']
+
+        self.config = config
+        if self.config is None:
+            self.config = dict()
+
+        else:
+            def tmp(x):
+                """Local display"""
+                print x
+            self.log = tmp
+
+        self.stopping = False
+
+        log.info("spawning thread")
+
+        self.thread = gevent.spawn(self.do_scrub)
+
+    def do_join(self):
+        """Scrubbing thread finished"""
+        self.stopping = True
+        self.thread.get()
+
+    def do_scrub(self):
+        """Perform the scrub operation"""
+        frequency = self.config.get("frequency", 30)
+        deep = self.config.get("deep", 0)
+
+        log.info("stopping %s" % self.stopping)
+
+        while not self.stopping:
+            osd = str(random.choice(self.osds))
+
+            if deep:
+                cmd = 'deep-scrub'
+            else:
+                cmd = 'scrub'
+
+            log.info('%sbing %s' % (cmd, osd))
+            self.ceph_manager.raw_cluster_cmd('osd', cmd, osd)
+
+            time.sleep(frequency)
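
Outside teuthology, Scrubber.do_scrub amounts to issuing 'ceph osd scrub <id>' (or
'ceph osd deep-scrub <id>') against a randomly chosen up OSD every <frequency> seconds;
raw_cluster_cmd is a wrapper around the ceph CLI. A standalone sketch, assuming a
working ceph client and admin keyring on the node::

    import random
    import subprocess
    import time

    def scrub_loop(osd_ids, frequency=30, deep=False, rounds=10):
        """Ask the cluster to (deep-)scrub a randomly chosen OSD each round."""
        cmd = 'deep-scrub' if deep else 'scrub'
        for _ in range(rounds):
            osd = str(random.choice(osd_ids))
            subprocess.check_call(['ceph', 'osd', cmd, osd])
            time.sleep(frequency)
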
diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py
new file mode 100644
index 0000000..40faa4d
--- /dev/null
+++ b/qa/tasks/scrub_test.py
@@ -0,0 +1,383 @@
+"""Scrub testing"""
+from cStringIO import StringIO
+
+import contextlib
+import json
+import logging
+import os
+import time
+import tempfile
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+
+def wait_for_victim_pg(manager):
+    """Return a PG with some data and its acting set"""
+    # wait for some PG to have data that we can mess with
+    victim = None
+    while victim is None:
+        stats = manager.get_pg_stats()
+        for pg in stats:
+            size = pg['stat_sum']['num_bytes']
+            if size > 0:
+                victim = pg['pgid']
+                acting = pg['acting']
+                return victim, acting
+        time.sleep(3)
+
+
+def find_victim_object(ctx, pg, osd):
+    """Return a file to be fuzzed"""
+    (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
+    data_path = os.path.join(
+        '/var/lib/ceph/osd',
+        'ceph-{id}'.format(id=osd),
+        'current',
+        '{pg}_head'.format(pg=pg)
+        )
+
+    # fuzz time
+    with contextlib.closing(StringIO()) as ls_fp:
+        osd_remote.run(
+            args=['sudo', 'ls', data_path],
+            stdout=ls_fp,
+        )
+        ls_out = ls_fp.getvalue()
+
+    # find an object file we can mess with
+    osdfilename = next(line for line in ls_out.split('\n')
+                       if not line.startswith('__'))
+    assert osdfilename is not None
+
+    # Get actual object name from osd stored filename
+    objname, _ = osdfilename.split('__', 1)
+    objname = objname.replace(r'\u', '_')
+    return osd_remote, os.path.join(data_path, osdfilename), objname
+
+
+def corrupt_file(osd_remote, path):
+    # put a single \0 at the beginning of the file
+    osd_remote.run(
+        args=['sudo', 'dd',
+              'if=/dev/zero',
+              'of=%s' % path,
+              'bs=1', 'count=1', 'conv=notrunc']
+    )
+
+
+def get_pgnum(pgid):
+    pos = pgid.find('.')
+    assert pos != -1
+    return pgid[pos+1:]
+
+
+def deep_scrub(manager, victim, pool):
+    # scrub, verify inconsistent
+    pgnum = get_pgnum(victim)
+    manager.do_pg_scrub(pool, pgnum, 'deep-scrub')
+
+    stats = manager.get_single_pg_stats(victim)
+    inconsistent = stats['state'].find('+inconsistent') != -1
+    assert inconsistent
+
+
+def repair(manager, victim, pool):
+    # repair, verify no longer inconsistent
+    pgnum = get_pgnum(victim)
+    manager.do_pg_scrub(pool, pgnum, 'repair')
+
+    stats = manager.get_single_pg_stats(victim)
+    inconsistent = stats['state'].find('+inconsistent') != -1
+    assert not inconsistent
+
+
+def test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, pool):
+    corrupt_file(osd_remote, obj_path)
+    deep_scrub(manager, pg, pool)
+    repair(manager, pg, pool)
+
+
+def test_repair_bad_omap(ctx, manager, pg, osd, objname):
+    # Test deep-scrub with various omap modifications
+    # Modify omap on specific osd
+    log.info('fuzzing omap of %s' % objname)
+    manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key'])
+    manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
+                                   'badkey', 'badval'])
+    manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr'])
+
+    deep_scrub(manager, pg, 'rbd')
+    # Please note: the repair here is erroneous; it rewrites the correct omap
+    # digest and data digest on the replicas with the corresponding digests
+    # from the primary osd which is hosting the victim object, see
+    # find_victim_object().
+    # So we need to either put this test at the end of this task, or
+    # undo the mess-up manually before the "repair()" that just ensures
+    # the cleanup is sane; otherwise the succeeding tests will fail if they
+    # try to set "badkey" in the hope of getting an "inconsistent" pg with a
+    # deep-scrub.
+    manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'hdr'])
+    manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'badkey'])
+    manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
+                                   'key', 'val'])
+    repair(manager, pg, 'rbd')
+
+
+class MessUp:
+    def __init__(self, manager, osd_remote, pool, osd_id,
+                 obj_name, obj_path, omap_key, omap_val):
+        self.manager = manager
+        self.osd = osd_remote
+        self.pool = pool
+        self.osd_id = osd_id
+        self.obj = obj_name
+        self.path = obj_path
+        self.omap_key = omap_key
+        self.omap_val = omap_val
+
+    @contextlib.contextmanager
+    def _test_with_file(self, messup_cmd, *checks):
+        temp = tempfile.mktemp()
+        backup_cmd = ['sudo', 'cp', self.path, temp]
+        self.osd.run(args=backup_cmd)
+        self.osd.run(args=messup_cmd.split())
+        yield checks
+        restore_cmd = ['sudo', 'mv', temp, self.path]
+        self.osd.run(args=restore_cmd)
+
+    def remove(self):
+        cmd = 'sudo rm {path}'.format(path=self.path)
+        return self._test_with_file(cmd, 'missing')
+
+    def append(self):
+        cmd = 'sudo dd if=/dev/zero of={path} bs=1 count=1 ' \
+              'conv=notrunc oflag=append'.format(path=self.path)
+        return self._test_with_file(cmd,
+                                    'data_digest_mismatch',
+                                    'size_mismatch')
+
+    def truncate(self):
+        cmd = 'sudo dd if=/dev/null of={path}'.format(path=self.path)
+        return self._test_with_file(cmd,
+                                    'data_digest_mismatch',
+                                    'size_mismatch')
+
+    def change_obj(self):
+        cmd = 'sudo dd if=/dev/zero of={path} bs=1 count=1 ' \
+              'conv=notrunc'.format(path=self.path)
+        return self._test_with_file(cmd,
+                                    'data_digest_mismatch')
+
+    @contextlib.contextmanager
+    def rm_omap(self):
+        cmd = ['rmomapkey', self.pool, self.obj, self.omap_key]
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+        yield ('omap_digest_mismatch',)
+        cmd = ['setomapval', self.pool, self.obj,
+               self.omap_key, self.omap_val]
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+
+    @contextlib.contextmanager
+    def add_omap(self):
+        cmd = ['setomapval', self.pool, self.obj, 'badkey', 'badval']
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+        yield ('omap_digest_mismatch',)
+        cmd = ['rmomapkey', self.pool, self.obj, 'badkey']
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+
+    @contextlib.contextmanager
+    def change_omap(self):
+        cmd = ['setomapval', self.pool, self.obj, self.omap_key, 'badval']
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+        yield ('omap_digest_mismatch',)
+        cmd = ['setomapval', self.pool, self.obj, self.omap_key, self.omap_val]
+        self.manager.osd_admin_socket(self.osd_id, cmd)
+
+
+class InconsistentObjChecker:
+    """Check the returned inconsistents/inconsistent info"""
+
+    def __init__(self, osd, acting, obj_name):
+        self.osd = osd
+        self.acting = acting
+        self.obj = obj_name
+        assert self.osd in self.acting
+
+    def basic_checks(self, inc):
+        assert inc['object']['name'] == self.obj
+        assert inc['object']['snap'] == "head"
+        assert len(inc['shards']) == len(self.acting), \
+            "the number of returned shard does not match with the acting set"
+
+    def run(self, check, inc):
+        func = getattr(self, check)
+        func(inc)
+
+    def _check_errors(self, inc, err_name):
+        bad_found = False
+        good_found = False
+        for shard in inc['shards']:
+            log.info('shard = %r' % shard)
+            log.info('err = %s' % err_name)
+            assert 'osd' in shard
+            osd = shard['osd']
+            err = err_name in shard['errors']
+            if osd == self.osd:
+                assert bad_found is False, \
+                    "multiple entries found for the given OSD"
+                assert err is True, \
+                    "Didn't find '{err}' in errors".format(err=err_name)
+                bad_found = True
+            else:
+                assert osd in self.acting, "shard not in acting set"
+                assert err is False, \
+                    "Expected '{err}' in errors".format(err=err_name)
+                good_found = True
+        assert bad_found is True, \
+            "Shard for osd.{osd} not found".format(osd=self.osd)
+        assert good_found is True, \
+            "No other acting shards found"
+
+    def _check_attrs(self, inc, attr_name):
+        bad_attr = None
+        good_attr = None
+        for shard in inc['shards']:
+            log.info('shard = %r' % shard)
+            log.info('attr = %s' % attr_name)
+            assert 'osd' in shard
+            osd = shard['osd']
+            attr = shard.get(attr_name, False)
+            if osd == self.osd:
+                assert bad_attr is None, \
+                    "multiple entries found for the given OSD"
+                bad_attr = attr
+            else:
+                assert osd in self.acting, "shard not in acting set"
+                assert good_attr is None or good_attr == attr, \
+                    "multiple good attrs found"
+                good_attr = attr
+        assert bad_attr is not None, \
+            "bad {attr} not found".format(attr=attr_name)
+        assert good_attr is not None, \
+            "good {attr} not found".format(attr=attr_name)
+        assert good_attr != bad_attr, \
+            "bad attr is identical to the good ones: " \
+            "{0} == {1}".format(good_attr, bad_attr)
+
+    def data_digest_mismatch(self, inc):
+        assert 'data_digest_mismatch' in inc['errors']
+        self._check_attrs(inc, 'data_digest')
+
+    def missing(self, inc):
+        assert 'missing' in inc['errors']
+        self._check_errors(inc, 'missing')
+
+    def size_mismatch(self, inc):
+        assert 'size_mismatch' in inc['errors']
+        self._check_attrs(inc, 'size')
+
+    def omap_digest_mismatch(self, inc):
+        assert 'omap_digest_mismatch' in inc['errors']
+        self._check_attrs(inc, 'omap_digest')
+
+
+def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id,
+                               obj_name, obj_path):
+    mon = manager.controller
+    pool = 'rbd'
+    omap_key = 'key'
+    omap_val = 'val'
+    manager.do_rados(mon, ['-p', pool, 'setomapval', obj_name,
+                           omap_key, omap_val])
+    messup = MessUp(manager, osd_remote, pool, osd_id, obj_name, obj_path,
+                    omap_key, omap_val)
+    for test in [messup.rm_omap, messup.add_omap, messup.change_omap,
+                 messup.append, messup.truncate, messup.change_obj,
+                 messup.remove]:
+        with test() as checks:
+            deep_scrub(manager, pg, pool)
+            cmd = 'rados list-inconsistent-pg {pool} ' \
+                  '--format=json'.format(pool=pool)
+            with contextlib.closing(StringIO()) as out:
+                mon.run(args=cmd.split(), stdout=out)
+                pgs = json.loads(out.getvalue())
+            assert pgs == [pg]
+
+            cmd = 'rados list-inconsistent-obj {pg} ' \
+                  '--format=json'.format(pg=pg)
+            with contextlib.closing(StringIO()) as out:
+                mon.run(args=cmd.split(), stdout=out)
+                objs = json.loads(out.getvalue())
+            assert len(objs['inconsistents']) == 1
+
+            checker = InconsistentObjChecker(osd_id, acting, obj_name)
+            inc_obj = objs['inconsistents'][0]
+            log.info('inc = %r', inc_obj)
+            checker.basic_checks(inc_obj)
+            for check in checks:
+                checker.run(check, inc_obj)
+
+
+def task(ctx, config):
+    """
+    Test [deep] scrub
+
+    tasks:
+    - chef:
+    - install:
+    - ceph:
+        log-whitelist:
+        - '!= known digest'
+        - '!= known omap_digest'
+        - deep-scrub 0 missing, 1 inconsistent objects
+        - deep-scrub 1 errors
+        - repair 0 missing, 1 inconsistent objects
+        - repair 1 errors, 1 fixed
+    - scrub_test:
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'scrub_test task only accepts a dict for configuration'
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < num_osds:
+        time.sleep(10)
+
+    for i in range(num_osds):
+        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'flush_pg_stats')
+    manager.wait_for_clean()
+
+    # write some data
+    p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1',
+                               'write', '-b', '4096'])
+    log.info('err is %d' % p.exitstatus)
+
+    # wait for some PG to have data that we can mess with
+    pg, acting = wait_for_victim_pg(manager)
+    osd = acting[0]
+
+    osd_remote, obj_path, obj_name = find_victim_object(ctx, pg, osd)
+    manager.do_rados(mon, ['-p', 'rbd', 'setomapval', obj_name, 'key', 'val'])
+    log.info('err is %d' % p.exitstatus)
+    manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', obj_name, 'hdr'])
+    log.info('err is %d' % p.exitstatus)
+
+    log.info('messing with PG %s on osd %d' % (pg, osd))
+    test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, 'rbd')
+    test_repair_bad_omap(ctx, manager, pg, osd, obj_name)
+    test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd,
+                               obj_name, obj_path)
+    log.info('test successful!')
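
For reference, a minimal sketch of the kind of entry the InconsistentObjChecker
above expects from "rados list-inconsistent-obj". The key names are the ones the
checker reads; the object name, digests and sizes below are purely hypothetical:

    inconsistent_entry = {
        'object': {'name': 'benchmark_data_host_1234_object0', 'snap': 'head'},
        'errors': ['data_digest_mismatch'],
        'shards': [
            # shard on the OSD that the MessUp helper corrupted
            {'osd': 0, 'errors': ['data_digest_mismatch'],
             'data_digest': '0x2d4a11c2', 'size': 4096},
            # healthy shard on another OSD in the acting set
            {'osd': 1, 'errors': [],
             'data_digest': '0xb3f4c6a1', 'size': 4096},
        ],
    }
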
diff --git a/qa/tasks/tests/__init__.py b/qa/tasks/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/qa/tasks/tests/test_buildpackages.py b/qa/tasks/tests/test_buildpackages.py
new file mode 100644
index 0000000..fed5aa0
--- /dev/null
+++ b/qa/tasks/tests/test_buildpackages.py
@@ -0,0 +1,170 @@
+# py.test -v -s tests/test_buildpackages.py
+
+from mock import patch, Mock
+
+from .. import buildpackages
+from teuthology import packaging
+
+def test_get_tag_branch_sha1():
+    gitbuilder = packaging.GitbuilderProject(
+        'ceph',
+        {
+            'os_type': 'centos',
+            'os_version': '7.0',
+        })
+    (tag, branch, sha1) = buildpackages.get_tag_branch_sha1(gitbuilder)
+    assert tag == None
+    assert branch == None
+    assert sha1 is not None
+
+    gitbuilder = packaging.GitbuilderProject(
+        'ceph',
+        {
+            'os_type': 'centos',
+            'os_version': '7.0',
+            'sha1': 'asha1',
+        })
+    (tag, branch, sha1) = buildpackages.get_tag_branch_sha1(gitbuilder)
+    assert tag == None
+    assert branch == None
+    assert sha1 == 'asha1'
+
+    remote = Mock
+    remote.arch = 'x86_64'
+    remote.os = Mock
+    remote.os.name = 'ubuntu'
+    remote.os.version = '14.04'
+    remote.os.codename = 'trusty'
+    remote.system_type = 'deb'
+    ctx = Mock
+    ctx.cluster = Mock
+    ctx.cluster.remotes = {remote: ['client.0']}
+
+    expected_tag = 'v0.94.1'
+    expected_sha1 = 'expectedsha1'
+    def check_output(cmd, shell):
+        assert shell == True
+        return expected_sha1 + " refs/tags/" + expected_tag
+    with patch.multiple(
+            buildpackages,
+            check_output=check_output,
+    ):
+        gitbuilder = packaging.GitbuilderProject(
+            'ceph',
+            {
+                'os_type': 'centos',
+                'os_version': '7.0',
+                'sha1': 'asha1',
+                'all': {
+                    'tag': tag,
+                },
+            },
+            ctx = ctx,
+            remote = remote)
+        (tag, branch, sha1) = buildpackages.get_tag_branch_sha1(gitbuilder)
+        assert tag == expected_tag
+        assert branch == None
+        assert sha1 == expected_sha1
+
+    expected_branch = 'hammer'
+    expected_sha1 = 'otherexpectedsha1'
+    def check_output(cmd, shell):
+        assert shell == True
+        return expected_sha1 + " refs/heads/" + expected_branch
+    with patch.multiple(
+            buildpackages,
+            check_output=check_output,
+    ):
+        gitbuilder = packaging.GitbuilderProject(
+            'ceph',
+            {
+                'os_type': 'centos',
+                'os_version': '7.0',
+                'sha1': 'asha1',
+                'all': {
+                    'branch': branch,
+                },
+            },
+            ctx = ctx,
+            remote = remote)
+        (tag, branch, sha1) = buildpackages.get_tag_branch_sha1(gitbuilder)
+        assert tag == None
+        assert branch == expected_branch
+        assert sha1 == expected_sha1
+
+def test_lookup_configs():
+    expected_system_type = 'deb'
+    def make_remote():
+        remote = Mock()
+        remote.arch = 'x86_64'
+        remote.os = Mock()
+        remote.os.name = 'ubuntu'
+        remote.os.version = '14.04'
+        remote.os.codename = 'trusty'
+        remote.system_type = expected_system_type
+        return remote
+    ctx = Mock()
+    class cluster:
+        remote1 = make_remote()
+        remote2 = make_remote()
+        remotes = {
+            remote1: ['client.0'],
+            remote2: ['mon.a','osd.0'],
+        }
+        def only(self, role):
+            result = Mock()
+            if role in ('client.0',):
+                result.remotes = { cluster.remote1: None }
+            elif role in ('osd.0', 'mon.a'):
+                result.remotes = { cluster.remote2: None }
+            else:
+                result.remotes = None
+            return result
+    ctx.cluster = cluster()
+    ctx.config = {
+        'roles': [ ['client.0'], ['mon.a','osd.0'] ],
+    }
+
+    # nothing -> nothing
+    assert buildpackages.lookup_configs(ctx, {}) == []
+    assert buildpackages.lookup_configs(ctx, {1:[1,2,3]}) == []
+    assert buildpackages.lookup_configs(ctx, [[1,2,3]]) == []
+    assert buildpackages.lookup_configs(ctx, None) == []
+
+    #
+    # the overrides apply to install and to install.upgrade entries
+    # that have no tag, branch or sha1
+    #
+    config = {
+        'overrides': {
+            'install': {
+                'ceph': {
+                    'sha1': 'overridesha1',
+                    'tag': 'overridetag',
+                    'branch': 'overridebranch',
+                },
+            },
+        },
+        'tasks': [
+            {
+                'install': {
+                    'sha1': 'installsha1',
+                },
+            },
+            {
+                'install.upgrade': {
+                    'osd.0': {
+                    },
+                    'client.0': {
+                        'sha1': 'client0sha1',
+                    },
+                },
+            }
+        ],
+    }
+    ctx.config = config
+    expected_configs = [{'branch': 'overridebranch', 'sha1': 'overridesha1', 'tag': 'overridetag'},
+                        {'project': 'ceph', 'branch': 'overridebranch', 'sha1': 'overridesha1', 'tag': 'overridetag'},
+                        {'project': 'ceph', 'sha1': 'client0sha1'}]
+
+    assert buildpackages.lookup_configs(ctx, config) == expected_configs
diff --git a/qa/tasks/tests/test_devstack.py b/qa/tasks/tests/test_devstack.py
new file mode 100644
index 0000000..117b307
--- /dev/null
+++ b/qa/tasks/tests/test_devstack.py
@@ -0,0 +1,48 @@
+from textwrap import dedent
+
+from .. import devstack
+
+
+class TestDevstack(object):
+    def test_parse_os_table(self):
+        table_str = dedent("""
+            +---------------------+--------------------------------------+
+            |       Property      |                Value                 |
+            +---------------------+--------------------------------------+
+            |     attachments     |                  []                  |
+            |  availability_zone  |                 nova                 |
+            |       bootable      |                false                 |
+            |      created_at     |      2014-02-21T17:14:47.548361      |
+            | display_description |                 None                 |
+            |     display_name    |                 NAME                 |
+            |          id         | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e |
+            |       metadata      |                  {}                  |
+            |         size        |                  1                   |
+            |     snapshot_id     |                 None                 |
+            |     source_volid    |                 None                 |
+            |        status       |               creating               |
+            |     volume_type     |                 None                 |
+            +---------------------+--------------------------------------+
+            """).strip()
+        expected = {
+            'Property': 'Value',
+            'attachments': '[]',
+            'availability_zone': 'nova',
+            'bootable': 'false',
+            'created_at': '2014-02-21T17:14:47.548361',
+            'display_description': 'None',
+            'display_name': 'NAME',
+            'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e',
+            'metadata': '{}',
+            'size': '1',
+            'snapshot_id': 'None',
+            'source_volid': 'None',
+            'status': 'creating',
+            'volume_type': 'None'}
+
+        vol_info = devstack.parse_os_table(table_str)
+        assert vol_info == expected
+
+
+
+
diff --git a/qa/tasks/tests/test_radosgw_admin.py b/qa/tasks/tests/test_radosgw_admin.py
new file mode 100644
index 0000000..59f3578
--- /dev/null
+++ b/qa/tasks/tests/test_radosgw_admin.py
@@ -0,0 +1,31 @@
+from mock import Mock
+
+from .. import radosgw_admin
+
+acl_with_version = """<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>foo</ID><DisplayName>Foo</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>foo</ID><DisplayName>Foo</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>
+"""  # noqa
+
+
+acl_without_version = """<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>foo</ID><DisplayName>Foo</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>foo</ID><DisplayName>Foo</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>
+"""  # noqa
+
+
+class TestGetAcl(object):
+
+    def setup(self):
+        self.key = Mock()
+
+    def test_removes_xml_version(self):
+        self.key.get_xml_acl = Mock(return_value=acl_with_version)
+        result = radosgw_admin.get_acl(self.key)
+        assert result.startswith('<AccessControlPolicy')
+
+    def test_xml_version_is_already_removed(self):
+        self.key.get_xml_acl = Mock(return_value=acl_without_version)
+        result = radosgw_admin.get_acl(self.key)
+        assert result.startswith('<AccessControlPolicy')
+
+    def test_newline_gets_trimmed(self):
+        self.key.get_xml_acl = Mock(return_value=acl_without_version)
+        result = radosgw_admin.get_acl(self.key)
+        assert result.endswith('\n') is False
diff --git a/qa/tasks/teuthology_integration.py b/qa/tasks/teuthology_integration.py
new file mode 100644
index 0000000..b5a2278
--- /dev/null
+++ b/qa/tasks/teuthology_integration.py
@@ -0,0 +1,19 @@
+import logging
+from teuthology import misc
+from teuthology.task import Task
+
+log = logging.getLogger(__name__)
+
+
+class TeuthologyIntegration(Task):
+
+    def begin(self):
+        misc.sh("""
+        set -x
+        pip install tox
+        tox
+        # tox -e py27-integration
+        tox -e openstack-integration
+        """)
+
+task = TeuthologyIntegration
diff --git a/qa/tasks/tgt.py b/qa/tasks/tgt.py
new file mode 100644
index 0000000..c2b322e
--- /dev/null
+++ b/qa/tasks/tgt.py
@@ -0,0 +1,177 @@
+"""
+Task to handle tgt
+
+Assumptions made:
+    The ceph-extras tgt package may need to be installed.
+    The open-iscsi package needs to be installed.
+"""
+import logging
+import contextlib
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def start_tgt_remotes(ctx, start_tgtd):
+    """
+    This subtask starts up a tgtd on the clients specified
+    """
+    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+    tgtd_list = []
+    for rem, roles in remotes.iteritems():
+        for _id in roles:
+            if _id in start_tgtd:
+                if rem not in tgtd_list:
+                    tgtd_list.append(rem)
+                    size = ctx.config.get('image_size', 10240)
+                    rem.run(
+                        args=[
+                            'rbd',
+                            'create',
+                            'iscsi-image',
+                            '--size',
+                            str(size),
+                    ])
+                    rem.run(
+                        args=[
+                            'sudo',
+                            'tgtadm',
+                            '--lld',
+                            'iscsi',
+                            '--mode',
+                            'target',
+                            '--op',
+                            'new',
+                            '--tid',
+                            '1',
+                            '--targetname',
+                            'rbd',
+                        ])
+                    rem.run(
+                        args=[
+                            'sudo',
+                            'tgtadm',
+                            '--lld',
+                            'iscsi',
+                            '--mode',
+                            'logicalunit',
+                            '--op',
+                            'new',
+                            '--tid',
+                            '1',
+                            '--lun',
+                            '1',
+                            '--backing-store',
+                            'iscsi-image',
+                            '--bstype',
+                            'rbd',
+                        ])
+                    rem.run(
+                        args=[
+                            'sudo',
+                            'tgtadm',
+                            '--lld',
+                            'iscsi',
+                            '--op',
+                            'bind',
+                            '--mode',
+                            'target',
+                            '--tid',
+                            '1',
+                            '-I',
+                            'ALL',
+                        ])
+    try:
+        yield
+
+    finally:
+        for rem in tgtd_list:
+            rem.run(
+                args=[
+                    'sudo',
+                    'tgtadm',
+                    '--lld',
+                    'iscsi',
+                    '--mode',
+                    'target',
+                    '--op',
+                    'delete',
+                    '--force',
+                    '--tid',
+                    '1',
+                ])
+            rem.run(
+                args=[
+                    'rbd',
+                    'snap',
+                    'purge',
+                    'iscsi-image',
+                ])
+            rem.run(
+                args=[
+                    'sudo',
+                    'rbd',
+                    'rm',
+                    'iscsi-image',
+                ])
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Start up tgt.
+
+    To start on all clients::
+
+        tasks:
+        - ceph:
+        - tgt:
+
+    To start on certain clients::
+
+        tasks:
+        - ceph:
+        - tgt: [client.0, client.3]
+
+    or
+
+        tasks:
+        - ceph:
+        - tgt:
+            client.0:
+            client.3:
+
+    An image size can also be specified::
+
+        tasks:
+        - ceph:
+        - tgt:
+            image_size: 20480
+
+    The general flow of things here is:
+        1. Find clients on which tgt is supposed to run (start_tgtd)
+        2. Remotely start up tgt daemon
+    On cleanup:
+        3. Stop tgt daemon
+
+    The iscsi administration is handled by the iscsi task.
+    """
+    if config:
+        config = {key : val for key, val in config.items()
+                if key.startswith('client')}
+    # config at this point should only contain keys starting with 'client'
+    start_tgtd = []
+    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+    log.info(remotes)
+    if not config:
+        start_tgtd = ['client.{id}'.format(id=id_)
+            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+    else:
+        start_tgtd = config
+    log.info(start_tgtd)
+    with contextutil.nested(
+            lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
+        yield
diff --git a/qa/tasks/thrash_pool_snaps.py b/qa/tasks/thrash_pool_snaps.py
new file mode 100644
index 0000000..c71c9ce
--- /dev/null
+++ b/qa/tasks/thrash_pool_snaps.py
@@ -0,0 +1,61 @@
+"""
+Thrash -- Simulate random osd failures.
+"""
+import contextlib
+import logging
+import gevent
+import time
+import random
+
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    "Thrash" snap creation and removal on the listed pools
+
+    Example:
+
+    thrash_pool_snaps:
+      pools: [.rgw.buckets, .rgw.buckets.index]
+      max_snaps: 10
+      min_snaps: 5
+      period: 10
+    """
+    stopping = False
+    def do_thrash():
+        pools = config.get('pools', [])
+        max_snaps = config.get('max_snaps', 10)
+        min_snaps = config.get('min_snaps', 5)
+        period = config.get('period', 30)
+        snaps = []
+        manager = ctx.managers['ceph']
+        def remove_snap():
+            assert len(snaps) > 0
+            snap = random.choice(snaps)
+            log.info("Removing snap %s" % (snap,))
+            for pool in pools:
+                manager.remove_pool_snap(pool, str(snap))
+            snaps.remove(snap)
+        def add_snap(snap):
+            log.info("Adding snap %s" % (snap,))
+            for pool in pools:
+                manager.add_pool_snap(pool, str(snap))
+            snaps.append(snap)
+        index = 0
+        while not stopping:
+            index += 1
+            time.sleep(period)
+            if len(snaps) <= min_snaps:
+                add_snap(index)
+            elif len(snaps) >= max_snaps:
+                remove_snap()
+            else:
+                random.choice([lambda: add_snap(index), remove_snap])()
+        log.info("Stopping")
+    thread = gevent.spawn(do_thrash)
+    yield
+    stopping = True
+    thread.join()
+
diff --git a/qa/tasks/thrashosds.py b/qa/tasks/thrashosds.py
new file mode 100644
index 0000000..fb1defb
--- /dev/null
+++ b/qa/tasks/thrashosds.py
@@ -0,0 +1,161 @@
+"""
+Thrash -- Simulate random osd failures.
+"""
+import contextlib
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    "Thrash" the OSDs by randomly marking them out/down (and then back
+    in) until the task is ended. This loops, and every op_delay
+    seconds it randomly chooses to add or remove an OSD (even odds)
+    unless there are fewer than min_out OSDs out of the cluster, or
+    more than min_in OSDs in the cluster.
+
+    All commands are run on mon0 and it stops when __exit__ is called.
+
+    The config is optional, and is a dict containing some or all of:
+
+    cluster: (default 'ceph') the name of the cluster to thrash
+
+    min_in: (default 3) the minimum number of OSDs to keep in the
+       cluster
+
+    min_out: (default 0) the minimum number of OSDs to keep out of the
+       cluster
+
+    op_delay: (5) the length of time to sleep between changing an
+       OSD's status
+
+    min_dead: (0) minimum number of osds to leave down/dead.
+
+    max_dead: (0) maximum number of osds to leave down/dead before waiting
+       for clean.  This should probably be num_replicas - 1.
+
+    clean_interval: (60) the approximate length of time to loop before
+       waiting until the cluster goes clean. (In reality this is used
+       to probabilistically choose when to wait, and the method used
+       makes it closer to -- but not identical to -- the half-life.)
+
+    scrub_interval: (-1) the approximate length of time to loop before
+       waiting until a scrub is performed while cleaning. (In reality
+       this is used to probabilistically choose when to wait, and it
+       only applies to the cases where cleaning is being performed).
+       -1 is used to indicate that no scrubbing will be done.
+
+    chance_down: (0.4) the probability that the thrasher will mark an
+       OSD down rather than marking it out. (The thrasher will not
+       consider that OSD out of the cluster, since presently an OSD
+       wrongly marked down will mark itself back up again.) This value
+       can be either an integer (eg, 75) or a float probability (eg
+       0.75).
+
+    chance_test_min_size: (0) chance to run test_pool_min_size,
+       which:
+       - kills all but one osd
+       - waits
+       - kills that osd
+       - revives all other osds
+       - verifies that the osds fully recover
+
+    timeout: (360) the number of seconds to wait for the cluster
+       to become clean after each cluster change. If this doesn't
+       happen within the timeout, an exception will be raised.
+
+    revive_timeout: (150) number of seconds to wait for an osd asok to
+       appear after attempting to revive the osd
+
+    thrash_primary_affinity: (true) randomly adjust primary-affinity
+
+    chance_pgnum_grow: (0) chance to increase a pool's size
+    chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool
+    pool_grow_by: (10) amount to increase pgnum by
+    max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd
+
+    pause_short: (3) duration of short pause
+    pause_long: (80) duration of long pause
+    pause_check_after: (50) assert osd down after this long
+    chance_inject_pause_short: (1) chance of injecting short stall
+    chance_inject_pause_long: (0) chance of injecting long stall
+
+    clean_wait: (0) duration to wait before resuming thrashing once clean
+
+    sighup_delay: (0.1) duration to delay between sending signal.SIGHUP to a
+                  random live osd
+
+    powercycle: (false) whether to power cycle the node instead
+        of just the osd process. Note that this assumes that a single
+        osd is the only important process on the node.
+
+    chance_test_backfill_full: (0) chance to simulate full disks stopping
+        backfill
+
+    chance_test_map_discontinuity: (0) chance to test map discontinuity
+    map_discontinuity_sleep_time: (40) time to wait for map trims
+
+    ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down
+    chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%)
+
+    example:
+
+    tasks:
+    - ceph:
+    - thrashosds:
+        cluster: ceph
+        chance_down: 10
+        op_delay: 3
+        min_in: 1
+        timeout: 600
+    - interactive:
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'thrashosds task only accepts a dict for configuration'
+    # add default value for sighup_delay
+    config['sighup_delay'] = config.get('sighup_delay', 0.1)
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('thrashosds', {}))
+    cluster = config.get('cluster', 'ceph')
+
+    if 'powercycle' in config:
+
+        # sync everyone first to avoid collateral damage to / etc.
+        log.info('Doing preliminary sync to avoid collateral damage...')
+        ctx.cluster.run(args=['sync'])
+
+        if 'ipmi_user' in ctx.teuthology_config:
+            for remote in ctx.cluster.remotes.keys():
+                log.debug('checking console status of %s' % remote.shortname)
+                if not remote.console.check_status():
+                    log.warn('Failed to get console status for %s',
+                             remote.shortname)
+
+            # check that all osd remotes have a valid console
+            osds = ctx.cluster.only(teuthology.is_type('osd', cluster))
+            for remote in osds.remotes.keys():
+                if not remote.console.has_ipmi_credentials:
+                    raise Exception(
+                        'IPMI console required for powercycling, '
+                        'but not available on osd role: {r}'.format(
+                            r=remote.name))
+
+    log.info('Beginning thrashosds...')
+    cluster_manager = ctx.managers[cluster]
+    thrash_proc = ceph_manager.Thrasher(
+        cluster_manager,
+        config,
+        logger=log.getChild('thrasher')
+        )
+    try:
+        yield
+    finally:
+        log.info('joining thrashosds')
+        thrash_proc.do_join()
+        cluster_manager.wait_for_recovery(config.get('timeout', 360))
diff --git a/qa/tasks/userdata_setup.yaml b/qa/tasks/userdata_setup.yaml
new file mode 100644
index 0000000..d39695b
--- /dev/null
+++ b/qa/tasks/userdata_setup.yaml
@@ -0,0 +1,25 @@
+#cloud-config-archive
+
+- type: text/cloud-config
+  content: |
+    output:
+      all: '| tee -a /var/log/cloud-init-output.log'
+
+# allow passwordless access for debugging
+- |
+  #!/bin/bash
+  exec passwd -d ubuntu
+
+- |
+  #!/bin/bash
+
+  # mount a NFS share for storing logs
+  apt-get update
+  apt-get -y install nfs-common
+  mkdir /mnt/log
+  # 10.0.2.2 is the host
+  mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log
+
+  # mount the iso image that has the test script
+  mkdir /mnt/cdrom
+  mount -t auto /dev/cdrom /mnt/cdrom
diff --git a/qa/tasks/userdata_teardown.yaml b/qa/tasks/userdata_teardown.yaml
new file mode 100644
index 0000000..7f3d64f
--- /dev/null
+++ b/qa/tasks/userdata_teardown.yaml
@@ -0,0 +1,11 @@
+- |
+  #!/bin/bash
+  cp /var/log/cloud-init-output.log /mnt/log
+
+- |
+  #!/bin/bash
+  umount /mnt/log
+
+- |
+  #!/bin/bash
+  shutdown -h -P now
diff --git a/qa/tasks/util/__init__.py b/qa/tasks/util/__init__.py
new file mode 100644
index 0000000..5b8575e
--- /dev/null
+++ b/qa/tasks/util/__init__.py
@@ -0,0 +1,26 @@
+from teuthology import misc
+
+def get_remote(ctx, cluster, service_type, service_id):
+    """
+    Get the Remote for the host where a particular role runs.
+
+    :param cluster: name of the cluster the service is part of
+    :param service_type: e.g. 'mds', 'osd', 'client'
+    :param service_id: The third part of a role, e.g. '0' for
+                       the role 'ceph.client.0'
+    :return: a Remote instance for the host where the
+             requested role is placed
+    """
+    def _is_instance(role):
+        role_tuple = misc.split_role(role)
+        return role_tuple == (cluster, service_type, str(service_id))
+    try:
+        (remote,) = ctx.cluster.only(_is_instance).remotes.keys()
+    except ValueError:
+        raise KeyError("Service {0}.{1}.{2} not found".format(cluster,
+                                                              service_type,
+                                                              service_id))
+    return remote
+
+def get_remote_for_role(ctx, role):
+    return get_remote(ctx, *misc.split_role(role))
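
A minimal usage sketch for the two helpers above, assuming "ctx" is the usual
teuthology context and the cluster defines a 'ceph.client.0' role (the role
name here is only an example):

    from util import get_remote, get_remote_for_role

    def example(ctx):
        remote = get_remote_for_role(ctx, 'ceph.client.0')
        # equivalent to spelling out the (cluster, type, id) components:
        assert remote is get_remote(ctx, 'ceph', 'client', '0')
        return remote
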
diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py
new file mode 100644
index 0000000..a5b27d5
--- /dev/null
+++ b/qa/tasks/util/rados.py
@@ -0,0 +1,79 @@
+import logging
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def rados(ctx, remote, cmd, wait=True, check_status=False):
+    testdir = teuthology.get_testdir(ctx)
+    log.info("rados %s" % ' '.join(cmd))
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'rados',
+        ]
+    pre.extend(cmd)
+    proc = remote.run(
+        args=pre,
+        check_status=check_status,
+        wait=wait,
+        )
+    if wait:
+        return proc.exitstatus
+    else:
+        return proc
+
+def create_ec_pool(remote, name, profile_name, pgnum, profile={}):
+    remote.run(args=['sudo', 'ceph'] +
+               cmd_erasure_code_profile(profile_name, profile))
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', name,
+        str(pgnum), str(pgnum), 'erasure', profile_name,
+        ])
+
+def create_replicated_pool(remote, name, pgnum):
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
+        ])
+
+def create_cache_pool(remote, base_name, cache_name, pgnum, size):
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
+    ])
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
+        str(size),
+    ])
+
+def cmd_erasure_code_profile(profile_name, profile):
+    """
+    Return the shell command to run to create the erasure code profile
+    described by the profile parameter.
+    
+    :param profile_name: a string matching [A-Za-z0-9-_.]+
+    :param profile: a map whose semantic depends on the erasure code plugin
+    :returns: a shell command as an array suitable for Remote.run
+
+    If profile is {}, it is replaced with 
+
+      { 'k': '2', 'm': '1', 'ruleset-failure-domain': 'osd'}
+
+    for backward compatibility. In previous versions of teuthology,
+    these values were hardcoded as function arguments and some yaml
+    files were designed with these implicit values. The teuthology
+    code should not know anything about the erasure code profile
+    content or semantic. The valid values and parameters are outside
+    its scope.
+    """
+
+    if profile == {}:
+        profile = {
+            'k': '2',
+            'm': '1',
+            'ruleset-failure-domain': 'osd'
+        }
+    return [
+        'osd', 'erasure-code-profile', 'set',
+        profile_name
+        ] + [ str(key) + '=' + str(value) for key, value in profile.iteritems() ]
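
A small sketch of what cmd_erasure_code_profile() above returns for the default
(empty) profile, assuming the qa/tasks directory is on the import path so the
module is importable as util.rados; the profile name is arbitrary and the order
of the key=value entries can vary because the dict is walked with iteritems():

    from util.rados import cmd_erasure_code_profile

    cmd = cmd_erasure_code_profile('testprofile', {})
    # e.g. ['osd', 'erasure-code-profile', 'set', 'testprofile',
    #       'k=2', 'm=1', 'ruleset-failure-domain=osd']
    assert 'k=2' in cmd and 'm=1' in cmd and 'testprofile' in cmd
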
diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py
new file mode 100644
index 0000000..46160bc
--- /dev/null
+++ b/qa/tasks/util/rgw.py
@@ -0,0 +1,181 @@
+from cStringIO import StringIO
+import logging
+import json
+import requests
+from requests.packages.urllib3.util import Retry
+from urlparse import urlparse
+
+from teuthology.orchestra.connection import split_user
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+# simple test to indicate if multi-region testing should occur
+def multi_region_enabled(ctx):
+    # this is populated by the radosgw-agent task, seems reasonable to
+    # use that as an indicator that we're testing multi-region sync
+    return 'radosgw_agent' in ctx
+
+def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
+             format='json'):
+    log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
+    testdir = teuthology.get_testdir(ctx)
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'radosgw-admin',
+        '--log-to-stderr',
+        '--format', format,
+        '-n',  client,
+        ]
+    pre.extend(cmd)
+    log.info('rgwadmin: cmd=%s' % pre)
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    proc = remote.run(
+        args=pre,
+        check_status=check_status,
+        stdout=StringIO(),
+        stderr=StringIO(),
+        stdin=stdin,
+        )
+    r = proc.exitstatus
+    out = proc.stdout.getvalue()
+    j = None
+    if not r and out != '':
+        try:
+            j = json.loads(out)
+            log.info(' json result: %s' % j)
+        except ValueError:
+            j = out
+            log.info(' raw result: %s' % j)
+    return (r, j)
+
+def get_user_summary(out, user):
+    """Extract the summary for a given user"""
+    user_summary = None
+    for summary in out['summary']:
+        if summary.get('user') == user:
+            user_summary = summary
+
+    if not user_summary:
+        raise AssertionError('No summary info found for user: %s' % user)
+
+    return user_summary
+
+def get_user_successful_ops(out, user):
+    summary = out['summary']
+    if len(summary) == 0:
+        return 0
+    return get_user_summary(out, user)['total']['successful_ops']
+
+def get_zone_host_and_port(ctx, client, zone):
+    _, region_map = rgwadmin(ctx, client, check_status=True,
+                             cmd=['-n', client, 'region-map', 'get'])
+    regions = region_map['zonegroups']
+    for region in regions:
+        for zone_info in region['val']['zones']:
+            if zone_info['name'] == zone:
+                endpoint = urlparse(zone_info['endpoints'][0])
+                host, port = endpoint.hostname, endpoint.port
+                if port is None:
+                    port = 80
+                return host, port
+    assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
+
+def get_master_zone(ctx, client):
+    _, region_map = rgwadmin(ctx, client, check_status=True,
+                             cmd=['-n', client, 'region-map', 'get'])
+    regions = region_map['zonegroups']
+    for region in regions:
+        is_master = (region['val']['is_master'] == "true")
+        log.info('region={r} is_master={ism}'.format(r=region, ism=is_master))
+        if not is_master:
+            continue
+        master_zone = region['val']['master_zone']
+        log.info('master_zone=%s' % master_zone)
+        for zone_info in region['val']['zones']:
+            if zone_info['name'] == master_zone:
+                return master_zone
+    log.info('couldn\'t find master zone')
+    return None
+
+def get_master_client(ctx, clients):
+    # can use any client for this as long as the system is configured correctly
+    master_zone = get_master_zone(ctx, clients[0])
+    if not master_zone:
+        return None
+
+    for client in clients:
+        zone = zone_for_client(ctx, client)
+        if zone == master_zone:
+            return client
+
+    return None
+
+def get_zone_system_keys(ctx, client, zone):
+    _, zone_info = rgwadmin(ctx, client, check_status=True,
+                            cmd=['-n', client,
+                                 'zone', 'get', '--rgw-zone', zone])
+    system_key = zone_info['system_key']
+    return system_key['access_key'], system_key['secret_key']
+
+def zone_for_client(ctx, client):
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    return ceph_config.get('rgw zone')
+
+def region_for_client(ctx, client):
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    return ceph_config.get('rgw region')
+
+def radosgw_data_log_window(ctx, client):
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    return ceph_config.get('rgw data log window', 30)
+
+def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
+    log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
+    # use retry with backoff to tolerate slow startup of radosgw-agent
+    s = requests.Session()
+    s.mount('http://{addr}:{port}/'.format(addr = agent_host, port = agent_port),
+            requests.adapters.HTTPAdapter(max_retries=Retry(total=5, backoff_factor=1)))
+    method = "full" if full else "incremental"
+    return s.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method))
+
+def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False):
+    log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
+    # use retry with backoff to tolerate slow startup of radosgw-agent
+    s = requests.Session()
+    s.mount('http://{addr}:{port}/'.format(addr = agent_host, port = agent_port),
+            requests.adapters.HTTPAdapter(max_retries=Retry(total=5, backoff_factor=1)))
+    method = "full" if full else "incremental"
+    return s.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method))
+
+def radosgw_agent_sync_all(ctx, full=False, data=False):
+    if ctx.radosgw_agent.procs:
+        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+            zone_for_client(ctx, agent_client)
+            sync_host, sync_port = get_sync_agent(ctx, agent_client)
+            log.debug('doing a sync via {host1}'.format(host1=sync_host))
+            radosgw_agent_sync_metadata(ctx, sync_host, sync_port, full)
+            if (data):
+                radosgw_agent_sync_data(ctx, sync_host, sync_port, full)
+
+def host_for_role(ctx, role):
+    for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+        if role in roles:
+            _, host = split_user(target)
+            return host
+
+def get_sync_agent(ctx, source):
+    for task in ctx.config['tasks']:
+        if 'radosgw-agent' not in task:
+            continue
+        for client, conf in task['radosgw-agent'].iteritems():
+            if conf['src'] == source:
+                return host_for_role(ctx, source), conf.get('port', 8000)
+    return None, None
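
For orientation, a sketch of the 'region-map get' structure that
get_zone_host_and_port() and get_master_zone() above walk. Only the keys this
module actually reads are shown; the zone names, host and port are hypothetical:

    region_map = {
        'zonegroups': [
            {
                'val': {
                    'is_master': 'true',
                    'master_zone': 'zone-a',
                    'zones': [
                        {'name': 'zone-a',
                         'endpoints': ['http://rgw-a.example.com:7280']},
                    ],
                },
            },
        ],
    }
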
diff --git a/qa/tasks/util/test/__init__.py b/qa/tasks/util/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/qa/tasks/util/test/test_rados.py b/qa/tasks/util/test/test_rados.py
new file mode 100644
index 0000000..ee1cfa6
--- /dev/null
+++ b/qa/tasks/util/test/test_rados.py
@@ -0,0 +1,40 @@
+#
+#  The MIT License
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing at cloudwatt.com>
+#
+# Author: Loic Dachary <loic at dachary.org>
+#
+#  Permission is hereby granted, free of charge, to any person
+#  obtaining a copy of this software and associated documentation
+#  files (the "Software"), to deal in the Software without
+#  restriction, including without limitation the rights to use,
+#  copy, modify, merge, publish, distribute, sublicense, and/or sell
+#  copies of the Software, and to permit persons to whom the
+#  Software is furnished to do so, subject to the following
+#  conditions:
+#
+#  The above copyright notice and this permission notice shall be
+#  included in all copies or substantial portions of the Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+#  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+#  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+#  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+#  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+#  OTHER DEALINGS IN THE SOFTWARE.
+#
+from .. import rados
+
+class TestRados(object):
+
+    def test_cmd_erasure_code_profile(self):
+        name = 'NAME'
+        cmd = rados.cmd_erasure_code_profile(name, {})
+        assert 'k=2' in cmd
+        assert name in cmd
+        cmd = rados.cmd_erasure_code_profile(name, { 'k': '88' })
+        assert 'k=88' in cmd
+        assert name in cmd
diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py
new file mode 100644
index 0000000..8f6d33b
--- /dev/null
+++ b/qa/tasks/watch_notify_same_primary.py
@@ -0,0 +1,134 @@
+
+"""
+watch_notify_same_primary task
+"""
+from cStringIO import StringIO
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+from teuthology.contextutil import safe_while
+
+log = logging.getLogger(__name__)
+
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run watch_notify_same_primary
+
+    The config should be as follows:
+
+    watch_notify_same_primary:
+        clients: [client list]
+
+    The client list should contain 1 client
+
+    The test requires 3 osds.
+
+    example:
+
+    tasks:
+    - ceph:
+    - watch_notify_same_primary:
+        clients: [client.0]
+    - interactive:
+    """
+    log.info('Beginning watch_notify_same_primary...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+
+    clients = config.get('clients', ['client.0'])
+    assert len(clients) == 1
+    role = clients[0]
+    assert isinstance(role, basestring)
+    PREFIX = 'client.'
+    assert role.startswith(PREFIX)
+    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+    manager = ctx.managers['ceph']
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+
+    pool = manager.create_pool_with_unique_name()
+    def obj(n): return "foo-{num}".format(num=n)
+    def start_watch(n):
+        remote.run(
+            args = [
+                "rados",
+                "-p", pool,
+                "put",
+                obj(n),
+                "/etc/resolv.conf"],
+            logger=log.getChild('watch.{id}'.format(id=n)))
+        proc = remote.run(
+            args = [
+                "rados",
+                "-p", pool,
+                "watch",
+                obj(n)],
+            stdin=run.PIPE,
+            stdout=StringIO(),
+            stderr=StringIO(),
+            wait=False)
+        return proc
+
+    num = 20
+
+    watches = [start_watch(i) for i in range(num)]
+
+    # wait for them all to register
+    for i in range(num):
+        with safe_while() as proceed:
+            while proceed():
+                proc = remote.run(
+                    args = [
+                        "rados",
+                        "-p", pool,
+                        "listwatchers",
+                        obj(i)],
+                    stdout=StringIO())
+                lines = proc.stdout.getvalue()
+                num_watchers = lines.count('watcher=')
+                log.info('i see %d watchers for %s', num_watchers, obj(i))
+                if num_watchers >= 1:
+                    break
+
+    def notify(n, msg):
+        remote.run(
+            args = [
+                "rados",
+                "-p", pool,
+                "notify",
+                obj(n),
+                msg],
+            logger=log.getChild('notify.{id}'.format(id=n)))
+
+    [notify(n, 'notify1') for n in range(len(watches))]
+
+    manager.kill_osd(0)
+    manager.mark_down_osd(0)
+
+    [notify(n, 'notify2') for n in range(len(watches))]
+
+    try:
+        yield
+    finally:
+        log.info('joining watch_notify_same_primary')
+        for watch in watches:
+            watch.stdin.write("\n")
+
+        run.wait(watches)
+
+        for watch in watches:
+            lines = watch.stdout.getvalue().split("\n")
+            got1 = False
+            got2 = False
+            for l in lines:
+                if 'notify1' in l:
+                    got1 = True
+                if 'notify2' in l:
+                    got2 = True
+            log.info(lines)
+            assert got1 and got2
+
+        manager.revive_osd(0)
+        manager.remove_pool(pool)
diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py
new file mode 100644
index 0000000..6db313f
--- /dev/null
+++ b/qa/tasks/watch_notify_stress.py
@@ -0,0 +1,69 @@
+"""
+test_stress_watch task
+"""
+import contextlib
+import logging
+import proc_thrasher
+
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+ at contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run test_stress_watch
+
+    The config should be as follows:
+
+    test_stress_watch:
+        clients: [client list]
+
+    example:
+
+    tasks:
+    - ceph:
+    - test_stress_watch:
+        clients: [client.0]
+    - interactive:
+    """
+    log.info('Beginning test_stress_watch...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+    testwatch = {}
+
+    remotes = []
+
+    for role in config.get('clients', ['client.0']):
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        remotes.append(remote)
+
+        args = ['CEPH_CLIENT_ID={id_}'.format(id_=id_),
+               'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
+               'daemon-helper',
+               'kill',
+               'multi_stress_watch foo foo'
+               ]
+
+        log.info("args are %s" % (args,))
+
+        proc = proc_thrasher.ProcThrasher({}, remote,
+            args=[run.Raw(i) for i in args],
+            logger=log.getChild('testwatch.{id}'.format(id=id_)),
+            stdin=run.PIPE,
+            wait=False
+            )
+        proc.start()
+        testwatch[id_] = proc
+
+    try:
+        yield
+    finally:
+        log.info('joining watch_notify_stress')
+        for i in testwatch.itervalues():
+            i.join()
diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py
new file mode 100644
index 0000000..c509d50
--- /dev/null
+++ b/qa/tasks/workunit.py
@@ -0,0 +1,428 @@
+"""
+Workunit task -- Run ceph on sets of specific clients
+"""
+import logging
+import pipes
+import os
+
+from util import get_remote_for_role
+
+from teuthology import misc
+from teuthology.config import config as teuth_config
+from teuthology.orchestra.run import CommandFailedError
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+    """
+    Run ceph on all workunits found under the specified path.
+
+    For example::
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0]
+        - workunit:
+            clients:
+              client.0: [direct_io, xattrs.sh]
+              client.1: [snaps]
+            branch: foo
+
+    You can also run a list of workunits on all clients:
+        tasks:
+        - ceph:
+        - ceph-fuse:
+        - workunit:
+            tag: v0.47
+            clients:
+              all: [direct_io, xattrs.sh, snaps]
+
+    If you have an "all" section it will run all the workunits
+    on each client simultaneously, AFTER running any workunits specified
+    for individual clients. (This prevents unintended simultaneous runs.)
+
+    To customize tests, you can specify environment variables as a dict. You
+    can also specify a time limit for each work unit (defaults to 3h):
+
+        tasks:
+        - ceph:
+        - ceph-fuse:
+        - workunit:
+            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
+            clients:
+              all: [snaps]
+            env:
+              FOO: bar
+              BAZ: quux
+            timeout: 3h
+
+    This task supports roles that include a ceph cluster, e.g.::
+
+        tasks:
+        - ceph:
+        - workunit:
+            clients:
+              backup.client.0: [foo]
+              client.1: [bar] # cluster is implicitly 'ceph'
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    assert isinstance(config, dict)
+    assert isinstance(config.get('clients'), dict), \
+        'configuration must contain a dictionary of clients'
+
+    overrides = ctx.config.get('overrides', {})
+    misc.deep_merge(config, overrides.get('workunit', {}))
+
+    refspec = config.get('branch')
+    if refspec is None:
+        refspec = config.get('tag')
+    if refspec is None:
+        refspec = config.get('sha1')
+    if refspec is None:
+        refspec = 'HEAD'
+
+    timeout = config.get('timeout', '3h')
+
+    log.info('Pulling workunits from ref %s', refspec)
+
+    created_mountpoint = {}
+
+    if config.get('env') is not None:
+        assert isinstance(config['env'], dict), 'env must be a dictionary'
+    clients = config['clients']
+
+    # Create scratch dirs for any non-all workunits
+    log.info('Making a separate scratch dir for every client...')
+    for role in clients.iterkeys():
+        assert isinstance(role, basestring)
+        if role == "all":
+            continue
+
+        assert 'client' in role
+        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
+        created_mountpoint[role] = created_mnt_dir
+
+    # Execute any non-all workunits
+    with parallel() as p:
+        for role, tests in clients.iteritems():
+            if role != "all":
+                p.spawn(_run_tests, ctx, refspec, role, tests,
+                        config.get('env'), timeout=timeout)
+
+    # Clean up dirs from any non-all workunits
+    for role, created in created_mountpoint.items():
+        _delete_dir(ctx, role, created)
+
+    # Execute any 'all' workunits
+    if 'all' in clients:
+        all_tasks = clients["all"]
+        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
+                              config.get('subdir'), timeout=timeout)
+
+
+def _client_mountpoint(ctx, cluster, id_):
+    """
+    Returns the path to the expected mountpoint for workunits running
+    on some kind of filesystem.
+    """
+    # for compatibility with tasks like ceph-fuse that aren't cluster-aware yet,
+    # only include the cluster name in the dir if the cluster is not 'ceph'
+    if cluster == 'ceph':
+        dir_ = 'mnt.{0}'.format(id_)
+    else:
+        dir_ = 'mnt.{0}.{1}'.format(cluster, id_)
+    return os.path.join(misc.get_testdir(ctx), dir_)
+
+
+def _delete_dir(ctx, role, created_mountpoint):
+    """
+    Delete file used by this role, and delete the directory that this
+    role appeared in.
+
+    :param ctx: Context
+    :param role: "role.#" where # is used for the role id.
+    """
+    cluster, _, id_ = misc.split_role(role)
+    remote = get_remote_for_role(ctx, role)
+    mnt = _client_mountpoint(ctx, cluster, id_)
+    client = os.path.join(mnt, 'client.{id}'.format(id=id_))
+
+    # Remove the directory inside the mount where the workunit ran
+    remote.run(
+        args=[
+            'sudo',
+            'rm',
+            '-rf',
+            '--',
+            client,
+        ],
+    )
+    log.info("Deleted dir {dir}".format(dir=client))
+
+    # If the mount was an artificially created dir, delete that too
+    if created_mountpoint:
+        remote.run(
+            args=[
+                'rmdir',
+                '--',
+                mnt,
+            ],
+        )
+        log.info("Deleted artificial mount point {dir}".format(dir=client))
+
+
+def _make_scratch_dir(ctx, role, subdir):
+    """
+    Make scratch directories for this role.  This also makes the mount
+    point if that directory does not exist.
+
+    :param ctx: Context
+    :param role: "role.#" where # is used for the role id.
+    :param subdir: use this subdir (False if not used)
+    """
+    created_mountpoint = False
+    cluster, _, id_ = misc.split_role(role)
+    remote = get_remote_for_role(ctx, role)
+    dir_owner = remote.user
+    mnt = _client_mountpoint(ctx, cluster, id_)
+    # if neither kclient nor ceph-fuse is required for a workunit,
+    # mnt may not exist. Stat and create the directory if it doesn't.
+    try:
+        remote.run(
+            args=[
+                'stat',
+                '--',
+                mnt,
+            ],
+        )
+        log.info('Did not need to create dir {dir}'.format(dir=mnt))
+    except CommandFailedError:
+        remote.run(
+            args=[
+                'mkdir',
+                '--',
+                mnt,
+            ],
+        )
+        log.info('Created dir {dir}'.format(dir=mnt))
+        created_mountpoint = True
+
+    if not subdir:
+        subdir = 'client.{id}'.format(id=id_)
+
+    if created_mountpoint:
+        remote.run(
+            args=[
+                'cd',
+                '--',
+                mnt,
+                run.Raw('&&'),
+                'mkdir',
+                '--',
+                subdir,
+            ],
+        )
+    else:
+        remote.run(
+            args=[
+                # cd first so this will fail if the mount point does
+                # not exist; pure install -d will silently do the
+                # wrong thing
+                'cd',
+                '--',
+                mnt,
+                run.Raw('&&'),
+                'sudo',
+                'install',
+                '-d',
+                '-m', '0755',
+                '--owner={user}'.format(user=dir_owner),
+                '--',
+                subdir,
+            ],
+        )
+
+    return created_mountpoint
+
+
+def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
+    """
+    Make a scratch directory for each client in the cluster, then spawn
+    _run_tests() for every test on each client role.
+
+    See run_tests() for parameter documentation.
+    """
+    is_client = misc.is_type('client')
+    client_remotes = {}
+    created_mountpoint = {}
+    for remote, roles_for_host in ctx.cluster.remotes.items():
+        for role in roles_for_host:
+            if is_client(role):
+                client_remotes[role] = remote
+                created_mountpoint[role] = _make_scratch_dir(ctx, role, subdir)
+
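+    # Run each workunit on every client in parallel, one workunit at a time.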
+    for unit in tests:
+        with parallel() as p:
+            for role, remote in client_remotes.items():
+                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
+                        timeout=timeout)
+
+    # cleanup the generated client directories
+    for role, _ in client_remotes.items():
+        _delete_dir(ctx, role, created_mountpoint[role])
+
+
+def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
+    """
+    Run the given tests for a single role. Clone the workunits from git,
+    build them if a Makefile is present, and then run the requested tests.
+    Clean up (remove files created) after the tests are finished.
+
+    :param ctx:     Context
+    :param refspec: branch, sha1, or version tag used to identify this
+                    build
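+    :param role:    the role (e.g. 'client.0') that runs the workunits.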
+    :param tests:   list of workunits to run.
+    :param env:     environment set in yaml file.  Could be None.
+    :param subdir:  subdirectory set in yaml file.  Could be None.
+    :param timeout: If present, use the 'timeout' command on the remote host
+                    to limit execution time. Must be specified by a number
+                    followed by 's' for seconds, 'm' for minutes, 'h' for
+                    hours, or 'd' for days. If '0' or anything that evaluates
+                    to False is passed, the 'timeout' command is not used.
+    """
+    testdir = misc.get_testdir(ctx)
+    assert isinstance(role, basestring)
+    cluster, type_, id_ = misc.split_role(role)
+    assert type_ == 'client'
+    remote = get_remote_for_role(ctx, role)
+    mnt = _client_mountpoint(ctx, cluster, id_)
+    # subdir so we can remove and recreate this a lot without sudo
+    if subdir is None:
+        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
+    else:
+        scratch_tmp = os.path.join(mnt, subdir)
+    clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
+    srcdir = '{cdir}/qa/workunits'.format(cdir=clonedir)
+
+    git_url = teuth_config.get_ceph_git_url()
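+    # Clone the ceph repo and check out the requested ref; if that fails,
+    # retry with 'ceph-ci' in the URL replaced by 'ceph'.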
+    try:
+        remote.run(
+            logger=log.getChild(role),
+            args=[
+                'rm',
+                '-rf',
+                clonedir,
+                run.Raw('&&'),
+                'git',
+                'clone',
+                git_url,
+                clonedir,
+                run.Raw('&&'),
+                'cd', '--', clonedir,
+                run.Raw('&&'),
+                'git', 'checkout', refspec,
+            ],
+        )
+    except CommandFailedError:
+        alt_git_url = git_url.replace('ceph-ci', 'ceph')
+        log.info(
+            "failed to check out '%s' from %s; will also try in %s",
+            refspec,
+            git_url,
+            alt_git_url,
+        )
+        remote.run(
+            logger=log.getChild(role),
+            args=[
+                'rm',
+                '-rf',
+                clonedir,
+                run.Raw('&&'),
+                'git',
+                'clone',
+                alt_git_url,
+                clonedir,
+                run.Raw('&&'),
+                'cd', '--', clonedir,
+                run.Raw('&&'),
+                'git', 'checkout', refspec,
+            ],
+        )
+
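+    # Build any workunits that ship a Makefile, then record every executable
+    # file (NUL-separated, relative paths) in a per-role workunit list.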
+    remote.run(
+        logger=log.getChild(role),
+        args=[
+            'cd', '--', srcdir,
+            run.Raw('&&'),
+            'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
+            run.Raw('&&'),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
+            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
+        ],
+    )
+
+    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)
+    workunits = sorted(misc.get_file(remote, workunits_file).split('\0'))
+    assert workunits
+
+    try:
+        assert isinstance(tests, list)
+        for spec in tests:
+            log.info('Running workunits matching %s on %s...', spec, role)
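+            # A spec matches either a single workunit or, as a prefix, a whole
+            # directory of workunits.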
+            prefix = '{spec}/'.format(spec=spec)
+            to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
+            if not to_run:
+                raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
+            for workunit in to_run:
+                log.info('Running workunit %s...', workunit)
+                args = [
+                    'mkdir', '-p', '--', scratch_tmp,
+                    run.Raw('&&'),
+                    'cd', '--', scratch_tmp,
+                    run.Raw('&&'),
+                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
+                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
+                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
+                    run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)),
+                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
+                    run.Raw('PATH=$PATH:/usr/sbin'),
+                    run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
+                ]
+                if env is not None:
+                    for var, val in env.iteritems():
+                        quoted_val = pipes.quote(val)
+                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
+                        args.append(run.Raw(env_arg))
+                args.extend([
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    '{tdir}/archive/coverage'.format(tdir=testdir)])
+                if timeout and timeout != '0':
+                    args.extend(['timeout', timeout])
+                args.extend([
+                    '{srcdir}/{workunit}'.format(
+                        srcdir=srcdir,
+                        workunit=workunit,
+                    ),
+                ])
+                remote.run(
+                    logger=log.getChild(role),
+                    args=args,
+                    label="workunit test {workunit}".format(workunit=workunit)
+                )
+                remote.run(
+                    logger=log.getChild(role),
+                    args=['sudo', 'rm', '-rf', '--', scratch_tmp],
+                )
+    finally:
+        log.info('Stopping %s on %s...', tests, role)
+        remote.run(
+            logger=log.getChild(role),
+            args=[
+                'rm', '-rf', '--', workunits_file, clonedir,
+            ],
+        )
diff --git a/qa/timezone/eastern.yaml b/qa/timezone/eastern.yaml
new file mode 100644
index 0000000..019c761
--- /dev/null
+++ b/qa/timezone/eastern.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/New_York | sudo tee /etc/timezone
diff --git a/qa/timezone/pacific.yaml b/qa/timezone/pacific.yaml
new file mode 100644
index 0000000..6944aa6
--- /dev/null
+++ b/qa/timezone/pacific.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    all:
+      - echo America/Los_Angeles | sudo tee /etc/timezone
diff --git a/qa/timezone/random.yaml b/qa/timezone/random.yaml
new file mode 100644
index 0000000..1d48ce9
--- /dev/null
+++ b/qa/timezone/random.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    all:
+      - echo America/Los_Angeles | sudo tee /etc/timezone
+      - "[ $RANDOM -gt 32000 ] && echo America/New_York | sudo tee /etc/timezone"
diff --git a/qa/tox.ini b/qa/tox.ini
new file mode 100644
index 0000000..c5826ec
--- /dev/null
+++ b/qa/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = flake8
+skipsdist = True
+
+[testenv:flake8]
+deps=
+  flake8
+commands=flake8 --select=F,E9 --exclude=venv,.tox
diff --git a/qa/workunits/ceph-helpers.sh b/qa/workunits/ceph-helpers.sh
index 4f5f8f8..9947217 100755
--- a/qa/workunits/ceph-helpers.sh
+++ b/qa/workunits/ceph-helpers.sh
@@ -1442,7 +1442,7 @@ function test_wait_background() {
 # @return 0 on success, 1 on error
 #
 function main() {
-    local dir=testdir/$1
+    local dir=td/$1
     shift
 
     shopt -s -o xtrace
@@ -1481,7 +1481,7 @@ function run_tests() {
     export CEPH_CONF=/dev/null
 
     local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
-    local dir=testdir/ceph-helpers
+    local dir=td/ceph-helpers
 
     for func in $funcs ; do
         $func $dir || return 1
diff --git a/qa/workunits/fs/test_python.sh b/qa/workunits/fs/test_python.sh
index ea0af66..656d89f 100755
--- a/qa/workunits/fs/test_python.sh
+++ b/qa/workunits/fs/test_python.sh
@@ -1,10 +1,6 @@
 #!/bin/sh -ex
 
-CEPH_REF=${CEPH_REF:-master}
-wget -O test_cephfs.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/pybind/test_cephfs.py" || \
-    wget -O test_cephfs.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=ref/heads/$CEPH_REF;f=src/test/pybind/test_cephfs.py"
-
 # Running as root because the filesystem root directory will be
 # owned by uid 0, and that's where we're writing.
-sudo nosetests -v test_cephfs
+sudo nosetests -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py
 exit 0
diff --git a/qa/workunits/rados/test_cache_pool.sh b/qa/workunits/rados/test_cache_pool.sh
index 4db965d..308cb3c 100755
--- a/qa/workunits/rados/test_cache_pool.sh
+++ b/qa/workunits/rados/test_cache_pool.sh
@@ -127,6 +127,37 @@ expect_false rados -p base cache-flush-evict-all
 rados -p cache cache-try-flush-evict-all
 rados -p cache ls - | wc -l | grep 0
 
+# cache flush/evict when clone objects exist
+rados -p base put testclone /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap
+rados -p base put testclone /etc/hosts
+rados -p cache cache-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+ceph osd tier cache-mode cache forward --yes-i-really-mean-it
+rados -p base -s snap get testclone testclone.txt
+diff -q testclone.txt /etc/passwd
+rados -p base get testclone testclone.txt
+diff -q testclone.txt /etc/hosts
+
+# test --with-clones option
+ceph osd tier cache-mode cache writeback
+rados -p base put testclone2 /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap1
+rados -p base put testclone2 /etc/hosts
+expect_false rados -p cache cache-flush testclone2
+rados -p cache cache-flush testclone2 --with-clones
+expect_false rados -p cache cache-evict testclone2
+rados -p cache cache-evict testclone2 --with-clones
+rados -p cache ls - | wc -l | grep 0
+
+rados -p base -s snap1 get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/passwd
+rados -p base get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/hosts
+
 # cleanup
 ceph osd tier remove-overlay base
 ceph osd tier remove base cache
diff --git a/qa/workunits/rados/test_python.sh b/qa/workunits/rados/test_python.sh
index 2aaff2d..7cdb39e 100755
--- a/qa/workunits/rados/test_python.sh
+++ b/qa/workunits/rados/test_python.sh
@@ -1,8 +1,4 @@
 #!/bin/sh -ex
 
-CEPH_REF=${CEPH_REF:-master}
-#wget -q https://raw.github.com/ceph/ceph/$CEPH_REF/src/test/pybind/test_rados.py
-wget -O test_rados.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/pybind/test_rados.py" || \
-    wget -O test_rados.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=ref/heads/$CEPH_REF;f=src/test/pybind/test_rados.py"
-nosetests -v test_rados
+nosetests -v $(dirname $0)/../../../src/test/pybind/test_rados.py
 exit 0
diff --git a/qa/workunits/rados/test_rados_tool.sh b/qa/workunits/rados/test_rados_tool.sh
index 1368d92..04b2f59 100755
--- a/qa/workunits/rados/test_rados_tool.sh
+++ b/qa/workunits/rados/test_rados_tool.sh
@@ -270,7 +270,7 @@ cleanup() {
 
 test_omap() {
     cleanup
-    for i in $(seq 1 1 600)
+    for i in $(seq 1 1 10)
     do
 	if [ $(($i % 2)) -eq 0 ]; then
             $RADOS_TOOL -p $POOL setomapval $OBJ $i $i
@@ -279,7 +279,26 @@ test_omap() {
 	fi
         $RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$"
     done
-    $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 600
+    $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10
+    for i in $(seq 1 1 5)
+    do
+        $RADOS_TOOL -p $POOL rmomapkey $OBJ $i
+    done
+    $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5
+    cleanup
+
+    for i in $(seq 1 1 10)
+    do
+        dd if=/dev/urandom bs=128 count=1 > $TDIR/omap_key
+        if [ $(($i % 2)) -eq 0 ]; then
+            $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i
+        else
+            echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ
+        fi
+        $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$"
+        $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ
+        $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0
+    done
     cleanup
 }
 
diff --git a/qa/workunits/rbd/notify_master.sh b/qa/workunits/rbd/notify_master.sh
index 3d1b224..6ebea31 100755
--- a/qa/workunits/rbd/notify_master.sh
+++ b/qa/workunits/rbd/notify_master.sh
@@ -1,7 +1,5 @@
 #!/bin/sh -ex
 
-CEPH_REF=${CEPH_REF:-master}
-wget -O test_notify.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/librbd/test_notify.py"
-
-python test_notify.py master
+relpath=$(dirname $0)/../../../src/test/librbd
+python $relpath/test_notify.py master
 exit 0
diff --git a/qa/workunits/rbd/notify_slave.sh b/qa/workunits/rbd/notify_slave.sh
index e94894a..ea66161 100755
--- a/qa/workunits/rbd/notify_slave.sh
+++ b/qa/workunits/rbd/notify_slave.sh
@@ -1,7 +1,5 @@
 #!/bin/sh -ex
 
-CEPH_REF=${CEPH_REF:-master}
-wget -O test_notify.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/librbd/test_notify.py"
-
-python test_notify.py slave
+relpath=$(dirname $0)/../../../src/test/librbd
+python $relpath/test_notify.py slave
 exit 0
diff --git a/qa/workunits/rbd/qemu-iotests.sh b/qa/workunits/rbd/qemu-iotests.sh
index 3d72394..e775ade 100755
--- a/qa/workunits/rbd/qemu-iotests.sh
+++ b/qa/workunits/rbd/qemu-iotests.sh
@@ -5,15 +5,18 @@
 # require the admin ceph user, as there's no way to pass the ceph user
 # to qemu-iotests currently.
 
-# This will only work with particular qemu versions, like 1.0. Later
-# versions of qemu include qemu-iotests directly in the qemu
-# repository.
-testlist='001 002 003 004 005 008 009 010 011 021 025 032 033 055 077'
+testlist='001 002 003 004 005 008 009 010 011 021 025 032 033 055'
 
 git clone https://github.com/qemu/qemu.git
-# use v2.2.0-rc3 (last released version that handles all the tests
 cd qemu
-git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
+if lsb_release -da | grep -iq xenial; then
+    # Xenial requires a recent test harness
+    git checkout v2.3.0
+else
+    # use v2.2.0-rc3 (last released version that handles all the tests)
+    git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
+
+fi
 
 cd tests/qemu-iotests
 mkdir bin
diff --git a/qa/workunits/rbd/rbd-nbd.sh b/qa/workunits/rbd/rbd-nbd.sh
index 1e08ade..a414c4d 100755
--- a/qa/workunits/rbd/rbd-nbd.sh
+++ b/qa/workunits/rbd/rbd-nbd.sh
@@ -4,6 +4,7 @@
 
 POOL=rbd
 IMAGE=testrbdnbd$$
+TOO_LARGE_IMAGE=${IMAGE}_large
 SUDO=sudo
 SIZE=64
 DATA=
@@ -16,6 +17,7 @@ setup()
     DATA=${TEMPDIR}/data
     dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
     rbd --dest-pool ${POOL} --no-progress import ${DATA} ${IMAGE}
+    rbd -p ${POOL} create ${TOO_LARGE_IMAGE} --size 3T
 
     if [ `id -u` = 0 ]
     then
@@ -38,6 +40,7 @@ function cleanup()
 	done
 	rbd -p ${POOL} remove ${IMAGE}
     fi
+    rbd -p ${POOL} remove ${TOO_LARGE_IMAGE}
 }
 
 function expect_false()
@@ -60,6 +63,7 @@ then
 fi
 expect_false ${SUDO} rbd-nbd map INVALIDIMAGE
 expect_false ${SUDO} rbd-nbd --device INVALIDDEV map ${IMAGE}
+expect_false ${SUDO} rbd-nbd map ${TOO_LARGE_IMAGE}
 
 # map test using the first unused device
 DEV=`${SUDO} rbd-nbd map ${POOL}/${IMAGE}`
diff --git a/qa/workunits/rbd/rbd_mirror.sh b/qa/workunits/rbd/rbd_mirror.sh
index fcfae98..21e6021 100755
--- a/qa/workunits/rbd/rbd_mirror.sh
+++ b/qa/workunits/rbd/rbd_mirror.sh
@@ -7,12 +7,7 @@
 # socket, temporary files, and launches rbd-mirror daemon.
 #
 
-if [ -n "${CEPH_REF}" ]; then
-  wget -O rbd_mirror_helpers.sh "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=qa/workunits/rbd/rbd_mirror_helpers.sh"
-  . ./rbd_mirror_helpers.sh
-else
-  . $(dirname $0)/rbd_mirror_helpers.sh
-fi
+. $(dirname $0)/rbd_mirror_helpers.sh
 
 testlog "TEST: add image and test replay"
 start_mirror ${CLUSTER1}
@@ -51,34 +46,50 @@ testlog "TEST: stop/start/restart mirror via admin socket"
 admin_daemon ${CLUSTER1} rbd mirror stop
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
 
 admin_daemon ${CLUSTER1} rbd mirror start
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror restart
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror stop
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
 
 admin_daemon ${CLUSTER1} rbd mirror restart
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
 
 admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror start
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror restart ${POOL}/${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
@@ -86,6 +97,8 @@ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 admin_daemon ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
 
 admin_daemon ${CLUSTER1} rbd mirror flush
 admin_daemon ${CLUSTER1} rbd mirror status
@@ -242,8 +255,8 @@ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 
 testlog "TEST: simple image resync"
-request_resync_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
 compare_images ${POOL} ${image}
@@ -251,9 +264,9 @@ compare_images ${POOL} ${image}
 testlog "TEST: image resync while replayer is stopped"
 admin_daemon ${CLUSTER1} rbd mirror stop ${POOL}/${image}
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-request_resync_image ${CLUSTER1} ${POOL} ${image}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
 admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
 admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
@@ -261,7 +274,7 @@ compare_images ${POOL} ${image}
 
 testlog "TEST: request image resync while daemon is offline"
 stop_mirror ${CLUSTER1}
-request_resync_image ${CLUSTER1} ${POOL} ${image}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
 start_mirror ${CLUSTER1}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
@@ -282,8 +295,8 @@ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
 
 testlog " - replay started after resync requested"
-request_resync_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
 test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
@@ -307,8 +320,8 @@ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
 
 testlog " - replay started after resync requested"
-request_resync_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
 wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
 test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
@@ -330,4 +343,18 @@ test -z "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
 wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
 wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
 
+testlog "TEST: split-brain"
+image=split-brain
+create_image ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+promote_image ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER1} ${POOL} ${image} 10
+demote_image ${CLUSTER1} ${POOL} ${image}
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
+
 echo OK
diff --git a/qa/workunits/rbd/rbd_mirror_helpers.sh b/qa/workunits/rbd/rbd_mirror_helpers.sh
index 78784f5..b02dfa7 100755
--- a/qa/workunits/rbd/rbd_mirror_helpers.sh
+++ b/qa/workunits/rbd/rbd_mirror_helpers.sh
@@ -105,7 +105,7 @@ daemon_pid_file()
 
 testlog()
 {
-    echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log"
+    echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
 }
 
 expect_failure()
@@ -467,9 +467,9 @@ test_status_in_pool_dir()
 
     local status_log=${TEMPDIR}/${cluster}-${image}.mirror_status
     rbd --cluster ${cluster} -p ${pool} mirror image status ${image} |
-	tee ${status_log}
-    grep "state: .*${state_pattern}" ${status_log}
-    grep "description: .*${description_pattern}" ${status_log}
+	tee ${status_log} >&2
+    grep "state: .*${state_pattern}" ${status_log} || return 1
+    grep "description: .*${description_pattern}" ${status_log} || return 1
 }
 
 wait_for_status_in_pool_dir()
@@ -731,9 +731,13 @@ test_image_present()
     local pool=$2
     local image=$3
     local test_state=$4
+    local image_id=$5
     local current_state=deleted
+    local current_image_id
 
-    rbd --cluster=${cluster} -p ${pool} ls | grep "^${image}$" &&
+    current_image_id=$(get_image_id ${cluster} ${pool} ${image})
+    test -n "${current_image_id}" &&
+    test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
     current_state=present
 
     test "${test_state}" = "${current_state}"
@@ -745,21 +749,41 @@ wait_for_image_present()
     local pool=$2
     local image=$3
     local state=$4
+    local image_id=$5
     local s
 
+    test -n "${image_id}" ||
+    image_id=$(get_image_id ${cluster} ${pool} ${image})
+
     # TODO: add a way to force rbd-mirror to update replayers
     for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
 	sleep ${s}
-	test_image_present "${cluster}" "${pool}" "${image}" "${state}" && return 0
+	test_image_present \
+            "${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
+        return 0
     done
     return 1
 }
 
+get_image_id()
+{
+    local cluster=$1
+    local pool=$2
+    local image=$3
+
+    rbd --cluster=${cluster} -p ${pool} info ${image} |
+	sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
+}
+
 request_resync_image()
 {
     local cluster=$1
     local pool=$2
     local image=$3
+    local image_id_var_name=$4
+
+    eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
+    eval 'test -n "$'${image_id_var_name}'"'
 
     rbd --cluster=${cluster} -p ${pool} mirror image resync ${image}
 }
diff --git a/qa/workunits/rbd/rbd_mirror_stress.sh b/qa/workunits/rbd/rbd_mirror_stress.sh
index e9ed2ce..b07bf0e 100755
--- a/qa/workunits/rbd/rbd_mirror_stress.sh
+++ b/qa/workunits/rbd/rbd_mirror_stress.sh
@@ -11,12 +11,7 @@
 IMAGE_COUNT=50
 export LOCKDEP=0
 
-if [ -n "${CEPH_REF}" ]; then
-  wget -O rbd_mirror_helpers.sh "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=qa/workunits/rbd/rbd_mirror_helpers.sh"
-  . ./rbd_mirror_helpers.sh
-else
-  . $(dirname $0)/rbd_mirror_helpers.sh
-fi
+. $(dirname $0)/rbd_mirror_helpers.sh
 
 create_snap()
 {
@@ -56,17 +51,18 @@ wait_for_pool_images()
 
     while true; do
         for s in `seq 1 40`; do
+            test $s -ne 1 && sleep 30
             count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
             test "${count}" = "${image_count}" && return 0
 
             # reset timeout if making forward progress
-            test $count -gt $last_count && break
-            sleep 30
+            test $count -ne $last_count && break
         done
 
-        test $count -eq $last_count && return 1
-        $last_count=$count
+        test $count -eq $last_count && break
+        last_count=$count
     done
+    rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
     return 1
 }
 
@@ -78,11 +74,12 @@ wait_for_pool_healthy()
     local state
 
     for s in `seq 1 40`; do
+        test $s -ne 1 && sleep 30
         state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'health:' | cut -d' ' -f 2)
-        test "${state}" = "ERROR" && return 1
+        test "${state}" = "ERROR" && break
         test "${state}" = "OK" && return 0
-	sleep 30
     done
+    rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
     return 1
 }
 
@@ -156,6 +153,7 @@ do
 done
 
 testlog "TEST: image deletions should propagate"
+wait_for_pool_images ${CLUSTER1} ${POOL} 0
 wait_for_pool_healthy ${CLUSTER1} ${POOL} 0
 for i in `seq 1 ${IMAGE_COUNT}`
 do
diff --git a/qa/workunits/rbd/test_librbd_python.sh b/qa/workunits/rbd/test_librbd_python.sh
index 8f02f86..9bef91b 100755
--- a/qa/workunits/rbd/test_librbd_python.sh
+++ b/qa/workunits/rbd/test_librbd_python.sh
@@ -1,14 +1,11 @@
 #!/bin/sh -ex
 
-CEPH_REF=${CEPH_REF:-master}
-#wget -q https://raw.github.com/ceph/ceph/$CEPH_REF/src/test/pybind/test_rbd.py
-wget -O test_rbd.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/pybind/test_rbd.py" || \
-    wget -O test_rbd.py "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=ref/heads/$CEPH_REF;f=src/test/pybind/test_rbd.py"
+relpath=$(dirname $0)/../../../src/test/pybind
 
 if [ -n "${VALGRIND}" ]; then
   valgrind --tool=${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
-    nosetests -v test_rbd
+    nosetests -v $relpath/test_rbd.py
 else
-  nosetests -v test_rbd
+  nosetests -v $relpath/test_rbd.py
 fi
 exit 0
diff --git a/qa/workunits/rbd/test_lock_fence.sh b/qa/workunits/rbd/test_lock_fence.sh
index 7f66478..7ecafd4 100755
--- a/qa/workunits/rbd/test_lock_fence.sh
+++ b/qa/workunits/rbd/test_lock_fence.sh
@@ -3,10 +3,8 @@
 
 IMAGE=rbdrw-image
 LOCKID=rbdrw
-RBDRW=rbdrw.py
-CEPH_REF=${CEPH_REF:-master}
-
-wget -O $RBDRW "https://git.ceph.com/?p=ceph.git;a=blob_plain;hb=$CEPH_REF;f=src/test/librbd/rbdrw.py"
+RELPATH=$(dirname $0)/../../../src/test/librbd
+RBDRW=$RELPATH/rbdrw.py
 
 rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
 
diff --git a/qa/workunits/suites/pjd.sh b/qa/workunits/suites/pjd.sh
index 125ef43..e6df309 100755
--- a/qa/workunits/suites/pjd.sh
+++ b/qa/workunits/suites/pjd.sh
@@ -2,7 +2,6 @@
 
 set -e
 
-#wget http://ceph.com/qa/pjd-fstest-20090130-RC-open24.tgz
 wget http://download.ceph.com/qa/pjd-fstest-20090130-RC-aclfixes.tgz
 tar zxvf pjd*.tgz
 cd pjd*
diff --git a/selinux/ceph.te b/selinux/ceph.te
index 0e85c84..d9927ae 100644
--- a/selinux/ceph.te
+++ b/selinux/ceph.te
@@ -91,6 +91,7 @@ allow ceph_t self:tcp_socket { accept listen };
 corenet_tcp_connect_cyphesis_port(ceph_t)
 corenet_tcp_connect_generic_port(ceph_t)
 files_list_tmp(ceph_t)
+files_manage_generic_tmp_files(ceph_t)
 fstools_exec(ceph_t)
 nis_use_ypbind_uncond(ceph_t)
 storage_raw_rw_fixed_disk(ceph_t)
diff --git a/src/.git_version b/src/.git_version
index d126034..dd8d1a2 100644
--- a/src/.git_version
+++ b/src/.git_version
@@ -1,2 +1,2 @@
-c461ee19ecbc0c5c330aca20f7392c9a00730367
-v10.2.5
+656b5b63ed7c43bd014bcafd81b001959d5f089f
+v10.2.6
diff --git a/src/Makefile.in b/src/Makefile.in
index 055214b..ce1645d 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -420,8 +420,6 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_SERVER_TRUE at am__append_108 = \
 @ENABLE_SERVER_TRUE@	common/xattr.c \
 @ENABLE_SERVER_TRUE@	common/ipaddr.cc \
- at ENABLE_SERVER_TRUE@	common/ceph_json.cc \
- at ENABLE_SERVER_TRUE@	common/util.cc \
 @ENABLE_SERVER_TRUE@	common/pick_address.cc
 
 @LINUX_TRUE at am__append_109 = \
@@ -530,6 +528,7 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/AsyncObjectThrottle.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/AsyncOperation.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/AsyncRequest.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@        librbd/BlockGuard.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/CopyupRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/DiffIterate.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/ExclusiveLock.h \
@@ -550,9 +549,14 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/Utils.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/WatchNotifyTypes.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AcquireRequest.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AutomaticPolicy.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/BreakRequest.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/GetLockerRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/Policy.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReacquireRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReleaseRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/StandardPolicy.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/Types.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/CloseRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/OpenRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/RefreshParentRequest.h \
@@ -560,6 +564,7 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/SetSnapRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/Notifier.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/NotifyLockOwner.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/RewatchRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/DisabledPolicy.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/Policy.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/Replay.h \
@@ -590,31 +595,24 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/operation/SnapshotUnprotectRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/operation/TrimRequest.h
 
-
 # inject rgw stuff in the decoder testcase
- at ENABLE_CLIENT_TRUE@am__append_143 = \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_dencoder.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_acl.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_basic_types.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_common.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_env.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_json_enc.cc \
- at ENABLE_CLIENT_TRUE@	rgw/rgw_keystone.cc
-
- at ENABLE_CLIENT_TRUE@am__append_144 = -lcurl -lexpat \
- at ENABLE_CLIENT_TRUE@	libcls_version_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_log_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_user_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_timeindex_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_statelog_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_lock_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_replica_log_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_rgw_client.la libcls_rbd_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_user_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_numops_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_journal_client.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_143 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_dencoder.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_acl.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_basic_types.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_common.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_env.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_json_enc.cc \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_keystone.cc
+
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_144 = -lcurl -lexpat \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_log_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_refcount_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_user_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_timeindex_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_statelog_client.la
+
 @ENABLE_CLIENT_TRUE@@WITH_OPENLDAP_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_145 = rgw/rgw_ldap.cc
 # noinst_LTLIBRARIES += librgw.la
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_146 = \
@@ -634,16 +632,19 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	-lfcgi \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	-ldl
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_147 = librgw.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_148 = -lssl -lcrypto
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_149 = libcivetweb.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_150 = radosgw \
+ at ENABLE_CLIENT_TRUE@@WITH_OPENLDAP_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_147 = \
+ at ENABLE_CLIENT_TRUE@@WITH_OPENLDAP_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@        -lldap
+
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_148 = librgw.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_149 = -lssl -lcrypto
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_150 = libcivetweb.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_151 = radosgw \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	radosgw-admin \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	radosgw-token \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	radosgw-object-expirer
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_151 = ceph_rgw_multiparser \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_152 = ceph_rgw_multiparser \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	ceph_rgw_jsonparser
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_152 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_153 = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_acl.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_acl_s3.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_acl_swift.h \
@@ -729,19 +730,28 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	civetweb/include/civetweb_conf.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	civetweb/src/md5.h
 
- at ENABLE_CLIENT_TRUE@am__append_153 = libcls_lock_client.la \
+ at ENABLE_CLIENT_TRUE@am__append_154 = libcls_lock_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_version_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_log_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_statelog_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_timeindex_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_replica_log_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_rgw_client.la libcls_rbd_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_replica_log_client.la
+ at ENABLE_CLIENT_TRUE@am__append_155 = libcls_lock_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_replica_log_client.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_156 = libcls_rgw_client.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_157 = libcls_rgw_client.la
+ at ENABLE_CLIENT_TRUE@am__append_158 = libcls_rbd_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_user_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_cephfs_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_numops_client.la \
 @ENABLE_CLIENT_TRUE@	libcls_journal_client.la
- at ENABLE_CLIENT_TRUE@am__append_154 = \
+ at ENABLE_CLIENT_TRUE@am__append_159 = libcls_rbd_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_user_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_numops_client.la \
+ at ENABLE_CLIENT_TRUE@	libcls_journal_client.la
+ at ENABLE_CLIENT_TRUE@am__append_160 = \
 @ENABLE_CLIENT_TRUE@	cls/lock/cls_lock_types.h \
 @ENABLE_CLIENT_TRUE@	cls/lock/cls_lock_ops.h \
 @ENABLE_CLIENT_TRUE@	cls/lock/cls_lock_client.h \
@@ -766,9 +776,6 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@	cls/replica_log/cls_replica_log_types.h \
 @ENABLE_CLIENT_TRUE@	cls/replica_log/cls_replica_log_ops.h \
 @ENABLE_CLIENT_TRUE@	cls/replica_log/cls_replica_log_client.h \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_client.h \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_ops.h \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_types.h \
 @ENABLE_CLIENT_TRUE@	cls/user/cls_user_client.h \
 @ENABLE_CLIENT_TRUE@	cls/user/cls_user_ops.h \
 @ENABLE_CLIENT_TRUE@	cls/user/cls_user_types.h \
@@ -777,7 +784,12 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@	cls/journal/cls_journal_client.h \
 @ENABLE_CLIENT_TRUE@	cls/journal/cls_journal_types.h
 
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_155 = libcls_hello.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_161 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_client.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_ops.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_types.h
+
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_162 = libcls_hello.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_numops.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_rbd.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_lock.la \
@@ -787,17 +799,17 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_statelog.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_timeindex.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_replica_log.la \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_user.la \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_rgw.la \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_cephfs.la \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_user.la
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE at am__append_163 = libcls_rgw.la
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_164 = libcls_cephfs.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libcls_journal.la
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_156 = libcls_kvs.la
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_157 = \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_165 = libcls_kvs.la
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_166 = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	key_value_store/key_value_structure.h \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	key_value_store/kv_flat_btree_async.h \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	key_value_store/kvs_arg_types.h
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_158 = rbd_replay/ActionTypes.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_167 = rbd_replay/ActionTypes.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/actions.hpp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/BoundedBuffer.hpp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/BufferReader.h \
@@ -807,27 +819,27 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/rbd_loc.hpp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/rbd_replay_debug.hpp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd_replay/Replayer.hpp
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_159 = librbd_replay_types.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_168 = librbd_replay_types.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_replay.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_replay_ios.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_160 = librbd_replay_types.la
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_161 = rbd-replay
- at ENABLE_CLIENT_TRUE@@WITH_BABELTRACE_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_162 = rbd-replay-prep
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_163 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_169 = librbd_replay_types.la
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_170 = rbd-replay
+ at ENABLE_CLIENT_TRUE@@WITH_BABELTRACE_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_171 = rbd-replay-prep
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_172 = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/test-erasure-code.sh \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/test-erasure-eio.sh
 
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_164 = test/erasure-code/ceph_erasure_code_benchmark.h \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_173 = test/erasure-code/ceph_erasure_code_benchmark.h \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/ceph_erasure_code_benchmark.h \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/ErasureCodeExample.h
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_165 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_166 = ceph_erasure_code_benchmark \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_174 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_175 = ceph_erasure_code_benchmark \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	ceph_erasure_code
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_167 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_168 = ceph_erasure_code_non_regression
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_169 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_170 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_171 = libec_example.la \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_176 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_177 = ceph_erasure_code_non_regression
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_178 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_179 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_180 = libec_example.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_missing_entry_point.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_missing_version.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_hangs.la \
@@ -841,28 +853,28 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_test_shec_sse4.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_test_shec_sse3.la \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libec_test_shec_generic.la
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_172 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_173 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_174 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_175 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_176 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_177 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_178 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_179 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_180 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_181 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_182 = unittest_erasure_code_plugin \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_181 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_182 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_183 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_184 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_185 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_186 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_187 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_188 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_189 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_190 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_191 = unittest_erasure_code_plugin \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_jerasure \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_plugin_jerasure
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_183 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_184 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_185 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_186 = unittest_erasure_code_isa \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_192 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_193 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_194 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_195 = unittest_erasure_code_isa \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_plugin_isa
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_187 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_188 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_189 =  \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE at am__append_196 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_197 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_198 =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_lrc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_plugin_lrc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_erasure_code_shec \
@@ -876,61 +888,65 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_compression_plugin_snappy \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_compression_zlib \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_compression_plugin_zlib
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_190 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_191 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_192 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_193 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_194 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_195 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_196 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_197 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_198 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_199 = -export-symbols-regex '.*__erasure_code_.*'
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE at am__append_200 = test/messenger/message_helper.h \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_199 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_200 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_201 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_202 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_203 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_204 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_205 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_206 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_207 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_208 = -export-symbols-regex '.*__erasure_code_.*'
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE at am__append_209 = test/messenger/message_helper.h \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/simple_dispatcher.h \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/xio_dispatcher.h
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_201 = -ldl
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_202 = -ldl
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE at am__append_203 = simple_server \
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_210 = -ldl
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_211 = -ldl
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE at am__append_212 = simple_server \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	simple_client xio_server \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	xio_client
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_204 = -ldl
- at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_205 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_206 = test/compressor/compressor_example.h
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_207 = libceph_example.la
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_208 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_209 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_210 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_211 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_212 = -ldl
- at COMPILER_HAS_VTA_TRUE@@ENABLE_CLIENT_TRUE at am__append_213 = -fno-var-tracking-assignments
- at COMPILER_HAS_VTA_TRUE@@ENABLE_CLIENT_TRUE at am__append_214 = -fno-var-tracking-assignments
- at ENABLE_CLIENT_TRUE@@WITH_RBD_TRUE at am__append_215 = -DWITH_RBD
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_216 = -DWITH_RADOSGW
- at ENABLE_CLIENT_TRUE@am__append_217 = ceph-dencoder
- at ENABLE_CLIENT_TRUE@am__append_218 = \
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_213 = -ldl
+ at ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@@LINUX_TRUE at am__append_214 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_215 = test/compressor/compressor_example.h
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_216 = libceph_example.la
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_217 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_218 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_219 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_220 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_221 = -ldl
+ at COMPILER_HAS_VTA_TRUE@@ENABLE_CLIENT_TRUE at am__append_222 = -fno-var-tracking-assignments
+ at COMPILER_HAS_VTA_TRUE@@ENABLE_CLIENT_TRUE at am__append_223 = -fno-var-tracking-assignments
+ at ENABLE_CLIENT_TRUE@@WITH_RBD_TRUE at am__append_224 = -DWITH_RBD
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_225 = -DWITH_RADOSGW
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__append_226 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	$(LIBRGW) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	$(LIBRGW_DEPS) 
+
+ at ENABLE_CLIENT_TRUE@am__append_227 = ceph-dencoder
+ at ENABLE_CLIENT_TRUE@am__append_228 = \
 @ENABLE_CLIENT_TRUE@	test/encoding/test_ceph_time.h
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_219 = libradostest.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_229 = libradostest.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	librados_test_stub.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libjournal_test_mock.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_220 = ceph_test_rados \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_230 = ceph_test_rados \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_mutate
- at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOS_TRUE at am__append_221 = test_build_librados
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_222 =  \
+ at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOS_TRUE at am__append_231 = test_build_librados
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_232 =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_smalliobench \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_omapbench \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_objectstore_bench
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE at am__append_223 = ceph_kvstorebench \
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE at am__append_233 = ceph_kvstorebench \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_list_parallel \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_open_pools_parallel \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_delete_pools_parallel \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_watch_notify
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_224 =  \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_234 =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	unittest_librados \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	unittest_librados_config \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	unittest_journal
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_225 =  \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_235 =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_multi_stress_watch \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_cls_rbd \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_cls_refcount \
@@ -959,7 +975,7 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_api_lock \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_api_tmap_migrate \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_test_stress_watch
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_226 = test/librados_test_stub/LibradosTestStub.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_236 = test/librados_test_stub/LibradosTestStub.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados_test_stub/MockTestMemIoCtxImpl.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados_test_stub/MockTestMemRadosClient.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados_test_stub/TestClassHandler.h \
@@ -969,19 +985,19 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados_test_stub/TestMemIoCtxImpl.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados_test_stub/TestIoCtxImpl.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/journal/mock/MockJournaler.h
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_227 = ceph_smalliobenchrbd \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_237 = ceph_smalliobenchrbd \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	ceph_test_librbd \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	ceph_test_librbd_api \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	ceph_test_rbd_mirror \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	ceph_test_rbd_mirror_random_write
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_228 = unittest_rbd_replay
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_229 = librbd_test.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_238 = unittest_rbd_replay
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_239 = librbd_test.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_test_mock.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_mirror_test.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_230 = unittest_librbd \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_240 = unittest_librbd \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	unittest_rbd_mirror
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_231 = test/run-rbd-unit-tests.sh
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_232 = test/librbd/test_fixture.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_241 = test/run-rbd-unit-tests.sh
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_242 = test/librbd/test_fixture.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_mock_fixture.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_support.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/mock/MockAioImageRequestWQ.h \
@@ -998,23 +1014,23 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/object_map/mock/MockInvalidateRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/rbd_mirror/test_fixture.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/rbd_mirror/test_mock_fixture.h
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_233 = ceph_test_librbd_fsx
- at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at am__append_234 = libradosstripertest.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at am__append_235 = ceph_test_rados_striper_api_io \
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_243 = ceph_test_librbd_fsx
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at am__append_244 = libradosstripertest.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at am__append_245 = ceph_test_rados_striper_api_io \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_striper_api_aio \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	ceph_test_rados_striper_api_striping
- at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_236 = test_build_libcephfs
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_237 = unittest_encoding \
+ at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_246 = test_build_libcephfs
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_247 = unittest_encoding \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	unittest_base64 \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	unittest_run_cmd \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	unittest_simple_spin \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	unittest_libcephfs_config
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_238 = test/libcephfs/flock.cc
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_239 = ceph_test_libcephfs \
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_248 = test/libcephfs/flock.cc
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_249 = ceph_test_libcephfs \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	ceph_test_c_headers
- at CLANG_FALSE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_240 = -Werror -Wold-style-declaration
- at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_241 = test_build_librgw
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_242 = ceph_test_cors \
+ at CLANG_FALSE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_250 = -Werror -Wold-style-declaration
+ at ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_251 = test_build_librgw
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__append_252 = ceph_test_cors \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	ceph_test_rgw_manifest \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	ceph_test_rgw_period_history \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	ceph_test_rgw_obj \
@@ -1027,20 +1043,20 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw_file_gp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw_file_aw \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw_file_nfsns
- at ENABLE_SERVER_TRUE@am__append_243 = ceph_test_async_driver \
+ at ENABLE_SERVER_TRUE@am__append_253 = ceph_test_async_driver \
 @ENABLE_SERVER_TRUE@	ceph_test_msgr ceph_test_trans \
 @ENABLE_SERVER_TRUE@	ceph_test_mon_workloadgen \
 @ENABLE_SERVER_TRUE@	ceph_test_mon_msg ceph_perf_objectstore \
 @ENABLE_SERVER_TRUE@	ceph_perf_local ceph_perf_msgr_server \
 @ENABLE_SERVER_TRUE@	ceph_perf_msgr_client
- at ENABLE_SERVER_TRUE@am__append_244 = test/perf_helper.h
- at ENABLE_SERVER_TRUE@@LINUX_TRUE at am__append_245 =  \
+ at ENABLE_SERVER_TRUE@am__append_254 = test/perf_helper.h
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE at am__append_255 =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	ceph_test_objectstore \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	ceph_test_keyvaluedb \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	ceph_test_filestore
- at ENABLE_SERVER_TRUE@@LINUX_TRUE at am__append_246 = unittest_bluefs \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE at am__append_256 = unittest_bluefs \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	unittest_bluestore_types
- at ENABLE_SERVER_TRUE@am__append_247 =  \
+ at ENABLE_SERVER_TRUE@am__append_257 =  \
 @ENABLE_SERVER_TRUE@	ceph_test_objectstore_workloadgen \
 @ENABLE_SERVER_TRUE@	ceph_test_filestore_idempotent \
 @ENABLE_SERVER_TRUE@	ceph_test_filestore_idempotent_sequence \
@@ -1048,47 +1064,47 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_SERVER_TRUE@	ceph_test_object_map \
 @ENABLE_SERVER_TRUE@	ceph_test_keyvaluedb_atomicity \
 @ENABLE_SERVER_TRUE@	ceph_test_keyvaluedb_iterators
- at ENABLE_SERVER_TRUE@am__append_248 = unittest_transaction
- at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_RADOS_TRUE at am__append_249 = ceph_smalliobenchfs \
+ at ENABLE_SERVER_TRUE@am__append_258 = unittest_transaction
+ at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_RADOS_TRUE at am__append_259 = ceph_smalliobenchfs \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_RADOS_TRUE@	ceph_smalliobenchdumb \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_RADOS_TRUE@	ceph_tpbench
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_250 = ceph_test_keys
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_251 = get_command_descriptions
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_252 =  \
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_260 = ceph_test_keys
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_261 = get_command_descriptions
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_262 =  \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	unittest_mon_moncap \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	unittest_mon_pgmap
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_253 =  \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_263 =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_ecbackend \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_osdscrub \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_pglog \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_hitset \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_osd_osdcap \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	unittest_pageset
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_254 = -ldl
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_255 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_256 = ceph_test_snap_mapper
- at ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE at am__append_257 = unittest_rocksdb_option_static
- at ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE at am__append_258 = unittest_rocksdb_option
- at ENABLE_SERVER_TRUE@am__append_259 = unittest_chain_xattr \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_264 = -ldl
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_265 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_266 = ceph_test_snap_mapper
+ at ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE at am__append_267 = unittest_rocksdb_option_static
+ at ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE at am__append_268 = unittest_rocksdb_option
+ at ENABLE_SERVER_TRUE@am__append_269 = unittest_chain_xattr \
 @ENABLE_SERVER_TRUE@	unittest_lfnindex
- at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at am__append_260 = unittest_mds_authcap
- at WITH_BUILD_TESTS_TRUE@am__append_261 = test_build_libcommon
- at LINUX_TRUE@am__append_262 = libsystest.la
- at SOLARIS_TRUE@am__append_263 = \
+ at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at am__append_270 = unittest_mds_authcap
+ at WITH_BUILD_TESTS_TRUE@am__append_271 = test_build_libcommon
+ at LINUX_TRUE@am__append_272 = libsystest.la
+ at SOLARIS_TRUE@am__append_273 = \
 @SOLARIS_TRUE@	-lsocket -lnsl
 
- at LINUX_TRUE@am__append_264 = unittest_blkdev
- at LINUX_TRUE@am__append_265 = ceph_test_get_blkdev_size
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_266 =  \
+ at LINUX_TRUE@am__append_274 = unittest_blkdev
+ at LINUX_TRUE@am__append_275 = ceph_test_get_blkdev_size
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_276 =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_scratchtool \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_scratchtoolpp \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	ceph_radosacl
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_267 = rados
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_268 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_277 = rados
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_278 = \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/action/Kernel.cc \
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/action/Nbd.cc
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_269 = tools/rbd/ArgumentTypes.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_279 = tools/rbd/ArgumentTypes.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/IndentStream.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/OptionPrinter.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/Shell.h \
@@ -1109,6 +1125,7 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CloseImageRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CreateImageRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/EventPreprocessor.h \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/IsPrimaryRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenImageRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.h \
@@ -1119,26 +1136,26 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_sync/SnapshotCreateRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_sync/SyncPointCreateRequest.h \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_sync/SyncPointPruneRequest.h
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_270 = $(LIBKRBD)
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_271 = rbd
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_272 = rbd-nbd
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_273 = librbd_mirror_internal.la
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_274 = rbd-mirror
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_275 = ceph-client-debug
- at ENABLE_SERVER_TRUE@am__append_276 = ceph-osdomap-tool \
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_280 = $(LIBKRBD)
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_281 = rbd
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_282 = rbd-nbd
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_283 = librbd_mirror_internal.la
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_284 = rbd-mirror
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_285 = ceph-client-debug
+ at ENABLE_SERVER_TRUE@am__append_286 = ceph-osdomap-tool \
 @ENABLE_SERVER_TRUE@	ceph-monstore-tool ceph-kvstore-tool
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_277 = -ldl
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_278 = ceph-objectstore-tool
- at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at am__append_279 = cephfs-journal-tool \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@@WITH_OSD_TRUE at am__append_287 = -ldl
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_288 = ceph-objectstore-tool
+ at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at am__append_289 = cephfs-journal-tool \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	cephfs-table-tool \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	cephfs-data-scan
- at WITH_LTTNG_TRUE@am__append_280 = \
+ at WITH_LTTNG_TRUE@am__append_290 = \
 @WITH_LTTNG_TRUE@	libosd_tp.la \
 @WITH_LTTNG_TRUE@	libos_tp.la \
 @WITH_LTTNG_TRUE@	librados_tp.la \
 @WITH_LTTNG_TRUE@	librbd_tp.la
 
- at WITH_LTTNG_TRUE@am__append_281 = \
+ at WITH_LTTNG_TRUE@am__append_291 = \
 @WITH_LTTNG_TRUE@	tracing/librados.h \
 @WITH_LTTNG_TRUE@	tracing/librbd.h \
 @WITH_LTTNG_TRUE@	tracing/objectstore.h \
@@ -1146,75 +1163,75 @@ check_PROGRAMS = $(am__EXEEXT_63) $(am__EXEEXT_64) \
 @WITH_LTTNG_TRUE@	tracing/osd.h \
 @WITH_LTTNG_TRUE@	tracing/pg.h
 
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_282 = \
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_292 = \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/rados/CMakeLists.txt \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/rados/setup.py \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/rados/rados.pyx $(srcdir)/pybind/rados/rados.pxd
 
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_283 = rados-pybind-all
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_284 = rados-pybind-clean
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_285 = rados-pybind-install-exec
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_286 = \
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_293 = rados-pybind-all
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_294 = rados-pybind-clean
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_295 = rados-pybind-install-exec
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_296 = \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(srcdir)/pybind/rbd/CMakeLists.txt \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(srcdir)/pybind/rbd/setup.py \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(srcdir)/pybind/rbd/rbd.pyx
 
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_287 = rbd-pybind-all
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_288 = rbd-pybind-clean
- at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_289 = rbd-pybind-install-exec
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_290 = \
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_297 = rbd-pybind-all
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_298 = rbd-pybind-clean
+ at ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_299 = rbd-pybind-install-exec
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_300 = \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/cephfs/CMakeLists.txt \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/cephfs/setup.py \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/pybind/cephfs/cephfs.pyx
 
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_291 = cephfs-pybind-all
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_292 = cephfs-pybind-clean
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_293 = cephfs-pybind-install-exec
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_301 = cephfs-pybind-all
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_302 = cephfs-pybind-clean
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_CYTHON_TRUE@@WITH_RADOS_TRUE at am__append_303 = cephfs-pybind-install-exec
 TESTS = $(am__EXEEXT_63) $(check_SCRIPTS)
- at ENABLE_CLIENT_TRUE@am__append_294 = \
+ at ENABLE_CLIENT_TRUE@am__append_304 = \
 @ENABLE_CLIENT_TRUE@	pybind/ceph_argparse.py \
 @ENABLE_CLIENT_TRUE@	pybind/ceph_daemon.py
 
- at ENABLE_CLIENT_TRUE@am__append_295 = ceph-syn
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_296 = \
+ at ENABLE_CLIENT_TRUE@am__append_305 = ceph-syn
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_306 = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/bash_completion/rados \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(srcdir)/bash_completion/radosgw-admin
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_297 = librados-config
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_298 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at am__append_307 = librados-config
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_308 = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(srcdir)/bash_completion/rbd
 
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_299 = \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_309 = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	ceph-rbdnamer \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	rbd-replay-many \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@        rbdmap
 
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_300 = libkrbd.la
- at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at am__append_301 = ceph-fuse
- at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at am__append_302 = mount.fuse.ceph
- at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_303 = rbd-fuse
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_304 = cephfs
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_305 = mount.ceph
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_306 = pybind/ceph_volume_client.py
- at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_307 = -Xcompiler -Xlinker -Xcompiler '--exclude-libs=libcommon.a'
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_308 = libcephfs.la
- at ENABLE_CEPHFS_JAVA_TRUE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_309 = libcephfs_jni.la
- at ENABLE_SERVER_TRUE@am__append_310 = ceph-run ceph-rest-api \
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_310 = libkrbd.la
+ at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at am__append_311 = ceph-fuse
+ at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at am__append_312 = mount.fuse.ceph
+ at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am__append_313 = rbd-fuse
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_314 = cephfs
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_315 = mount.ceph
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_316 = pybind/ceph_volume_client.py
+ at ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_317 = -Xcompiler -Xlinker -Xcompiler '--exclude-libs=libcommon.a'
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_318 = libcephfs.la
+ at ENABLE_CEPHFS_JAVA_TRUE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at am__append_319 = libcephfs_jni.la
+ at ENABLE_SERVER_TRUE@am__append_320 = ceph-run ceph-rest-api \
 @ENABLE_SERVER_TRUE@	ceph-debugpack ceph-crush-location \
 @ENABLE_SERVER_TRUE@	ceph-coverage
- at ENABLE_SERVER_TRUE@am__append_311 = pybind/ceph_rest_api.py
- at ENABLE_SERVER_TRUE@am__append_312 = ceph-coverage init-ceph
- at ENABLE_SERVER_TRUE@am__append_313 = init-ceph
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_314 = ceph-mon
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_315 = \
+ at ENABLE_SERVER_TRUE@am__append_321 = pybind/ceph_rest_api.py
+ at ENABLE_SERVER_TRUE@am__append_322 = ceph-coverage init-ceph
+ at ENABLE_SERVER_TRUE@am__append_323 = init-ceph
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am__append_324 = ceph-mon
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_325 = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	ceph-disk-udev
 
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_316 = \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_326 = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	ceph-clsinfo
 
- at ENABLE_SERVER_TRUE@@WITH_LTTNG_TRUE@@WITH_OSD_TRUE at am__append_317 = $(LIBOSD_TP)
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_318 = ceph-osd
- at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at am__append_319 = ceph-mds
+ at ENABLE_SERVER_TRUE@@WITH_LTTNG_TRUE@@WITH_OSD_TRUE at am__append_327 = $(LIBOSD_TP)
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am__append_328 = ceph-osd
+ at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at am__append_329 = ceph-mds
 subdir = src
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_check_classpath.m4 \
@@ -1756,34 +1773,32 @@ am__libcls_replica_log_client_la_SOURCES_DIST =  \
 libcls_replica_log_client_la_OBJECTS =  \
 	$(am_libcls_replica_log_client_la_OBJECTS)
 @ENABLE_CLIENT_TRUE at am_libcls_replica_log_client_la_rpath =
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at libcls_rgw_la_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	libjson_spirit.la \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_3)
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE at libcls_rgw_la_DEPENDENCIES = libjson_spirit.la \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	$(am__DEPENDENCIES_1) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	$(am__DEPENDENCIES_3)
 am__libcls_rgw_la_SOURCES_DIST = cls/rgw/cls_rgw.cc \
 	cls/rgw/cls_rgw_ops.cc cls/rgw/cls_rgw_types.cc \
 	common/ceph_json.cc
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am_libcls_rgw_la_OBJECTS =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw.lo \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw_ops.lo \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw_types.lo \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	common/ceph_json.lo
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE at am_libcls_rgw_la_OBJECTS = cls/rgw/cls_rgw.lo \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_ops.lo \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_types.lo \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	common/ceph_json.lo
 libcls_rgw_la_OBJECTS = $(am_libcls_rgw_la_OBJECTS)
 libcls_rgw_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(AM_CXXFLAGS) $(CXXFLAGS) $(libcls_rgw_la_LDFLAGS) $(LDFLAGS) \
 	-o $@
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am_libcls_rgw_la_rpath = -rpath \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(radoslibdir)
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE at am_libcls_rgw_la_rpath =  \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	-rpath \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	$(radoslibdir)
 libcls_rgw_client_la_LIBADD =
 am__libcls_rgw_client_la_SOURCES_DIST = cls/rgw/cls_rgw_client.cc \
 	cls/rgw/cls_rgw_types.cc cls/rgw/cls_rgw_ops.cc
- at ENABLE_CLIENT_TRUE@am_libcls_rgw_client_la_OBJECTS =  \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_client.lo \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_types.lo \
- at ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_ops.lo
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am_libcls_rgw_client_la_OBJECTS = cls/rgw/cls_rgw_client.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_types.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_ops.lo
 libcls_rgw_client_la_OBJECTS = $(am_libcls_rgw_client_la_OBJECTS)
- at ENABLE_CLIENT_TRUE@am_libcls_rgw_client_la_rpath =
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am_libcls_rgw_client_la_rpath =
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at libcls_statelog_la_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_3)
@@ -1933,8 +1948,8 @@ am__libcommon_internal_la_SOURCES_DIST = ceph_ver.c \
 	common/bloom_filter.cc common/module.c common/Readahead.cc \
 	common/Cycles.cc common/ContextCompletion.cc \
 	common/TracepointProvider.cc common/PluginRegistry.cc \
-	common/scrub_types.cc common/blkdev.cc common/xattr.c \
-	common/ipaddr.cc common/ceph_json.cc common/util.cc \
+	common/scrub_types.cc common/blkdev.cc common/ceph_json.cc \
+	common/util.cc common/xattr.c common/ipaddr.cc \
 	common/pick_address.cc common/linux_version.c \
 	common/solaris_errno.cc common/aix_errno.cc \
 	common/address_helper.cc mon/MonCap.cc mon/MonClient.cc \
@@ -1942,7 +1957,6 @@ am__libcommon_internal_la_SOURCES_DIST = ceph_ver.c \
 	osd/HitSet.cc mds/MDSMap.cc mds/FSMap.cc \
 	mds/inode_backtrace.cc mds/mdstypes.cc mds/flock.cc
 @ENABLE_SERVER_TRUE at am__objects_14 = common/xattr.lo common/ipaddr.lo \
- at ENABLE_SERVER_TRUE@	common/ceph_json.lo common/util.lo \
 @ENABLE_SERVER_TRUE@	common/pick_address.lo
 @LINUX_TRUE at am__objects_15 = common/linux_version.lo
 @SOLARIS_TRUE at am__objects_16 = common/solaris_errno.lo
@@ -1978,12 +1992,13 @@ am_libcommon_internal_la_OBJECTS = ceph_ver.lo \
 	common/bloom_filter.lo common/module.lo common/Readahead.lo \
 	common/Cycles.lo common/ContextCompletion.lo \
 	common/TracepointProvider.lo common/PluginRegistry.lo \
-	common/scrub_types.lo common/blkdev.lo $(am__objects_14) \
-	$(am__objects_15) $(am__objects_16) $(am__objects_17) \
-	$(am__objects_18) mon/MonCap.lo mon/MonClient.lo mon/MonMap.lo \
-	osd/OSDMap.lo osd/osd_types.lo osd/ECMsgTypes.lo osd/HitSet.lo \
-	mds/MDSMap.lo mds/FSMap.lo mds/inode_backtrace.lo \
-	mds/mdstypes.lo mds/flock.lo
+	common/scrub_types.lo common/blkdev.lo common/ceph_json.lo \
+	common/util.lo $(am__objects_14) $(am__objects_15) \
+	$(am__objects_16) $(am__objects_17) $(am__objects_18) \
+	mon/MonCap.lo mon/MonClient.lo mon/MonMap.lo osd/OSDMap.lo \
+	osd/osd_types.lo osd/ECMsgTypes.lo osd/HitSet.lo mds/MDSMap.lo \
+	mds/FSMap.lo mds/inode_backtrace.lo mds/mdstypes.lo \
+	mds/flock.lo
 libcommon_internal_la_OBJECTS = $(am_libcommon_internal_la_OBJECTS)
 am_libcompressor_la_OBJECTS = compressor/Compressor.lo \
 	compressor/AsyncCompressor.lo
@@ -2879,6 +2894,10 @@ am__librbd_internal_la_SOURCES_DIST = librbd/AioCompletion.cc \
 	librbd/ObjectMap.cc librbd/ObjectWatcher.cc \
 	librbd/Operations.cc librbd/Utils.cc \
 	librbd/exclusive_lock/AcquireRequest.cc \
+	librbd/exclusive_lock/AutomaticPolicy.cc \
+	librbd/exclusive_lock/BreakRequest.cc \
+	librbd/exclusive_lock/GetLockerRequest.cc \
+	librbd/exclusive_lock/ReacquireRequest.cc \
 	librbd/exclusive_lock/ReleaseRequest.cc \
 	librbd/exclusive_lock/StandardPolicy.cc \
 	librbd/image/CloseRequest.cc librbd/image/OpenRequest.cc \
@@ -2886,6 +2905,7 @@ am__librbd_internal_la_SOURCES_DIST = librbd/AioCompletion.cc \
 	librbd/image/RefreshRequest.cc librbd/image/SetSnapRequest.cc \
 	librbd/image_watcher/Notifier.cc \
 	librbd/image_watcher/NotifyLockOwner.cc \
+	librbd/image_watcher/RewatchRequest.cc \
 	librbd/journal/Replay.cc librbd/journal/StandardPolicy.cc \
 	librbd/object_map/InvalidateRequest.cc \
 	librbd/object_map/LockRequest.cc librbd/object_map/Request.cc \
@@ -2930,6 +2950,10 @@ am__librbd_internal_la_SOURCES_DIST = librbd/AioCompletion.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/Operations.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/Utils.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AcquireRequest.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AutomaticPolicy.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/BreakRequest.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/GetLockerRequest.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReacquireRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReleaseRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/StandardPolicy.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/CloseRequest.lo \
@@ -2939,6 +2963,7 @@ am__librbd_internal_la_SOURCES_DIST = librbd/AioCompletion.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/SetSnapRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/Notifier.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/NotifyLockOwner.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/RewatchRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/Replay.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/StandardPolicy.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/object_map/InvalidateRequest.lo \
@@ -2978,6 +3003,7 @@ am__librbd_mirror_internal_la_SOURCES_DIST =  \
 	tools/rbd_mirror/image_replayer/CloseImageRequest.cc \
 	tools/rbd_mirror/image_replayer/CreateImageRequest.cc \
 	tools/rbd_mirror/image_replayer/EventPreprocessor.cc \
+	tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc \
 	tools/rbd_mirror/image_replayer/OpenImageRequest.cc \
 	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc \
 	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc \
@@ -3001,6 +3027,7 @@ am__librbd_mirror_internal_la_SOURCES_DIST =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CloseImageRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CreateImageRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/EventPreprocessor.lo \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/IsPrimaryRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenImageRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.lo \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.lo \
@@ -3128,7 +3155,7 @@ librbd_types_la_OBJECTS = $(am_librbd_types_la_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_lock_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_refcount_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la
-am__DEPENDENCIES_12 = $(am__DEPENDENCIES_11)
+am__DEPENDENCIES_12 = $(am__DEPENDENCIES_11) $(am__DEPENDENCIES_1)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_la_DEPENDENCIES = $(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -3567,14 +3594,13 @@ am__objects_41 = mds/ceph_dencoder-Capability.$(OBJEXT) \
 	mds/ceph_dencoder-MDSAuthCaps.$(OBJEXT) \
 	mds/ceph_dencoder-MDLog.$(OBJEXT)
 @ENABLE_CLIENT_TRUE at am__objects_42 = $(am__objects_41)
- at ENABLE_CLIENT_TRUE@am__objects_43 =  \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_dencoder.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_acl.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_basic_types.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_common.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_env.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_json_enc.$(OBJEXT) \
- at ENABLE_CLIENT_TRUE@	rgw/ceph_dencoder-rgw_keystone.$(OBJEXT)
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__objects_43 = rgw/ceph_dencoder-rgw_dencoder.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_acl.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_basic_types.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_common.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_env.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_json_enc.$(OBJEXT) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/ceph_dencoder-rgw_keystone.$(OBJEXT)
 am__objects_44 = $(am__objects_42) \
 	perfglue/ceph_dencoder-disabled_heap_profiler.$(OBJEXT) \
 	perfglue/ceph_dencoder-disabled_stubs.$(OBJEXT) \
@@ -3582,28 +3608,25 @@ am__objects_44 = $(am__objects_42) \
 @ENABLE_CLIENT_TRUE at am_ceph_dencoder_OBJECTS = test/encoding/ceph_dencoder-ceph_dencoder.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@	$(am__objects_44)
 ceph_dencoder_OBJECTS = $(am_ceph_dencoder_OBJECTS)
-am__DEPENDENCIES_17 = librgw.la $(am__DEPENDENCIES_1)
- at ENABLE_CLIENT_TRUE@am__DEPENDENCIES_18 = libcls_version_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_log_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_user_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_timeindex_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_statelog_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_lock_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_refcount_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_replica_log_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_rgw_client.la libcls_rbd_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_user_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_numops_client.la \
- at ENABLE_CLIENT_TRUE@	libcls_journal_client.la
-am__DEPENDENCIES_19 = $(am__append_49) $(am__append_138) \
-	$(am__DEPENDENCIES_18) $(am__append_160)
- at ENABLE_CLIENT_TRUE@ceph_dencoder_DEPENDENCIES =  \
- at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_17) $(LIBRADOS) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am__DEPENDENCIES_17 = libcls_version_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_log_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_refcount_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_user_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_timeindex_client.la \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_statelog_client.la
+am__DEPENDENCIES_18 = $(am__append_49) $(am__append_138) \
+	$(am__DEPENDENCIES_17) $(am__append_155) $(am__append_157) \
+	$(am__append_159) $(am__append_169)
+am__DEPENDENCIES_19 = librgw.la $(am__DEPENDENCIES_1)
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE at am__DEPENDENCIES_20 =  \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	$(am__DEPENDENCIES_19) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	$(am__DEPENDENCIES_12)
+ at ENABLE_CLIENT_TRUE@ceph_dencoder_DEPENDENCIES = $(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@	$(LIBRBD_TYPES) $(LIBOSD_TYPES) \
 @ENABLE_CLIENT_TRUE@	$(LIBOS_TYPES) $(LIBMON_TYPES) \
- at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_19) \
- at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_10)
+ at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_18) \
+ at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_10) \
+ at ENABLE_CLIENT_TRUE@	$(am__DEPENDENCIES_20)
 ceph_dencoder_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(ceph_dencoder_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \
@@ -3611,11 +3634,11 @@ ceph_dencoder_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am__ceph_fuse_SOURCES_DIST = ceph_fuse.cc
 @ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at am_ceph_fuse_OBJECTS = ceph_fuse.$(OBJEXT)
 ceph_fuse_OBJECTS = $(am_ceph_fuse_OBJECTS)
-am__DEPENDENCIES_20 = libperfglue.la $(am__DEPENDENCIES_1) \
+am__DEPENDENCIES_21 = libperfglue.la $(am__DEPENDENCIES_1) \
 	$(am__DEPENDENCIES_1)
-am__DEPENDENCIES_21 = libclient_fuse.la $(am__DEPENDENCIES_1) \
-	$(am__DEPENDENCIES_20)
- at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at ceph_fuse_DEPENDENCIES = $(am__DEPENDENCIES_21) \
+am__DEPENDENCIES_22 = libclient_fuse.la $(am__DEPENDENCIES_1) \
+	$(am__DEPENDENCIES_21)
+ at ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE at ceph_fuse_DEPENDENCIES = $(am__DEPENDENCIES_22) \
 @ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__ceph_kvstore_tool_SOURCES_DIST = tools/ceph_kvstore_tool.cc
 @ENABLE_SERVER_TRUE at am_ceph_kvstore_tool_OBJECTS = tools/ceph_kvstore_tool-ceph_kvstore_tool.$(OBJEXT)
@@ -3631,10 +3654,10 @@ am__ceph_mds_SOURCES_DIST = ceph_mds.cc
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at am_ceph_mds_OBJECTS =  \
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	ceph_mds.$(OBJEXT)
 ceph_mds_OBJECTS = $(am_ceph_mds_OBJECTS)
-am__DEPENDENCIES_22 = libmds.la $(am__DEPENDENCIES_1) \
-	$(am__DEPENDENCIES_20)
+am__DEPENDENCIES_23 = libmds.la $(am__DEPENDENCIES_1) \
+	$(am__DEPENDENCIES_21)
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE at ceph_mds_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_22) \
+ at ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_23) \
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(LIBOSDC) \
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_4)
@@ -3642,10 +3665,10 @@ am__ceph_mon_SOURCES_DIST = ceph_mon.cc
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE at am_ceph_mon_OBJECTS =  \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	ceph_mon.$(OBJEXT)
 ceph_mon_OBJECTS = $(am_ceph_mon_OBJECTS)
-am__DEPENDENCIES_23 = libmon.a $(am__DEPENDENCIES_1) \
-	$(am__DEPENDENCIES_20) $(LIBMON_TYPES)
+am__DEPENDENCIES_24 = libmon.a $(am__DEPENDENCIES_1) \
+	$(am__DEPENDENCIES_21) $(LIBMON_TYPES)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE at ceph_mon_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_23) \
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_16) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_4) \
@@ -3667,11 +3690,11 @@ am__ceph_objectstore_tool_SOURCES_DIST =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	tools/rebuild_mondb.$(OBJEXT) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	tools/RadosDump.$(OBJEXT)
 ceph_objectstore_tool_OBJECTS = $(am_ceph_objectstore_tool_OBJECTS)
-am__DEPENDENCIES_24 = libosd.a $(am__DEPENDENCIES_1) $(LIBOSDC) \
-	$(am__DEPENDENCIES_16) $(am__DEPENDENCIES_20) $(LIBOSD_TYPES) \
+am__DEPENDENCIES_25 = libosd.a $(am__DEPENDENCIES_1) $(LIBOSDC) \
+	$(am__DEPENDENCIES_16) $(am__DEPENDENCIES_21) $(LIBOSD_TYPES) \
 	$(LIBOS_TYPES)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_objectstore_tool_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_16) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
@@ -3681,7 +3704,7 @@ am__ceph_osd_SOURCES_DIST = ceph_osd.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	ceph_osd.$(OBJEXT)
 ceph_osd_OBJECTS = $(am_ceph_osd_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_osd_DEPENDENCIES = $(LIBOSDC) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD_TYPES) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOS_TYPES) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_16) \
@@ -3710,7 +3733,7 @@ am__ceph_erasure_code_SOURCES_DIST =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am_ceph_erasure_code_OBJECTS = test/erasure-code/ceph_erasure_code.$(OBJEXT)
 ceph_erasure_code_OBJECTS = $(am_ceph_erasure_code_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_erasure_code_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
@@ -3723,7 +3746,7 @@ am__ceph_erasure_code_benchmark_SOURCES_DIST =  \
 ceph_erasure_code_benchmark_OBJECTS =  \
 	$(am_ceph_erasure_code_benchmark_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_erasure_code_benchmark_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
@@ -3734,7 +3757,7 @@ am__ceph_erasure_code_non_regression_SOURCES_DIST =  \
 ceph_erasure_code_non_regression_OBJECTS =  \
 	$(am_ceph_erasure_code_non_regression_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_erasure_code_non_regression_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
@@ -3784,13 +3807,13 @@ am__ceph_perf_msgr_client_SOURCES_DIST =  \
 	test/msgr/perf_msgr_client.cc
 @ENABLE_SERVER_TRUE at am_ceph_perf_msgr_client_OBJECTS = test/msgr/ceph_perf_msgr_client-perf_msgr_client.$(OBJEXT)
 ceph_perf_msgr_client_OBJECTS = $(am_ceph_perf_msgr_client_OBJECTS)
-am__DEPENDENCIES_25 = $(top_builddir)/src/gmock/lib/libgmock_main.la \
+am__DEPENDENCIES_26 = $(top_builddir)/src/gmock/lib/libgmock_main.la \
 	$(top_builddir)/src/gmock/lib/libgmock.la \
 	$(top_builddir)/src/gmock/gtest/lib/libgtest.la \
 	$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
 @ENABLE_SERVER_TRUE at ceph_perf_msgr_client_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_perf_msgr_client_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3802,7 +3825,7 @@ am__ceph_perf_msgr_server_SOURCES_DIST =  \
 ceph_perf_msgr_server_OBJECTS = $(am_ceph_perf_msgr_server_OBJECTS)
 @ENABLE_SERVER_TRUE at ceph_perf_msgr_server_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_perf_msgr_server_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3814,7 +3837,7 @@ am__ceph_perf_objectstore_SOURCES_DIST =  \
 ceph_perf_objectstore_OBJECTS = $(am_ceph_perf_objectstore_OBJECTS)
 @ENABLE_SERVER_TRUE at ceph_perf_objectstore_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_perf_objectstore_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3837,13 +3860,13 @@ am__ceph_rgw_jsonparser_SOURCES_DIST = rgw/rgw_jsonparser.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_env.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_json_enc.$(OBJEXT)
 ceph_rgw_jsonparser_OBJECTS = $(am_ceph_rgw_jsonparser_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_rgw_jsonparser_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_rgw_jsonparser_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__ceph_rgw_multiparser_SOURCES_DIST = rgw/rgw_multiparser.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_ceph_rgw_multiparser_OBJECTS = rgw/rgw_multiparser.$(OBJEXT)
 ceph_rgw_multiparser_OBJECTS = $(am_ceph_rgw_multiparser_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_rgw_multiparser_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_rgw_multiparser_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__ceph_scratchtool_SOURCES_DIST = tools/scratchtool.c
@@ -3914,7 +3937,7 @@ am__ceph_test_async_driver_SOURCES_DIST =  \
 ceph_test_async_driver_OBJECTS = $(am_ceph_test_async_driver_OBJECTS)
 @ENABLE_SERVER_TRUE at ceph_test_async_driver_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_async_driver_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3941,7 +3964,7 @@ ceph_test_cls_hello_OBJECTS = $(am_ceph_test_cls_hello_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_cls_hello_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_hello_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -3957,7 +3980,7 @@ ceph_test_cls_journal_OBJECTS = $(am_ceph_test_cls_journal_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_journal_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3970,7 +3993,7 @@ ceph_test_cls_lock_OBJECTS = $(am_ceph_test_cls_lock_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_lock_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_lock_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -3982,7 +4005,7 @@ ceph_test_cls_log_OBJECTS = $(am_ceph_test_cls_log_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_cls_log_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_log_client.la \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_log_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -3996,7 +4019,7 @@ ceph_test_cls_numops_OBJECTS = $(am_ceph_test_cls_numops_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_numops_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_numops_client.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_numops_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4011,7 +4034,7 @@ ceph_test_cls_rbd_OBJECTS = $(am_ceph_test_cls_rbd_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_rbd_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_lock_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_3)
@@ -4026,7 +4049,7 @@ ceph_test_cls_refcount_OBJECTS = $(am_ceph_test_cls_refcount_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_refcount_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_refcount_client.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_refcount_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4040,7 +4063,7 @@ ceph_test_cls_replica_log_OBJECTS =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_replica_log_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_replica_log_client.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_replica_log_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4054,7 +4077,7 @@ ceph_test_cls_rgw_OBJECTS = $(am_ceph_test_cls_rgw_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_rgw_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_rgw_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4065,10 +4088,10 @@ am__ceph_test_cls_rgw_log_SOURCES_DIST = test/test_rgw_admin_log.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@am_ceph_test_cls_rgw_log_OBJECTS = test/ceph_test_cls_rgw_log-test_rgw_admin_log.$(OBJEXT)
 ceph_test_cls_rgw_log_OBJECTS = $(am_ceph_test_cls_rgw_log_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_rgw_log_DEPENDENCIES = $(LIBRADOS) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_log_client.la \
@@ -4084,10 +4107,10 @@ ceph_test_cls_rgw_log_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am__ceph_test_cls_rgw_meta_SOURCES_DIST = test/test_rgw_admin_meta.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@am_ceph_test_cls_rgw_meta_OBJECTS = test/ceph_test_cls_rgw_meta-test_rgw_admin_meta.$(OBJEXT)
 ceph_test_cls_rgw_meta_OBJECTS = $(am_ceph_test_cls_rgw_meta_OBJECTS)
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_rgw_meta_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_rgw_meta_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_timeindex_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la \
@@ -4107,9 +4130,9 @@ am__ceph_test_cls_rgw_opstate_SOURCES_DIST =  \
 ceph_test_cls_rgw_opstate_OBJECTS =  \
 	$(am_ceph_test_cls_rgw_opstate_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_rgw_opstate_DEPENDENCIES = $(LIBRADOS) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	libcls_log_client.la \
@@ -4131,7 +4154,7 @@ ceph_test_cls_statelog_OBJECTS = $(am_ceph_test_cls_statelog_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_statelog_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_statelog_client.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_statelog_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4145,7 +4168,7 @@ ceph_test_cls_version_OBJECTS = $(am_ceph_test_cls_version_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_cls_version_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_version_client.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_cls_version_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4155,9 +4178,9 @@ am__ceph_test_cors_SOURCES_DIST = test/test_cors.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@am_ceph_test_cors_OBJECTS = test/ceph_test_cors-test_cors.$(OBJEXT)
 ceph_test_cors_OBJECTS = $(am_ceph_test_cors_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@ceph_test_cors_DEPENDENCIES = $(LIBRADOS) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 ceph_test_cors_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(ceph_test_cors_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -4170,7 +4193,7 @@ am__ceph_test_filejournal_SOURCES_DIST = test/test_filejournal.cc
 ceph_test_filejournal_OBJECTS = $(am_ceph_test_filejournal_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_filejournal_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_filejournal_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4182,7 +4205,7 @@ am__ceph_test_filestore_SOURCES_DIST =  \
 ceph_test_filestore_OBJECTS = $(am_ceph_test_filestore_OBJECTS)
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@ceph_test_filestore_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_filestore_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4232,14 +4255,14 @@ am__ceph_test_keys_SOURCES_DIST = test/testkeys.cc
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	test/testkeys.$(OBJEXT)
 ceph_test_keys_OBJECTS = $(am_ceph_test_keys_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@ceph_test_keys_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_23) \
+@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_10)
 am__ceph_test_keyvaluedb_SOURCES_DIST = test/objectstore/test_kv.cc
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@am_ceph_test_keyvaluedb_OBJECTS = test/objectstore/ceph_test_keyvaluedb-test_kv.$(OBJEXT)
 ceph_test_keyvaluedb_OBJECTS = $(am_ceph_test_keyvaluedb_OBJECTS)
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@ceph_test_keyvaluedb_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_keyvaluedb_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4252,7 +4275,7 @@ ceph_test_keyvaluedb_atomicity_OBJECTS =  \
 	$(am_ceph_test_keyvaluedb_atomicity_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_keyvaluedb_atomicity_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_keyvaluedb_atomicity_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4267,7 +4290,7 @@ ceph_test_keyvaluedb_iterators_OBJECTS =  \
 	$(am_ceph_test_keyvaluedb_iterators_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_keyvaluedb_iterators_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_keyvaluedb_iterators_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4289,7 +4312,7 @@ ceph_test_libcephfs_OBJECTS = $(am_ceph_test_libcephfs_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@ceph_test_libcephfs_DEPENDENCIES = $(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 ceph_test_libcephfs_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(ceph_test_libcephfs_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -4307,7 +4330,7 @@ ceph_test_librbd_OBJECTS = $(am_ceph_test_librbd_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	libcls_journal_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librados_api.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_7) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_librbd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4324,7 +4347,7 @@ ceph_test_librbd_api_OBJECTS = $(am_ceph_test_librbd_api_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBRBD) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_librbd_api_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4350,7 +4373,7 @@ ceph_test_mon_msg_OBJECTS = $(am_ceph_test_mon_msg_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_mon_msg_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) $(LIBOSDC) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26)
 ceph_test_mon_msg_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(ceph_test_mon_msg_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -4369,7 +4392,7 @@ am__ceph_test_msgr_SOURCES_DIST = test/msgr/test_msgr.cc
 ceph_test_msgr_OBJECTS = $(am_ceph_test_msgr_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_msgr_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_msgr_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4389,7 +4412,7 @@ am__ceph_test_object_map_SOURCES_DIST =  \
 ceph_test_object_map_OBJECTS = $(am_ceph_test_object_map_OBJECTS)
 @ENABLE_SERVER_TRUE@ceph_test_object_map_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_object_map_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4409,7 +4432,7 @@ am__ceph_test_objectstore_SOURCES_DIST =  \
 ceph_test_objectstore_OBJECTS = $(am_ceph_test_objectstore_OBJECTS)
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@ceph_test_objectstore_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_objectstore_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4443,7 +4466,7 @@ ceph_test_rados_api_aio_OBJECTS =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@ceph_test_rados_api_aio_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_aio_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4456,7 +4479,7 @@ ceph_test_rados_api_c_read_operations_OBJECTS =  \
 	$(am_ceph_test_rados_api_c_read_operations_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_c_read_operations_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_c_read_operations_LINK = $(LIBTOOL) $(AM_V_lt) \
 	--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
@@ -4469,7 +4492,7 @@ ceph_test_rados_api_c_write_operations_OBJECTS =  \
 	$(am_ceph_test_rados_api_c_write_operations_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_c_write_operations_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_c_write_operations_LINK = $(LIBTOOL) $(AM_V_lt) \
 	--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
@@ -4481,7 +4504,7 @@ ceph_test_rados_api_cls_OBJECTS =  \
 	$(am_ceph_test_rados_api_cls_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_cls_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_cls_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4495,7 +4518,7 @@ ceph_test_rados_api_cmd_OBJECTS =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_cmd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4506,7 +4529,7 @@ am__ceph_test_rados_api_io_SOURCES_DIST = test/librados/io.cc
 ceph_test_rados_api_io_OBJECTS = $(am_ceph_test_rados_api_io_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_io_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_io_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4518,7 +4541,7 @@ ceph_test_rados_api_list_OBJECTS =  \
 	$(am_ceph_test_rados_api_list_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_list_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_rados_api_list_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4531,7 +4554,7 @@ ceph_test_rados_api_lock_OBJECTS =  \
 	$(am_ceph_test_rados_api_lock_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_lock_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_lock_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4543,7 +4566,7 @@ ceph_test_rados_api_misc_OBJECTS =  \
 	$(am_ceph_test_rados_api_misc_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_misc_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_misc_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4556,7 +4579,7 @@ ceph_test_rados_api_nlist_OBJECTS =  \
 	$(am_ceph_test_rados_api_nlist_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_nlist_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_nlist_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4568,7 +4591,7 @@ ceph_test_rados_api_pool_OBJECTS =  \
 	$(am_ceph_test_rados_api_pool_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_pool_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_pool_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4581,7 +4604,7 @@ ceph_test_rados_api_snapshots_OBJECTS =  \
 	$(am_ceph_test_rados_api_snapshots_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_snapshots_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_snapshots_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4593,7 +4616,7 @@ ceph_test_rados_api_stat_OBJECTS =  \
 	$(am_ceph_test_rados_api_stat_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_stat_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_stat_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4607,7 +4630,7 @@ ceph_test_rados_api_tier_OBJECTS =  \
 	$(am_ceph_test_rados_api_tier_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_tier_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_tier_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4624,8 +4647,8 @@ ceph_test_rados_api_tmap_migrate_OBJECTS =  \
 	$(am_ceph_test_rados_api_tmap_migrate_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_tmap_migrate_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_22) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_23) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_cephfs_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
@@ -4640,7 +4663,7 @@ ceph_test_rados_api_watch_notify_OBJECTS =  \
 	$(am_ceph_test_rados_api_watch_notify_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_api_watch_notify_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rados_api_watch_notify_LINK = $(LIBTOOL) $(AM_V_lt) \
 	--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
@@ -4692,7 +4715,7 @@ ceph_test_rados_striper_api_aio_OBJECTS =  \
 	$(am_ceph_test_rados_striper_api_aio_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_striper_api_aio_DEPENDENCIES = $(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOSSTRIPER) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(RADOS_STRIPER_TEST_LDADD)
 ceph_test_rados_striper_api_aio_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4706,7 +4729,7 @@ ceph_test_rados_striper_api_io_OBJECTS =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_striper_api_io_DEPENDENCIES = $(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOSSTRIPER) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(RADOS_STRIPER_TEST_LDADD)
 ceph_test_rados_striper_api_io_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4719,7 +4742,7 @@ ceph_test_rados_striper_api_striping_OBJECTS =  \
 	$(am_ceph_test_rados_striper_api_striping_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE at ceph_test_rados_striper_api_striping_DEPENDENCIES = $(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOSSTRIPER) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSSTRIPER_TRUE@@WITH_RADOS_TRUE@	$(RADOS_STRIPER_TEST_LDADD)
 ceph_test_rados_striper_api_striping_LINK = $(LIBTOOL) $(AM_V_lt) \
 	--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
@@ -4759,7 +4782,7 @@ ceph_test_rbd_mirror_OBJECTS = $(am_ceph_test_rbd_mirror_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librados_api.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_7) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBOSDC) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_rbd_mirror_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -4791,10 +4814,10 @@ am__ceph_test_rgw_manifest_SOURCES_DIST =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_ceph_test_rgw_manifest_OBJECTS = test/rgw/ceph_test_rgw_manifest-test_rgw_manifest.$(OBJEXT)
 ceph_test_rgw_manifest_OBJECTS = $(am_ceph_test_rgw_manifest_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_test_rgw_manifest_DEPENDENCIES = $(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1)
 ceph_test_rgw_manifest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4804,10 +4827,10 @@ am__ceph_test_rgw_obj_SOURCES_DIST = test/rgw/test_rgw_obj.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_ceph_test_rgw_obj_OBJECTS = test/rgw/ceph_test_rgw_obj-test_rgw_obj.$(OBJEXT)
 ceph_test_rgw_obj_OBJECTS = $(am_ceph_test_rgw_obj_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_test_rgw_obj_DEPENDENCIES = $(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1)
 ceph_test_rgw_obj_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4819,10 +4842,10 @@ am__ceph_test_rgw_period_history_SOURCES_DIST =  \
 ceph_test_rgw_period_history_OBJECTS =  \
 	$(am_ceph_test_rgw_period_history_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at ceph_test_rgw_period_history_DEPENDENCIES = $(LIBRADOS) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1)
 ceph_test_rgw_period_history_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4837,8 +4860,8 @@ am__ceph_test_snap_mapper_SOURCES_DIST = test/test_snap_mapper.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at am_ceph_test_snap_mapper_OBJECTS = test/ceph_test_snap_mapper-test_snap_mapper.$(OBJEXT)
 ceph_test_snap_mapper_OBJECTS = $(am_ceph_test_snap_mapper_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at ceph_test_snap_mapper_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 ceph_test_snap_mapper_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4850,7 +4873,7 @@ ceph_test_stress_watch_OBJECTS = $(am_ceph_test_stress_watch_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE at ceph_test_stress_watch_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_4) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9)
 ceph_test_stress_watch_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4880,7 +4903,7 @@ am__ceph_xattr_bench_SOURCES_DIST = test/xattr_bench.cc
 ceph_xattr_bench_OBJECTS = $(am_ceph_xattr_bench_OBJECTS)
 @ENABLE_SERVER_TRUE at ceph_xattr_bench_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 ceph_xattr_bench_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -4898,7 +4921,7 @@ am__cephfs_data_scan_SOURCES_DIST = tools/cephfs/cephfs-data-scan.cc \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/RoleSelector.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/MDSUtility.$(OBJEXT)
 cephfs_data_scan_OBJECTS = $(am_cephfs_data_scan_OBJECTS)
- at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_data_scan_DEPENDENCIES = $(am__DEPENDENCIES_22) \
+ at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_data_scan_DEPENDENCIES = $(am__DEPENDENCIES_23) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	libcls_cephfs_client.la \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
@@ -4918,7 +4941,7 @@ am__cephfs_journal_tool_SOURCES_DIST =  \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/Resetter.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/MDSUtility.$(OBJEXT)
 cephfs_journal_tool_OBJECTS = $(am_cephfs_journal_tool_OBJECTS)
- at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_journal_tool_DEPENDENCIES = $(am__DEPENDENCIES_22) \
+ at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_journal_tool_DEPENDENCIES = $(am__DEPENDENCIES_23) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__cephfs_table_tool_SOURCES_DIST =  \
@@ -4929,7 +4952,7 @@ am__cephfs_table_tool_SOURCES_DIST =  \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/RoleSelector.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/MDSUtility.$(OBJEXT)
 cephfs_table_tool_OBJECTS = $(am_cephfs_table_tool_OBJECTS)
- at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_table_tool_DEPENDENCIES = $(am__DEPENDENCIES_22) \
+ at ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE at cephfs_table_tool_DEPENDENCIES = $(am__DEPENDENCIES_23) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am_crushtool_OBJECTS = tools/crushtool.$(OBJEXT)
@@ -4941,7 +4964,7 @@ am__get_command_descriptions_SOURCES_DIST =  \
 get_command_descriptions_OBJECTS =  \
 	$(am_get_command_descriptions_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE at get_command_descriptions_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_23) \
+ at ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(LIBMON_TYPES) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_16) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_4) \
@@ -4955,7 +4978,7 @@ librados_config_OBJECTS = $(am_librados_config_OBJECTS)
 am__librgw_file_SOURCES_DIST = test/librgw_file.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_librgw_file_OBJECTS = test/librgw_file-librgw_file.$(OBJEXT)
 librgw_file_OBJECTS = $(am_librgw_file_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librados.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -4967,7 +4990,7 @@ librgw_file_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
 am__librgw_file_aw_SOURCES_DIST = test/librgw_file_aw.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_librgw_file_aw_OBJECTS = test/librgw_file_aw-librgw_file_aw.$(OBJEXT)
 librgw_file_aw_OBJECTS = $(am_librgw_file_aw_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_aw_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_aw_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librados.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -4980,7 +5003,7 @@ librgw_file_aw_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am__librgw_file_cd_SOURCES_DIST = test/librgw_file_cd.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_librgw_file_cd_OBJECTS = test/librgw_file_cd-librgw_file_cd.$(OBJEXT)
 librgw_file_cd_OBJECTS = $(am_librgw_file_cd_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_cd_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_cd_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librados.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -4993,7 +5016,7 @@ librgw_file_cd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am__librgw_file_gp_SOURCES_DIST = test/librgw_file_gp.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_librgw_file_gp_OBJECTS = test/librgw_file_gp-librgw_file_gp.$(OBJEXT)
 librgw_file_gp_OBJECTS = $(am_librgw_file_gp_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_gp_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_gp_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librados.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -5006,7 +5029,7 @@ librgw_file_gp_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am__librgw_file_nfsns_SOURCES_DIST = test/librgw_file_nfsns.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_librgw_file_nfsns_OBJECTS = test/librgw_file_nfsns-librgw_file_nfsns.$(OBJEXT)
 librgw_file_nfsns_OBJECTS = $(am_librgw_file_nfsns_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_nfsns_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at librgw_file_nfsns_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librgw.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	librados.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
@@ -5053,10 +5076,10 @@ am__radosgw_SOURCES_DIST = rgw/rgw_fcgi_process.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	civetweb/src/radosgw-civetweb.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_main.$(OBJEXT)
 radosgw_OBJECTS = $(am_radosgw_OBJECTS)
-am__DEPENDENCIES_26 = $(am__DEPENDENCIES_1)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+am__DEPENDENCIES_27 = $(am__DEPENDENCIES_1)
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(LIBCIVETWEB) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_27) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
@@ -5064,19 +5087,19 @@ am__radosgw_admin_SOURCES_DIST = rgw/rgw_admin.cc rgw/rgw_orphan.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_radosgw_admin_OBJECTS = rgw/rgw_admin.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	rgw/rgw_orphan.$(OBJEXT)
 radosgw_admin_OBJECTS = $(am_radosgw_admin_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_admin_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_admin_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__radosgw_object_expirer_SOURCES_DIST = rgw/rgw_object_expirer.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_radosgw_object_expirer_OBJECTS = rgw/rgw_object_expirer.$(OBJEXT)
 radosgw_object_expirer_OBJECTS = $(am_radosgw_object_expirer_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_object_expirer_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_object_expirer_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__radosgw_token_SOURCES_DIST = rgw/rgw_token.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at am_radosgw_token_OBJECTS = rgw/rgw_token.$(OBJEXT)
 radosgw_token_OBJECTS = $(am_radosgw_token_OBJECTS)
- at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_token_DEPENDENCIES = $(am__DEPENDENCIES_17) \
+ at ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE at radosgw_token_DEPENDENCIES = $(am__DEPENDENCIES_19) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_12) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10)
 am__rbd_SOURCES_DIST = tools/rbd/rbd.cc tools/rbd/ArgumentTypes.cc \
@@ -5145,7 +5168,7 @@ rbd_OBJECTS = $(am_rbd_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_1) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_1) \
- at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_270)
+ at ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_280)
 am__rbd_fuse_SOURCES_DIST = rbd_fuse/rbd-fuse.cc
 @ENABLE_CLIENT_TRUE@@WITH_FUSE_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE at am_rbd_fuse_OBJECTS = rbd_fuse/rbd_fuse-rbd-fuse.$(OBJEXT)
 rbd_fuse_OBJECTS = $(am_rbd_fuse_OBJECTS)
@@ -5392,7 +5415,7 @@ test_build_librgw_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(test_build_librgw_LDFLAGS) $(LDFLAGS) -o $@
 am_unittest_addrs_OBJECTS = test/unittest_addrs-test_addrs.$(OBJEXT)
 unittest_addrs_OBJECTS = $(am_unittest_addrs_OBJECTS)
-unittest_addrs_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_addrs_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_addrs_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5401,7 +5424,7 @@ unittest_addrs_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_admin_socket_OBJECTS =  \
 	test/unittest_admin_socket-admin_socket.$(OBJEXT)
 unittest_admin_socket_OBJECTS = $(am_unittest_admin_socket_OBJECTS)
-unittest_admin_socket_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_admin_socket_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_admin_socket_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5409,7 +5432,7 @@ unittest_admin_socket_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_arch_OBJECTS = test/unittest_arch-test_arch.$(OBJEXT)
 unittest_arch_OBJECTS = $(am_unittest_arch_OBJECTS)
-unittest_arch_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_arch_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_arch_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5418,7 +5441,7 @@ unittest_arch_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_async_compressor_OBJECTS = test/common/unittest_async_compressor-test_async_compressor.$(OBJEXT)
 unittest_async_compressor_OBJECTS =  \
 	$(am_unittest_async_compressor_OBJECTS)
-unittest_async_compressor_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_async_compressor_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10) $(LIBCOMPRESSOR) $(am__DEPENDENCIES_4)
 unittest_async_compressor_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5429,7 +5452,7 @@ am__unittest_base64_SOURCES_DIST = test/base64.cc
 unittest_base64_OBJECTS = $(am_unittest_base64_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE at unittest_base64_DEPENDENCIES = $(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
- at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+ at ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_base64_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_base64_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -5437,7 +5460,7 @@ unittest_base64_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_bit_vector_OBJECTS =  \
 	test/common/unittest_bit_vector-test_bit_vector.$(OBJEXT)
 unittest_bit_vector_OBJECTS = $(am_unittest_bit_vector_OBJECTS)
-unittest_bit_vector_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_bit_vector_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_bit_vector_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5446,7 +5469,7 @@ unittest_bit_vector_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_blkdev_OBJECTS =  \
 	test/common/unittest_blkdev-test_blkdev.$(OBJEXT)
 unittest_blkdev_OBJECTS = $(am_unittest_blkdev_OBJECTS)
-unittest_blkdev_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_blkdev_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_blkdev_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5455,7 +5478,7 @@ unittest_blkdev_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_bloom_filter_OBJECTS =  \
 	test/common/unittest_bloom_filter-test_bloom_filter.$(OBJEXT)
 unittest_bloom_filter_OBJECTS = $(am_unittest_bloom_filter_OBJECTS)
-unittest_bloom_filter_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_bloom_filter_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_bloom_filter_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5466,7 +5489,7 @@ am__unittest_bluefs_SOURCES_DIST = test/objectstore/test_bluefs.cc
 unittest_bluefs_OBJECTS = $(am_unittest_bluefs_OBJECTS)
 @ENABLE_SERVER_TRUE@@LINUX_TRUE at unittest_bluefs_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_10)
 unittest_bluefs_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5479,7 +5502,7 @@ unittest_bluestore_types_OBJECTS =  \
 	$(am_unittest_bluestore_types_OBJECTS)
 @ENABLE_SERVER_TRUE@@LINUX_TRUE at unittest_bluestore_types_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@LINUX_TRUE@	$(am__DEPENDENCIES_10)
 unittest_bluestore_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5488,7 +5511,7 @@ unittest_bluestore_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_bufferlist_OBJECTS =  \
 	test/unittest_bufferlist-bufferlist.$(OBJEXT)
 unittest_bufferlist_OBJECTS = $(am_unittest_bufferlist_OBJECTS)
-unittest_bufferlist_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_bufferlist_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_bufferlist_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5497,7 +5520,7 @@ unittest_bufferlist_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_ceph_argparse_OBJECTS =  \
 	test/unittest_ceph_argparse-ceph_argparse.$(OBJEXT)
 unittest_ceph_argparse_OBJECTS = $(am_unittest_ceph_argparse_OBJECTS)
-unittest_ceph_argparse_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_ceph_argparse_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_ceph_argparse_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5507,7 +5530,7 @@ am_unittest_ceph_compatset_OBJECTS =  \
 	test/unittest_ceph_compatset-ceph_compatset.$(OBJEXT)
 unittest_ceph_compatset_OBJECTS =  \
 	$(am_unittest_ceph_compatset_OBJECTS)
-unittest_ceph_compatset_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_ceph_compatset_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_ceph_compatset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5516,7 +5539,7 @@ unittest_ceph_compatset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_ceph_crypto_OBJECTS =  \
 	test/unittest_ceph_crypto-ceph_crypto.$(OBJEXT)
 unittest_ceph_crypto_OBJECTS = $(am_unittest_ceph_crypto_OBJECTS)
-unittest_ceph_crypto_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_ceph_crypto_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_ceph_crypto_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5528,7 +5551,7 @@ am__unittest_chain_xattr_SOURCES_DIST =  \
 unittest_chain_xattr_OBJECTS = $(am_unittest_chain_xattr_OBJECTS)
 @ENABLE_SERVER_TRUE at unittest_chain_xattr_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
- at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 unittest_chain_xattr_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5542,9 +5565,9 @@ am__unittest_compression_plugin_SOURCES_DIST =  \
 unittest_compression_plugin_OBJECTS =  \
 	$(am_unittest_compression_plugin_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at unittest_compression_plugin_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_compression_plugin_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5562,9 +5585,9 @@ am__objects_52 = compressor/unittest_compression_plugin_snappy-Compressor.$(OBJE
 unittest_compression_plugin_snappy_OBJECTS =  \
 	$(am_unittest_compression_plugin_snappy_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at unittest_compression_plugin_snappy_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBCOMPRESSOR) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
@@ -5586,9 +5609,9 @@ am__objects_53 = compressor/unittest_compression_plugin_zlib-Compressor.$(OBJEXT
 unittest_compression_plugin_zlib_OBJECTS =  \
 	$(am_unittest_compression_plugin_zlib_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at unittest_compression_plugin_zlib_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBCOMPRESSOR) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
@@ -5609,9 +5632,9 @@ am__objects_54 =  \
 unittest_compression_snappy_OBJECTS =  \
 	$(am_unittest_compression_snappy_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at unittest_compression_snappy_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_compression_snappy_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5632,9 +5655,9 @@ am__objects_55 =  \
 unittest_compression_zlib_OBJECTS =  \
 	$(am_unittest_compression_zlib_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE at unittest_compression_zlib_DEPENDENCIES =  \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
- at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+ at ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_compression_zlib_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5644,7 +5667,7 @@ unittest_compression_zlib_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_config_OBJECTS =  \
 	test/common/unittest_config-test_config.$(OBJEXT)
 unittest_config_OBJECTS = $(am_unittest_config_OBJECTS)
-unittest_config_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_config_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_config_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5653,7 +5676,7 @@ unittest_config_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_confutils_OBJECTS =  \
 	test/unittest_confutils-confutils.$(OBJEXT)
 unittest_confutils_OBJECTS = $(am_unittest_confutils_OBJECTS)
-unittest_confutils_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_confutils_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_confutils_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5662,7 +5685,7 @@ unittest_confutils_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_context_OBJECTS =  \
 	test/common/unittest_context-test_context.$(OBJEXT)
 unittest_context_OBJECTS = $(am_unittest_context_OBJECTS)
-unittest_context_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_context_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_context_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5671,7 +5694,7 @@ unittest_context_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_crc32c_OBJECTS =  \
 	test/common/unittest_crc32c-test_crc32c.$(OBJEXT)
 unittest_crc32c_OBJECTS = $(am_unittest_crc32c_OBJECTS)
-unittest_crc32c_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_crc32c_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_crc32c_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5680,7 +5703,7 @@ unittest_crc32c_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_crush_OBJECTS = test/crush/unittest_crush-crush.$(OBJEXT)
 unittest_crush_OBJECTS = $(am_unittest_crush_OBJECTS)
 unittest_crush_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_3) \
+	$(am__DEPENDENCIES_26) $(am__DEPENDENCIES_3) \
 	$(am__DEPENDENCIES_10)
 unittest_crush_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5689,7 +5712,7 @@ unittest_crush_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_crush_wrapper_OBJECTS =  \
 	test/crush/unittest_crush_wrapper-CrushWrapper.$(OBJEXT)
 unittest_crush_wrapper_OBJECTS = $(am_unittest_crush_wrapper_OBJECTS)
-unittest_crush_wrapper_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_crush_wrapper_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10) $(LIBCRUSH)
 unittest_crush_wrapper_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5697,7 +5720,7 @@ unittest_crush_wrapper_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_crypto_OBJECTS = test/unittest_crypto-crypto.$(OBJEXT)
 unittest_crypto_OBJECTS = $(am_unittest_crypto_OBJECTS)
-unittest_crypto_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_crypto_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_crypto_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5706,7 +5729,7 @@ unittest_crypto_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_crypto_init_OBJECTS =  \
 	test/unittest_crypto_init-crypto_init.$(OBJEXT)
 unittest_crypto_init_OBJECTS = $(am_unittest_crypto_init_OBJECTS)
-unittest_crypto_init_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_crypto_init_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_crypto_init_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5715,7 +5738,7 @@ unittest_crypto_init_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_daemon_config_OBJECTS =  \
 	test/unittest_daemon_config-daemon_config.$(OBJEXT)
 unittest_daemon_config_OBJECTS = $(am_unittest_daemon_config_OBJECTS)
-unittest_daemon_config_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_daemon_config_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_daemon_config_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5725,8 +5748,8 @@ am__unittest_ecbackend_SOURCES_DIST = test/osd/TestECBackend.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_ecbackend_OBJECTS = test/osd/unittest_ecbackend-TestECBackend.$(OBJEXT)
 unittest_ecbackend_OBJECTS = $(am_unittest_ecbackend_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_ecbackend_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 unittest_ecbackend_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5738,7 +5761,7 @@ unittest_encoding_OBJECTS = $(am_unittest_encoding_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@unittest_encoding_DEPENDENCIES = $(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_encoding_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_encoding_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -5749,9 +5772,9 @@ am__unittest_erasure_code_SOURCES_DIST = erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/unittest_erasure_code-TestErasureCode.$(OBJEXT)
 unittest_erasure_code_OBJECTS = $(am_unittest_erasure_code_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 unittest_erasure_code_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5765,9 +5788,9 @@ am__unittest_erasure_code_example_SOURCES_DIST =  \
 unittest_erasure_code_example_OBJECTS =  \
 	$(am_unittest_erasure_code_example_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_example_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 unittest_erasure_code_example_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -5780,9 +5803,9 @@ am__unittest_erasure_code_isa_SOURCES_DIST =  \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	test/erasure-code/unittest_erasure_code_isa-TestErasureCodeIsa.$(OBJEXT)
 unittest_erasure_code_isa_OBJECTS =  \
 	$(am_unittest_erasure_code_isa_OBJECTS)
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_isa_DEPENDENCIES = $(am__DEPENDENCIES_24) \
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_isa_DEPENDENCIES = $(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	libisa.la \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(LIBERASURE_CODE) \
@@ -5836,9 +5859,9 @@ am__objects_56 = erasure-code/unittest_erasure_code_jerasure-ErasureCode.$(OBJEX
 unittest_erasure_code_jerasure_OBJECTS =  \
 	$(am_unittest_erasure_code_jerasure_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_jerasure_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_jerasure_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5859,9 +5882,9 @@ am__objects_57 =  \
 unittest_erasure_code_lrc_OBJECTS =  \
 	$(am_unittest_erasure_code_lrc_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_lrc_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_lrc_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5876,9 +5899,9 @@ am__unittest_erasure_code_plugin_SOURCES_DIST =  \
 unittest_erasure_code_plugin_OBJECTS =  \
 	$(am_unittest_erasure_code_plugin_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_plugin_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -5892,9 +5915,9 @@ am__unittest_erasure_code_plugin_isa_SOURCES_DIST =  \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	test/erasure-code/unittest_erasure_code_plugin_isa-TestErasureCodePluginIsa.$(OBJEXT)
 unittest_erasure_code_plugin_isa_OBJECTS =  \
 	$(am_unittest_erasure_code_plugin_isa_OBJECTS)
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_isa_DEPENDENCIES = $(am__DEPENDENCIES_24) \
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_isa_DEPENDENCIES = $(am__DEPENDENCIES_25) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(LIBERASURE_CODE) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
@@ -5908,9 +5931,9 @@ am__unittest_erasure_code_plugin_jerasure_SOURCES_DIST =  \
 unittest_erasure_code_plugin_jerasure_OBJECTS =  \
 	$(am_unittest_erasure_code_plugin_jerasure_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_jerasure_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_plugin_jerasure_LINK = $(LIBTOOL) $(AM_V_lt) \
@@ -5923,9 +5946,9 @@ am__unittest_erasure_code_plugin_lrc_SOURCES_DIST =  \
 unittest_erasure_code_plugin_lrc_OBJECTS =  \
 	$(am_unittest_erasure_code_plugin_lrc_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_lrc_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_plugin_lrc_LINK = $(LIBTOOL) $(AM_V_lt) \
@@ -5938,9 +5961,9 @@ am__unittest_erasure_code_plugin_shec_SOURCES_DIST =  \
 unittest_erasure_code_plugin_shec_OBJECTS =  \
 	$(am_unittest_erasure_code_plugin_shec_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_shec_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_plugin_shec_LINK = $(LIBTOOL) $(AM_V_lt) \
@@ -5997,9 +6020,9 @@ am__objects_58 =  \
 unittest_erasure_code_shec_OBJECTS =  \
 	$(am_unittest_erasure_code_shec_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_shec_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6055,9 +6078,9 @@ am__objects_59 = erasure-code/unittest_erasure_code_shec_all-ErasureCode.$(OBJEX
 unittest_erasure_code_shec_all_OBJECTS =  \
 	$(am_unittest_erasure_code_shec_all_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_all_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_shec_all_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6113,9 +6136,9 @@ am__objects_60 = erasure-code/unittest_erasure_code_shec_arguments-ErasureCode.$
 unittest_erasure_code_shec_arguments_OBJECTS =  \
 	$(am_unittest_erasure_code_shec_arguments_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_arguments_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_shec_arguments_LINK = $(LIBTOOL) $(AM_V_lt) \
@@ -6171,9 +6194,9 @@ am__objects_61 = erasure-code/unittest_erasure_code_shec_thread-ErasureCode.$(OB
 unittest_erasure_code_shec_thread_OBJECTS =  \
 	$(am_unittest_erasure_code_shec_thread_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_thread_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_4) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_erasure_code_shec_thread_LINK = $(LIBTOOL) $(AM_V_lt) \
@@ -6182,7 +6205,7 @@ unittest_erasure_code_shec_thread_LINK = $(LIBTOOL) $(AM_V_lt) \
 	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
 am_unittest_escape_OBJECTS = test/unittest_escape-escape.$(OBJEXT)
 unittest_escape_OBJECTS = $(am_unittest_escape_OBJECTS)
-unittest_escape_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_escape_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_escape_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6192,7 +6215,7 @@ am_unittest_formatter_OBJECTS =  \
 	test/unittest_formatter-formatter.$(OBJEXT) \
 	rgw/unittest_formatter-rgw_formats.$(OBJEXT)
 unittest_formatter_OBJECTS = $(am_unittest_formatter_OBJECTS)
-unittest_formatter_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_formatter_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_formatter_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6200,7 +6223,7 @@ unittest_formatter_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_gather_OBJECTS = test/unittest_gather-gather.$(OBJEXT)
 unittest_gather_OBJECTS = $(am_unittest_gather_OBJECTS)
-unittest_gather_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_gather_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_gather_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6209,8 +6232,8 @@ unittest_gather_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_heartbeatmap_OBJECTS =  \
 	test/unittest_heartbeatmap-heartbeat_map.$(OBJEXT)
 unittest_heartbeatmap_OBJECTS = $(am_unittest_heartbeatmap_OBJECTS)
-unittest_heartbeatmap_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_10)
+unittest_heartbeatmap_DEPENDENCIES = $(am__DEPENDENCIES_26) \
+	$(am__DEPENDENCIES_10)
 unittest_heartbeatmap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_heartbeatmap_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6218,7 +6241,7 @@ unittest_heartbeatmap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_histogram_OBJECTS =  \
 	test/common/unittest_histogram-histogram.$(OBJEXT)
 unittest_histogram_OBJECTS = $(am_unittest_histogram_OBJECTS)
-unittest_histogram_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_histogram_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_histogram_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6228,8 +6251,8 @@ am__unittest_hitset_SOURCES_DIST = test/osd/hitset.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_hitset_OBJECTS = test/osd/unittest_hitset-hitset.$(OBJEXT)
 unittest_hitset_OBJECTS = $(am_unittest_hitset_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_hitset_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 unittest_hitset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6238,7 +6261,7 @@ unittest_hitset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_interval_set_OBJECTS =  \
 	test/common/unittest_interval_set-test_interval_set.$(OBJEXT)
 unittest_interval_set_OBJECTS = $(am_unittest_interval_set_OBJECTS)
-unittest_interval_set_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_interval_set_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_interval_set_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6247,7 +6270,7 @@ unittest_interval_set_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_io_priority_OBJECTS =  \
 	test/common/unittest_io_priority-test_io_priority.$(OBJEXT)
 unittest_io_priority_OBJECTS = $(am_unittest_io_priority_OBJECTS)
-unittest_io_priority_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_io_priority_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_io_priority_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6256,7 +6279,7 @@ unittest_io_priority_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_ipaddr_OBJECTS =  \
 	test/unittest_ipaddr-test_ipaddr.$(OBJEXT)
 unittest_ipaddr_OBJECTS = $(am_unittest_ipaddr_OBJECTS)
-unittest_ipaddr_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_ipaddr_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_ipaddr_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6289,7 +6312,7 @@ unittest_journal_OBJECTS = $(am_unittest_journal_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	libcls_journal_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	librados_test_stub.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	librados_internal.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_9) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS)
@@ -6302,7 +6325,7 @@ am__unittest_lfnindex_SOURCES_DIST = test/os/TestLFNIndex.cc
 unittest_lfnindex_OBJECTS = $(am_unittest_lfnindex_OBJECTS)
 @ENABLE_SERVER_TRUE@unittest_lfnindex_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 unittest_lfnindex_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6314,7 +6337,7 @@ unittest_libcephfs_config_OBJECTS =  \
 	$(am_unittest_libcephfs_config_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@unittest_libcephfs_config_DEPENDENCIES = $(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_libcephfs_config_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_libcephfs_config_CXXFLAGS) $(CXXFLAGS) \
@@ -6325,7 +6348,7 @@ unittest_librados_OBJECTS = $(am_unittest_librados_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@unittest_librados_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_librados_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_librados_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6338,19 +6361,23 @@ unittest_librados_config_OBJECTS =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@unittest_librados_config_DEPENDENCIES =  \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_librados_config_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_librados_config_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-am__unittest_librbd_SOURCES_DIST = test/librbd/test_main.cc \
-	test/librbd/test_mock_fixture.cc \
+am__unittest_librbd_SOURCES_DIST = test/librbd/test_BlockGuard.cc \
+	test/librbd/test_main.cc test/librbd/test_mock_fixture.cc \
 	test/librbd/test_mock_ExclusiveLock.cc \
 	test/librbd/test_mock_Journal.cc \
 	test/librbd/test_mock_ObjectWatcher.cc \
 	test/librbd/exclusive_lock/test_mock_AcquireRequest.cc \
+	test/librbd/exclusive_lock/test_mock_BreakRequest.cc \
+	test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc \
+	test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc \
 	test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc \
 	test/librbd/image/test_mock_RefreshRequest.cc \
+	test/librbd/image_watcher/test_mock_RewatchRequest.cc \
 	test/librbd/journal/test_mock_Replay.cc \
 	test/librbd/object_map/test_mock_InvalidateRequest.cc \
 	test/librbd/object_map/test_mock_LockRequest.cc \
@@ -6367,14 +6394,19 @@ am__unittest_librbd_SOURCES_DIST = test/librbd/test_main.cc \
 	test/librbd/operation/test_mock_SnapshotRemoveRequest.cc \
 	test/librbd/operation/test_mock_SnapshotRollbackRequest.cc \
 	test/librbd/operation/test_mock_SnapshotUnprotectRequest.cc
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@am_unittest_librbd_OBJECTS = test/librbd/unittest_librbd-test_main.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@am_unittest_librbd_OBJECTS = test/librbd/unittest_librbd-test_BlockGuard.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/unittest_librbd-test_main.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/unittest_librbd-test_mock_fixture.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/unittest_librbd-test_mock_ExclusiveLock.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/unittest_librbd-test_mock_Journal.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/unittest_librbd-test_mock_ObjectWatcher.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/unittest_librbd-test_mock_AcquireRequest.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/unittest_librbd-test_mock_ReleaseRequest.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/image/unittest_librbd-test_mock_RefreshRequest.$(OBJEXT) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/journal/unittest_librbd-test_mock_Replay.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/object_map/unittest_librbd-test_mock_InvalidateRequest.$(OBJEXT) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/object_map/unittest_librbd-test_mock_LockRequest.$(OBJEXT) \
@@ -6406,7 +6438,7 @@ unittest_librbd_OBJECTS = $(am_unittest_librbd_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librados_internal.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBOSDC) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_9)
 unittest_librbd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6415,14 +6447,14 @@ unittest_librbd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_log_OBJECTS = log/unittest_log-test.$(OBJEXT)
 unittest_log_OBJECTS = $(am_unittest_log_OBJECTS)
-unittest_log_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25)
+unittest_log_DEPENDENCIES = $(am__DEPENDENCIES_26) \
+	$(am__DEPENDENCIES_10)
 unittest_log_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(unittest_log_CXXFLAGS) \
 	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
 am_unittest_lru_OBJECTS = test/common/unittest_lru-test_lru.$(OBJEXT)
 unittest_lru_OBJECTS = $(am_unittest_lru_OBJECTS)
-unittest_lru_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_lru_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_lru_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(unittest_lru_CXXFLAGS) \
@@ -6431,8 +6463,8 @@ am__unittest_mds_authcap_SOURCES_DIST = test/mds/TestMDSAuthCaps.cc
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@am_unittest_mds_authcap_OBJECTS = test/mds/unittest_mds_authcap-TestMDSAuthCaps.$(OBJEXT)
 unittest_mds_authcap_OBJECTS = $(am_unittest_mds_authcap_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@unittest_mds_authcap_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_22) \
-@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_23) \
+@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@	$(am__DEPENDENCIES_10)
 unittest_mds_authcap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6441,7 +6473,7 @@ unittest_mds_authcap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_mds_types_OBJECTS =  \
 	test/fs/unittest_mds_types-mds_types.$(OBJEXT)
 unittest_mds_types_OBJECTS = $(am_unittest_mds_types_OBJECTS)
-unittest_mds_types_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_mds_types_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_mds_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6449,7 +6481,7 @@ unittest_mds_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_mime_OBJECTS = test/unittest_mime-mime.$(OBJEXT)
 unittest_mime_OBJECTS = $(am_unittest_mime_OBJECTS)
-unittest_mime_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_mime_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_mime_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6459,8 +6491,8 @@ am__unittest_mon_moncap_SOURCES_DIST = test/mon/moncap.cc
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@am_unittest_mon_moncap_OBJECTS = test/mon/unittest_mon_moncap-moncap.$(OBJEXT)
 unittest_mon_moncap_OBJECTS = $(am_unittest_mon_moncap_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@unittest_mon_moncap_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_23) \
-@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_24) \
+@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_10)
 unittest_mon_moncap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6470,8 +6502,8 @@ am__unittest_mon_pgmap_SOURCES_DIST = test/mon/PGMap.cc
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@am_unittest_mon_pgmap_OBJECTS = test/mon/unittest_mon_pgmap-PGMap.$(OBJEXT)
 unittest_mon_pgmap_OBJECTS = $(am_unittest_mon_pgmap_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@unittest_mon_pgmap_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_23) \
-@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_24) \
+@ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@	$(am__DEPENDENCIES_10)
 unittest_mon_pgmap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6480,7 +6512,7 @@ unittest_mon_pgmap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_mutex_debug_OBJECTS =  \
 	test/common/unittest_mutex_debug-test_mutex_debug.$(OBJEXT)
 unittest_mutex_debug_OBJECTS = $(am_unittest_mutex_debug_OBJECTS)
-unittest_mutex_debug_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_mutex_debug_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10) $(am__DEPENDENCIES_3)
 unittest_mutex_debug_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6493,8 +6525,8 @@ am__unittest_osd_osdcap_SOURCES_DIST = test/osd/osdcap.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_osd_osdcap_OBJECTS = test/osd/unittest_osd_osdcap-osdcap.$(OBJEXT)
 unittest_osd_osdcap_OBJECTS = $(am_unittest_osd_osdcap_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_osd_osdcap_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10)
 unittest_osd_osdcap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6503,7 +6535,7 @@ unittest_osd_osdcap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_osd_types_OBJECTS =  \
 	test/osd/unittest_osd_types-types.$(OBJEXT)
 unittest_osd_types_OBJECTS = $(am_unittest_osd_types_OBJECTS)
-unittest_osd_types_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_osd_types_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_osd_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6512,7 +6544,7 @@ unittest_osd_types_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_osdmap_OBJECTS =  \
 	test/osd/unittest_osdmap-TestOSDMap.$(OBJEXT)
 unittest_osdmap_OBJECTS = $(am_unittest_osdmap_OBJECTS)
-unittest_osdmap_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_osdmap_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_4) $(am__DEPENDENCIES_10)
 unittest_osdmap_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6522,8 +6554,8 @@ am__unittest_osdscrub_SOURCES_DIST = test/osd/TestOSDScrub.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_osdscrub_OBJECTS = test/osd/unittest_osdscrub-TestOSDScrub.$(OBJEXT)
 unittest_osdscrub_OBJECTS = $(am_unittest_osdscrub_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_osdscrub_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_osdscrub_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6534,7 +6566,7 @@ am__unittest_pageset_SOURCES_DIST = test/test_pageset.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_pageset_OBJECTS = test/unittest_pageset-test_pageset.$(OBJEXT)
 unittest_pageset_OBJECTS = $(am_unittest_pageset_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_pageset_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26)
 unittest_pageset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_pageset_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6542,7 +6574,7 @@ unittest_pageset_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_perf_counters_OBJECTS =  \
 	test/unittest_perf_counters-perf_counters.$(OBJEXT)
 unittest_perf_counters_OBJECTS = $(am_unittest_perf_counters_OBJECTS)
-unittest_perf_counters_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_perf_counters_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_perf_counters_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6552,8 +6584,8 @@ am__unittest_pglog_SOURCES_DIST = test/osd/TestPGLog.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@am_unittest_pglog_OBJECTS = test/osd/unittest_pglog-TestPGLog.$(OBJEXT)
 unittest_pglog_OBJECTS = $(am_unittest_pglog_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_pglog_DEPENDENCIES =  \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_24) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__DEPENDENCIES_1)
 unittest_pglog_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6564,7 +6596,7 @@ am_unittest_prebufferedstreambuf_OBJECTS = test/unittest_prebufferedstreambuf-te
 unittest_prebufferedstreambuf_OBJECTS =  \
 	$(am_unittest_prebufferedstreambuf_OBJECTS)
 unittest_prebufferedstreambuf_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_3)
+	$(am__DEPENDENCIES_26) $(am__DEPENDENCIES_3)
 unittest_prebufferedstreambuf_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_prebufferedstreambuf_CXXFLAGS) $(CXXFLAGS) \
@@ -6572,7 +6604,7 @@ unittest_prebufferedstreambuf_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_prioritized_queue_OBJECTS = test/common/unittest_prioritized_queue-test_prioritized_queue.$(OBJEXT)
 unittest_prioritized_queue_OBJECTS =  \
 	$(am_unittest_prioritized_queue_OBJECTS)
-unittest_prioritized_queue_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_prioritized_queue_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_prioritized_queue_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6622,7 +6654,7 @@ unittest_rbd_mirror_OBJECTS = $(am_unittest_rbd_mirror_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBRBD_TYPES) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBRADOS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(LIBOSDC) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_9)
 unittest_rbd_mirror_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
@@ -6638,7 +6670,7 @@ unittest_rbd_replay_OBJECTS = $(am_unittest_rbd_replay_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_10) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_replay.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd_replay_ios.la \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__DEPENDENCIES_26)
 unittest_rbd_replay_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_rbd_replay_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6646,7 +6678,7 @@ unittest_rbd_replay_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_readahead_OBJECTS =  \
 	test/common/unittest_readahead-Readahead.$(OBJEXT)
 unittest_readahead_OBJECTS = $(am_unittest_readahead_OBJECTS)
-unittest_readahead_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_readahead_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_readahead_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6658,7 +6690,7 @@ am__unittest_rocksdb_option_SOURCES_DIST =  \
 unittest_rocksdb_option_OBJECTS =  \
 	$(am_unittest_rocksdb_option_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE@unittest_rocksdb_option_DEPENDENCIES = $(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_DLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_10)
 unittest_rocksdb_option_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6670,7 +6702,7 @@ am__unittest_rocksdb_option_static_SOURCES_DIST =  \
 unittest_rocksdb_option_static_OBJECTS =  \
 	$(am_unittest_rocksdb_option_static_OBJECTS)
 @ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE@unittest_rocksdb_option_static_DEPENDENCIES = $(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@@WITH_SLIBROCKSDB_TRUE@	$(am__DEPENDENCIES_10)
 unittest_rocksdb_option_static_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6681,7 +6713,7 @@ am__unittest_run_cmd_SOURCES_DIST = test/run_cmd.cc
 unittest_run_cmd_OBJECTS = $(am_unittest_run_cmd_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@unittest_run_cmd_DEPENDENCIES = $(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_run_cmd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_run_cmd_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6689,7 +6721,7 @@ unittest_run_cmd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_safe_io_OBJECTS =  \
 	test/common/unittest_safe_io-test_safe_io.$(OBJEXT)
 unittest_safe_io_OBJECTS = $(am_unittest_safe_io_OBJECTS)
-unittest_safe_io_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_safe_io_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_safe_io_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6698,7 +6730,7 @@ unittest_safe_io_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_shared_cache_OBJECTS =  \
 	test/common/unittest_shared_cache-test_shared_cache.$(OBJEXT)
 unittest_shared_cache_OBJECTS = $(am_unittest_shared_cache_OBJECTS)
-unittest_shared_cache_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_shared_cache_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_shared_cache_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6707,7 +6739,7 @@ unittest_shared_cache_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_sharedptr_registry_OBJECTS = test/common/unittest_sharedptr_registry-test_sharedptr_registry.$(OBJEXT)
 unittest_sharedptr_registry_OBJECTS =  \
 	$(am_unittest_sharedptr_registry_OBJECTS)
-unittest_sharedptr_registry_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_sharedptr_registry_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_sharedptr_registry_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6715,7 +6747,7 @@ unittest_sharedptr_registry_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LDFLAGS) $(LDFLAGS) -o $@
 am_unittest_shunique_lock_OBJECTS = test/common/unittest_shunique_lock-test_shunique_lock.$(OBJEXT)
 unittest_shunique_lock_OBJECTS = $(am_unittest_shunique_lock_OBJECTS)
-unittest_shunique_lock_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_shunique_lock_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10) $(am__DEPENDENCIES_3)
 unittest_shunique_lock_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6723,7 +6755,7 @@ unittest_shunique_lock_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_signals_OBJECTS = test/unittest_signals-signals.$(OBJEXT)
 unittest_signals_OBJECTS = $(am_unittest_signals_OBJECTS)
-unittest_signals_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_signals_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_signals_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6734,7 +6766,7 @@ am__unittest_simple_spin_SOURCES_DIST = test/simple_spin.cc
 unittest_simple_spin_OBJECTS = $(am_unittest_simple_spin_OBJECTS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@unittest_simple_spin_DEPENDENCIES = $(LIBCEPHFS) \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_10) \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_25)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__DEPENDENCIES_26)
 unittest_simple_spin_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_simple_spin_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6742,7 +6774,7 @@ unittest_simple_spin_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_sloppy_crc_map_OBJECTS = test/common/unittest_sloppy_crc_map-test_sloppy_crc_map.$(OBJEXT)
 unittest_sloppy_crc_map_OBJECTS =  \
 	$(am_unittest_sloppy_crc_map_OBJECTS)
-unittest_sloppy_crc_map_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_sloppy_crc_map_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_sloppy_crc_map_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6751,7 +6783,7 @@ unittest_sloppy_crc_map_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_str_list_OBJECTS =  \
 	test/unittest_str_list-test_str_list.$(OBJEXT)
 unittest_str_list_OBJECTS = $(am_unittest_str_list_OBJECTS)
-unittest_str_list_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_str_list_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_str_list_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6760,7 +6792,7 @@ unittest_str_list_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_str_map_OBJECTS =  \
 	test/common/unittest_str_map-test_str_map.$(OBJEXT)
 unittest_str_map_OBJECTS = $(am_unittest_str_map_OBJECTS)
-unittest_str_map_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_str_map_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_str_map_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6769,7 +6801,7 @@ unittest_str_map_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_striper_OBJECTS =  \
 	test/unittest_striper-test_striper.$(OBJEXT)
 unittest_striper_OBJECTS = $(am_unittest_striper_OBJECTS)
-unittest_striper_DEPENDENCIES = $(LIBOSDC) $(am__DEPENDENCIES_25) \
+unittest_striper_DEPENDENCIES = $(LIBOSDC) $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_striper_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6777,7 +6809,7 @@ unittest_striper_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_strtol_OBJECTS = test/unittest_strtol-strtol.$(OBJEXT)
 unittest_strtol_OBJECTS = $(am_unittest_strtol_OBJECTS)
-unittest_strtol_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_strtol_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_strtol_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6786,8 +6818,8 @@ unittest_strtol_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_subprocess_OBJECTS =  \
 	test/unittest_subprocess-test_subprocess.$(OBJEXT)
 unittest_subprocess_OBJECTS = $(am_unittest_subprocess_OBJECTS)
-unittest_subprocess_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25)
+unittest_subprocess_DEPENDENCIES = $(am__DEPENDENCIES_26) \
+	$(am__DEPENDENCIES_10)
 unittest_subprocess_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_subprocess_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6795,7 +6827,7 @@ unittest_subprocess_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_tableformatter_OBJECTS = test/common/unittest_tableformatter-test_tableformatter.$(OBJEXT)
 unittest_tableformatter_OBJECTS =  \
 	$(am_unittest_tableformatter_OBJECTS)
-unittest_tableformatter_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_tableformatter_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_tableformatter_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6804,8 +6836,8 @@ unittest_tableformatter_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_texttable_OBJECTS =  \
 	test/unittest_texttable-test_texttable.$(OBJEXT)
 unittest_texttable_OBJECTS = $(am_unittest_texttable_OBJECTS)
-unittest_texttable_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25)
+unittest_texttable_DEPENDENCIES = $(am__DEPENDENCIES_26) \
+	$(am__DEPENDENCIES_10)
 unittest_texttable_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_texttable_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -6813,7 +6845,7 @@ unittest_texttable_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 am_unittest_throttle_OBJECTS =  \
 	test/common/unittest_throttle-Throttle.$(OBJEXT)
 unittest_throttle_OBJECTS = $(am_unittest_throttle_OBJECTS)
-unittest_throttle_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_throttle_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_throttle_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6823,7 +6855,7 @@ am_unittest_time_OBJECTS =  \
 	test/common/unittest_time-test_time.$(OBJEXT)
 unittest_time_OBJECTS = $(am_unittest_time_OBJECTS)
 unittest_time_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_1) \
+	$(am__DEPENDENCIES_26) $(am__DEPENDENCIES_1) \
 	$(am__DEPENDENCIES_3)
 unittest_time_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6835,7 +6867,7 @@ am__unittest_transaction_SOURCES_DIST =  \
 unittest_transaction_OBJECTS = $(am_unittest_transaction_OBJECTS)
 @ENABLE_SERVER_TRUE@unittest_transaction_DEPENDENCIES =  \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_16) \
-@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_25) \
+@ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_26) \
 @ENABLE_SERVER_TRUE@	$(am__DEPENDENCIES_10)
 unittest_transaction_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6843,7 +6875,7 @@ unittest_transaction_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_utf8_OBJECTS = test/unittest_utf8-utf8.$(OBJEXT)
 unittest_utf8_OBJECTS = $(am_unittest_utf8_OBJECTS)
-unittest_utf8_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_utf8_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_utf8_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6853,7 +6885,7 @@ am_unittest_util_OBJECTS =  \
 	test/common/unittest_util-test_util.$(OBJEXT)
 unittest_util_OBJECTS = $(am_unittest_util_OBJECTS)
 unittest_util_DEPENDENCIES = $(am__DEPENDENCIES_4) \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_1) \
+	$(am__DEPENDENCIES_26) $(am__DEPENDENCIES_1) \
 	$(am__DEPENDENCIES_3)
 unittest_util_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6863,7 +6895,7 @@ am_unittest_weighted_priority_queue_OBJECTS = test/common/unittest_weighted_prio
 unittest_weighted_priority_queue_OBJECTS =  \
 	$(am_unittest_weighted_priority_queue_OBJECTS)
 unittest_weighted_priority_queue_DEPENDENCIES =  \
-	$(am__DEPENDENCIES_25) $(am__DEPENDENCIES_10)
+	$(am__DEPENDENCIES_26) $(am__DEPENDENCIES_10)
 unittest_weighted_priority_queue_LINK = $(LIBTOOL) $(AM_V_lt) \
 	--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
 	$(CXXLD) $(unittest_weighted_priority_queue_CXXFLAGS) \
@@ -6871,7 +6903,7 @@ unittest_weighted_priority_queue_LINK = $(LIBTOOL) $(AM_V_lt) \
 am_unittest_workqueue_OBJECTS =  \
 	test/unittest_workqueue-test_workqueue.$(OBJEXT)
 unittest_workqueue_OBJECTS = $(am_unittest_workqueue_OBJECTS)
-unittest_workqueue_DEPENDENCIES = $(am__DEPENDENCIES_25) \
+unittest_workqueue_DEPENDENCIES = $(am__DEPENDENCIES_26) \
 	$(am__DEPENDENCIES_10)
 unittest_workqueue_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -6879,8 +6911,8 @@ unittest_workqueue_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(LDFLAGS) -o $@
 am_unittest_xlist_OBJECTS = test/unittest_xlist-test_xlist.$(OBJEXT)
 unittest_xlist_OBJECTS = $(am_unittest_xlist_OBJECTS)
-unittest_xlist_DEPENDENCIES = $(am__DEPENDENCIES_25) \
-	$(am__DEPENDENCIES_4)
+unittest_xlist_DEPENDENCIES = $(am__DEPENDENCIES_26) \
+	$(am__DEPENDENCIES_10)
 unittest_xlist_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
 	$(unittest_xlist_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
@@ -7873,23 +7905,29 @@ am__noinst_HEADERS_DIST = arch/intel.h arch/arm.h arch/probe.h \
 	librbd/AioCompletion.h librbd/AioImageRequest.h \
 	librbd/AioImageRequestWQ.h librbd/AioObjectRequest.h \
 	librbd/AsyncObjectThrottle.h librbd/AsyncOperation.h \
-	librbd/AsyncRequest.h librbd/CopyupRequest.h \
-	librbd/DiffIterate.h librbd/ExclusiveLock.h librbd/ImageCtx.h \
-	librbd/ImageState.h librbd/ImageWatcher.h librbd/internal.h \
-	librbd/Journal.h librbd/LibrbdAdminSocketHook.h \
-	librbd/LibrbdWriteback.h librbd/MirroringWatcher.h \
-	librbd/ObjectMap.h librbd/ObjectWatcher.h librbd/Operations.h \
+	librbd/AsyncRequest.h librbd/BlockGuard.h \
+	librbd/CopyupRequest.h librbd/DiffIterate.h \
+	librbd/ExclusiveLock.h librbd/ImageCtx.h librbd/ImageState.h \
+	librbd/ImageWatcher.h librbd/internal.h librbd/Journal.h \
+	librbd/LibrbdAdminSocketHook.h librbd/LibrbdWriteback.h \
+	librbd/MirroringWatcher.h librbd/ObjectMap.h \
+	librbd/ObjectWatcher.h librbd/Operations.h \
 	librbd/parent_types.h librbd/SnapInfo.h librbd/TaskFinisher.h \
 	librbd/Utils.h librbd/WatchNotifyTypes.h \
 	librbd/exclusive_lock/AcquireRequest.h \
+	librbd/exclusive_lock/AutomaticPolicy.h \
+	librbd/exclusive_lock/BreakRequest.h \
+	librbd/exclusive_lock/GetLockerRequest.h \
 	librbd/exclusive_lock/Policy.h \
+	librbd/exclusive_lock/ReacquireRequest.h \
 	librbd/exclusive_lock/ReleaseRequest.h \
 	librbd/exclusive_lock/StandardPolicy.h \
-	librbd/image/CloseRequest.h librbd/image/OpenRequest.h \
-	librbd/image/RefreshParentRequest.h \
+	librbd/exclusive_lock/Types.h librbd/image/CloseRequest.h \
+	librbd/image/OpenRequest.h librbd/image/RefreshParentRequest.h \
 	librbd/image/RefreshRequest.h librbd/image/SetSnapRequest.h \
 	librbd/image_watcher/Notifier.h \
 	librbd/image_watcher/NotifyLockOwner.h \
+	librbd/image_watcher/RewatchRequest.h \
 	librbd/journal/DisabledPolicy.h librbd/journal/Policy.h \
 	librbd/journal/Replay.h librbd/journal/StandardPolicy.h \
 	librbd/journal/Types.h librbd/journal/TypeTraits.h \
@@ -7962,12 +8000,12 @@ am__noinst_HEADERS_DIST = arch/intel.h arch/arm.h arch/probe.h \
 	cls/replica_log/cls_replica_log_types.h \
 	cls/replica_log/cls_replica_log_ops.h \
 	cls/replica_log/cls_replica_log_client.h \
-	cls/rgw/cls_rgw_client.h cls/rgw/cls_rgw_ops.h \
-	cls/rgw/cls_rgw_types.h cls/user/cls_user_client.h \
-	cls/user/cls_user_ops.h cls/user/cls_user_types.h \
-	cls/cephfs/cls_cephfs.h cls/cephfs/cls_cephfs_client.h \
+	cls/user/cls_user_client.h cls/user/cls_user_ops.h \
+	cls/user/cls_user_types.h cls/cephfs/cls_cephfs.h \
+	cls/cephfs/cls_cephfs_client.h \
 	cls/journal/cls_journal_client.h \
-	cls/journal/cls_journal_types.h \
+	cls/journal/cls_journal_types.h cls/rgw/cls_rgw_client.h \
+	cls/rgw/cls_rgw_ops.h cls/rgw/cls_rgw_types.h \
 	key_value_store/key_value_structure.h \
 	key_value_store/kv_flat_btree_async.h \
 	key_value_store/kvs_arg_types.h rbd_replay/ActionTypes.h \
@@ -8046,6 +8084,7 @@ am__noinst_HEADERS_DIST = arch/intel.h arch/arm.h arch/probe.h \
 	tools/rbd_mirror/image_replayer/CloseImageRequest.h \
 	tools/rbd_mirror/image_replayer/CreateImageRequest.h \
 	tools/rbd_mirror/image_replayer/EventPreprocessor.h \
+	tools/rbd_mirror/image_replayer/IsPrimaryRequest.h \
 	tools/rbd_mirror/image_replayer/OpenImageRequest.h \
 	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h \
 	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.h \
@@ -8568,7 +8607,7 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = gnu subdir-objects
 SUBDIRS = ocf java
 DIST_SUBDIRS = gmock ocf java
-BUILT_SOURCES = $(am__append_281) $(am__append_313)
+BUILT_SOURCES = $(am__append_291) $(am__append_323)
 
 # extra bits
 EXTRA_DIST = $(am__append_32) ceph-detect-init/AUTHORS.rst \
@@ -9290,8 +9329,8 @@ EXTRA_DIST = $(am__append_32) ceph-detect-init/AUTHORS.rst \
 	spdk/include/spdk/queue_extras.h spdk/include/spdk/file.h \
 	spdk/include/spdk/assert.h spdk/include/spdk/barrier.h \
 	spdk/include/spdk/mmio.h tracing/CMakeLists.txt \
-	tracing/tracing-common.h $(am__append_282) $(am__append_286) \
-	$(am__append_290) pybind/CMakeLists.txt \
+	tracing/tracing-common.h $(am__append_292) $(am__append_296) \
+	$(am__append_300) pybind/CMakeLists.txt \
 	$(srcdir)/$(shell_scripts:%=%.in) $(srcdir)/vstart.sh \
 	$(srcdir)/stop.sh ceph-run $(srcdir)/ceph-osd-prestart.sh \
 	$(srcdir)/ceph_common.sh $(srcdir)/init-radosgw \
@@ -9561,14 +9600,15 @@ noinst_HEADERS = arch/intel.h arch/arm.h arch/probe.h \
 	include/unordered_set.h include/unordered_map.h \
 	include/timegm.h include/event_type.h $(am__append_133) \
 	$(am__append_136) $(am__append_137) $(am__append_142) \
-	$(am__append_152) $(am__append_154) $(am__append_157) \
-	$(am__append_158) $(am__append_164) $(am__append_200) \
-	$(am__append_206) $(am__append_218) $(am__append_226) \
-	$(am__append_232) $(am__append_244) test/bench/backend.h \
-	test/bench/bencher.h test/bench/detailed_stat_collector.h \
-	test/bench/distribution.h test/bench/dumb_backend.h \
-	test/bench/rados_backend.h test/bench/rbd_backend.h \
-	test/bench/stat_collector.h test/bench/testfilestore_backend.h \
+	$(am__append_153) $(am__append_160) $(am__append_161) \
+	$(am__append_166) $(am__append_167) $(am__append_173) \
+	$(am__append_209) $(am__append_215) $(am__append_228) \
+	$(am__append_236) $(am__append_242) $(am__append_254) \
+	test/bench/backend.h test/bench/bencher.h \
+	test/bench/detailed_stat_collector.h test/bench/distribution.h \
+	test/bench/dumb_backend.h test/bench/rados_backend.h \
+	test/bench/rbd_backend.h test/bench/stat_collector.h \
+	test/bench/testfilestore_backend.h \
 	test/common/ObjectContents.h test/encoding/types.h \
 	test/objectstore/DeterministicOpSequence.h \
 	test/objectstore/FileStoreDiff.h \
@@ -9587,7 +9627,7 @@ noinst_HEADERS = arch/intel.h arch/arm.h arch/probe.h \
 	test/system/st_rados_list_objects.h \
 	test/system/st_rados_notify.h test/system/st_rados_watch.h \
 	test/system/systest_runnable.h test/system/systest_settings.h \
-	test/unit.h test/journal/RadosTestFixture.h $(am__append_269) \
+	test/unit.h test/journal/RadosTestFixture.h $(am__append_279) \
 	tools/cephfs/JournalTool.h tools/cephfs/JournalScanner.h \
 	tools/cephfs/JournalFilter.h tools/cephfs/EventOutput.h \
 	tools/cephfs/Resetter.h tools/cephfs/Dumper.h \
@@ -9599,14 +9639,14 @@ noinst_HEADERS = arch/intel.h arch/arm.h arch/probe.h \
 	bash_completion/ceph bash_completion/rados bash_completion/rbd \
 	bash_completion/radosgw-admin mount/canonicalize.c \
 	mount/mtab.c objclass/objclass.h
-bin_SCRIPTS = $(am__append_31) $(am__append_299) $(am__append_310) \
-	$(am__append_316)
-sbin_SCRIPTS = $(am__append_302)
+bin_SCRIPTS = $(am__append_31) $(am__append_309) $(am__append_320) \
+	$(am__append_326)
+sbin_SCRIPTS = $(am__append_312)
 su_sbin_SCRIPTS = 
 dist_bin_SCRIPTS = 
 lib_LTLIBRARIES = $(am__append_132) $(am__append_135) \
-	$(am__append_141) $(am__append_147) $(am__append_280) \
-	$(am__append_308) $(am__append_309)
+	$(am__append_141) $(am__append_148) $(am__append_290) \
+	$(am__append_318) $(am__append_319)
 noinst_LTLIBRARIES = libarch.la libauth.la libcrush.la libmon_types.la \
 	$(am__append_50) libosd_types.la $(am__append_88) \
 	liberasure_code.la libcompressor.la libosdc.la \
@@ -9614,27 +9654,29 @@ noinst_LTLIBRARIES = libarch.la libauth.la libcrush.la libmon_types.la \
 	libjson_spirit.la liblog.la libperfglue.la \
 	libcommon_internal.la libcommon_crc.la $(am__append_115) \
 	libcommon.la $(am__append_118) libmsg.la $(am__append_128) \
-	librbd_types.la $(am__append_139) $(am__append_149) \
-	$(am__append_153) $(am__append_159) $(am__append_219) \
-	$(am__append_229) $(am__append_234) $(am__append_262) \
-	$(am__append_273) $(am__append_300)
+	librbd_types.la $(am__append_139) $(am__append_150) \
+	$(am__append_154) $(am__append_156) $(am__append_158) \
+	$(am__append_168) $(am__append_229) $(am__append_239) \
+	$(am__append_244) $(am__append_272) $(am__append_283) \
+	$(am__append_310)
 noinst_LIBRARIES = $(am__append_33) $(am__append_46) libos_types.a \
 	$(am__append_58) $(am__append_62) $(am__append_68)
-radoslib_LTLIBRARIES = $(am__append_155) $(am__append_156)
+radoslib_LTLIBRARIES = $(am__append_162) $(am__append_163) \
+	$(am__append_164) $(am__append_165)
 
 # like bin_PROGRAMS, but these targets are only built for debug builds
-bin_DEBUGPROGRAMS = $(am__append_98) $(am__append_151) \
-	$(am__append_166) $(am__append_220) $(am__append_221) \
-	$(am__append_222) $(am__append_223) $(am__append_225) \
-	$(am__append_227) $(am__append_233) $(am__append_235) \
-	$(am__append_236) $(am__append_239) $(am__append_241) \
-	$(am__append_242) $(am__append_243) $(am__append_245) \
-	$(am__append_247) $(am__append_249) $(am__append_250) \
-	$(am__append_256) ceph_test_timers ceph_test_signal_handlers \
-	ceph_test_rewrite_latency ceph_test_crypto $(am__append_261) \
+bin_DEBUGPROGRAMS = $(am__append_98) $(am__append_152) \
+	$(am__append_175) $(am__append_230) $(am__append_231) \
+	$(am__append_232) $(am__append_233) $(am__append_235) \
+	$(am__append_237) $(am__append_243) $(am__append_245) \
+	$(am__append_246) $(am__append_249) $(am__append_251) \
+	$(am__append_252) $(am__append_253) $(am__append_255) \
+	$(am__append_257) $(am__append_259) $(am__append_260) \
+	$(am__append_266) ceph_test_timers ceph_test_signal_handlers \
+	ceph_test_rewrite_latency ceph_test_crypto $(am__append_271) \
 	ceph_bench_log ceph_test_objectcacher_stress \
-	ceph_test_cfuse_cache_invalidate $(am__append_265) \
-	$(am__append_266) $(am__append_275) $(am__append_276) \
+	ceph_test_cfuse_cache_invalidate $(am__append_275) \
+	$(am__append_276) $(am__append_285) $(am__append_286) \
 	ceph_psim
 
 # like sbin_SCRIPTS but can be used to install to e.g. /usr/sbin
@@ -9644,12 +9686,12 @@ ceph_sbindir = $(sbindir)
 su_sbindir = /sbin
 
 # C/C++ tests to build and executed will be appended to this
-check_TESTPROGRAMS = $(am__append_182) $(am__append_186) \
-	$(am__append_189) $(am__append_224) $(am__append_228) \
-	$(am__append_237) $(am__append_246) $(am__append_248) \
-	$(am__append_252) $(am__append_253) $(am__append_257) \
-	$(am__append_258) $(am__append_259) $(am__append_260) \
-	unittest_addrs $(am__append_264) unittest_bloom_filter \
+check_TESTPROGRAMS = $(am__append_191) $(am__append_195) \
+	$(am__append_198) $(am__append_234) $(am__append_238) \
+	$(am__append_247) $(am__append_256) $(am__append_258) \
+	$(am__append_262) $(am__append_263) $(am__append_267) \
+	$(am__append_268) $(am__append_269) $(am__append_270) \
+	unittest_addrs $(am__append_274) unittest_bloom_filter \
 	unittest_histogram unittest_prioritized_queue \
 	unittest_weighted_priority_queue unittest_str_map \
 	unittest_mutex_debug unittest_shunique_lock \
@@ -9711,7 +9753,7 @@ check_TESTPROGRAMS = $(am__append_182) $(am__append_186) \
 # GNU Library Public License for more details.
 #
 check_SCRIPTS = ceph-detect-init/run-tox.sh ceph-disk/run-tox.sh \
-	$(am__append_163) $(am__append_231) \
+	$(am__append_172) $(am__append_241) \
 	test/ceph_objectstore_tool.py test/test-ceph-helpers.sh \
 	test/cephtool-test-osd.sh test/cephtool-test-mon.sh \
 	test/cephtool-test-mds.sh test/cephtool-test-rados.sh \
@@ -9836,8 +9878,8 @@ LIBCOMMON_DEPS = libcommon_internal.la libcommon_crc.la \
 	$(LIBMSG) $(LIBAUTH) $(LIBCRUSH) $(LIBJSON_SPIRIT) $(LIBLOG) \
 	$(LIBARCH) $(BOOST_RANDOM_LIBS) -luuid $(am__append_116)
 LIBRADOS_DEPS = $(am__append_129)
-LIBRGW_DEPS = $(am__append_146)
-LIBCIVETWEB_DEPS = $(am__append_148)
+LIBRGW_DEPS = $(am__append_146) $(am__append_147)
+LIBCIVETWEB_DEPS = $(am__append_149)
 
 # This is used by the dencoder test
 
@@ -9845,16 +9887,17 @@ LIBCIVETWEB_DEPS = $(am__append_148)
 DENCODER_SOURCES = $(am__append_48) perfglue/disabled_heap_profiler.cc \
 	perfglue/disabled_stubs.cc $(am__append_143)
 DENCODER_DEPS = $(am__append_49) $(am__append_138) $(am__append_144) \
-	$(am__append_160)
+	$(am__append_155) $(am__append_157) $(am__append_159) \
+	$(am__append_169)
 radoslibdir = $(libdir)/rados-classes
-LOCAL_ALL = ceph-detect-init-all ceph-disk-all $(am__append_283) \
-	$(am__append_287) $(am__append_291)
-LOCAL_CLEAN = ceph-detect-init-clean ceph-disk-clean $(am__append_284) \
-	$(am__append_288) $(am__append_292) base-clean-local
+LOCAL_ALL = ceph-detect-init-all ceph-disk-all $(am__append_293) \
+	$(am__append_297) $(am__append_301)
+LOCAL_CLEAN = ceph-detect-init-clean ceph-disk-clean $(am__append_294) \
+	$(am__append_298) $(am__append_302) base-clean-local
 LOCAL_INSTALLDATA = ceph-detect-init-install-data \
 	ceph-disk-install-data base-install-data-local
-LOCAL_INSTALLEXEC = $(am__append_285) $(am__append_289) \
-	$(am__append_293)
+LOCAL_INSTALLEXEC = $(am__append_295) $(am__append_299) \
+	$(am__append_303)
 libarch_la_SOURCES = \
 	arch/intel.c \
 	arch/arm.c \
@@ -10024,7 +10067,7 @@ erasure_codelib_LTLIBRARIES = libec_jerasure_generic.la \
 	libec_jerasure.la libec_lrc.la libec_shec_generic.la \
 	$(am__append_81) $(am__append_83) $(am__append_85) \
 	libec_shec.la $(am__append_90)
-check_LTLIBRARIES = $(am__append_171)
+check_LTLIBRARIES = $(am__append_180)
 jerasure_sources = \
   erasure-code/ErasureCode.cc \
   erasure-code/jerasure/jerasure/src/cauchy.c \
@@ -10331,7 +10374,7 @@ liberasure_code_la_DEPENDENCIES = $(erasure_codelib_LTLIBRARIES)
 @LINUX_TRUE@liberasure_code_la_LIBADD = -ldl
 compressorlibdir = $(pkglibdir)/compressor
 compressorlib_LTLIBRARIES = libceph_zlib.la libceph_snappy.la \
-	$(am__append_207)
+	$(am__append_216)
 zlib_sources = \
   compressor/Compressor.cc \
   compressor/zlib/CompressionPluginZlib.cc \
@@ -10437,12 +10480,13 @@ libcommon_internal_la_SOURCES = ceph_ver.c common/AsyncOpTracker.cc \
 	common/bloom_filter.cc common/module.c common/Readahead.cc \
 	common/Cycles.cc common/ContextCompletion.cc \
 	common/TracepointProvider.cc common/PluginRegistry.cc \
-	common/scrub_types.cc common/blkdev.cc $(am__append_108) \
-	$(am__append_109) $(am__append_110) $(am__append_111) \
-	$(am__append_112) mon/MonCap.cc mon/MonClient.cc mon/MonMap.cc \
-	osd/OSDMap.cc osd/osd_types.cc osd/ECMsgTypes.cc osd/HitSet.cc \
-	mds/MDSMap.cc mds/FSMap.cc mds/inode_backtrace.cc \
-	mds/mdstypes.cc mds/flock.cc
+	common/scrub_types.cc common/blkdev.cc common/ceph_json.cc \
+	common/util.cc $(am__append_108) $(am__append_109) \
+	$(am__append_110) $(am__append_111) $(am__append_112) \
+	mon/MonCap.cc mon/MonClient.cc mon/MonMap.cc osd/OSDMap.cc \
+	osd/osd_types.cc osd/ECMsgTypes.cc osd/HitSet.cc mds/MDSMap.cc \
+	mds/FSMap.cc mds/inode_backtrace.cc mds/mdstypes.cc \
+	mds/flock.cc
 
 # inject crc in common
 libcommon_crc_la_SOURCES = common/sctp_crc32.c common/crc32c.cc \
@@ -10568,6 +10612,10 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/Operations.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/Utils.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AcquireRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/AutomaticPolicy.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/BreakRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/GetLockerRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReacquireRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/ReleaseRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/exclusive_lock/StandardPolicy.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/CloseRequest.cc \
@@ -10577,6 +10625,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image/SetSnapRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/Notifier.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/NotifyLockOwner.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/image_watcher/RewatchRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/Replay.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/journal/StandardPolicy.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	librbd/object_map/InvalidateRequest.cc \
@@ -10758,10 +10807,10 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@	cls/replica_log/cls_replica_log_ops.cc \
 @ENABLE_CLIENT_TRUE@	cls/replica_log/cls_replica_log_client.cc
 
-@ENABLE_CLIENT_TRUE@libcls_rgw_client_la_SOURCES = \
-@ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_client.cc \
-@ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_types.cc \
-@ENABLE_CLIENT_TRUE@	cls/rgw/cls_rgw_ops.cc
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@libcls_rgw_client_la_SOURCES = \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_client.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_types.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_ops.cc
 
 @ENABLE_CLIENT_TRUE@libcls_rbd_client_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@	cls/rbd/cls_rbd_client.cc \
@@ -10816,14 +10865,14 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_user_la_SOURCES = cls/user/cls_user.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_user_la_LIBADD = $(PTHREAD_LIBS) $(EXTRALIBS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_user_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_rgw_la_SOURCES = \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw.cc \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw_ops.cc \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	cls/rgw/cls_rgw_types.cc \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	common/ceph_json.cc
-
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_rgw_la_LIBADD = libjson_spirit.la $(PTHREAD_LIBS) $(EXTRALIBS)
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_rgw_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@libcls_rgw_la_SOURCES = \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw.cc \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_ops.cc \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	cls/rgw/cls_rgw_types.cc \
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@	common/ceph_json.cc
+
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@libcls_rgw_la_LIBADD = libjson_spirit.la $(PTHREAD_LIBS) $(EXTRALIBS)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@@WITH_RADOSGW_TRUE@libcls_rgw_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_cephfs_la_SOURCES = cls/cephfs/cls_cephfs.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_cephfs_la_LIBADD = $(PTHREAD_LIBS) $(EXTRALIBS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libcls_cephfs_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
@@ -10894,7 +10943,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(BOOST_PROGRAM_OPTIONS_LIBS) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_165)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_174)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@ceph_erasure_code_non_regression_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/ceph_erasure_code_non_regression.cc
 
@@ -10902,7 +10951,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(BOOST_PROGRAM_OPTIONS_LIBS) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_167)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_176)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@ceph_erasure_code_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/ceph_erasure_code.cc
 
@@ -10910,7 +10959,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(BOOST_PROGRAM_OPTIONS_LIBS) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_169)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_178)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_example_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/ErasureCodePluginExample.cc
@@ -10922,7 +10971,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_170)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_179)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_entry_point_la_SOURCES = test/erasure-code/ErasureCodePluginMissingEntryPoint.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_entry_point_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_entry_point_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10931,7 +10980,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_172)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_181)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_version_la_SOURCES = test/erasure-code/ErasureCodePluginMissingVersion.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_version_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_missing_version_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10940,7 +10989,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_173)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_182)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_hangs_la_SOURCES = test/erasure-code/ErasureCodePluginHangs.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_hangs_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_hangs_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10949,7 +10998,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_174)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_183)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_initialize_la_SOURCES = test/erasure-code/ErasureCodePluginFailToInitialize.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_initialize_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_initialize_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10958,7 +11007,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_175)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_184)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_register_la_SOURCES = test/erasure-code/ErasureCodePluginFailToRegister.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_register_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_fail_to_register_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10967,7 +11016,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_176)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_185)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_neon_la_SOURCES = test/erasure-code/TestJerasurePluginNEON.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_neon_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_neon_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10976,7 +11025,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_177)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_186)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse4_la_SOURCES = test/erasure-code/TestJerasurePluginSSE4.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse4_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse4_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10985,7 +11034,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_178)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_187)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse3_la_SOURCES = test/erasure-code/TestJerasurePluginSSE3.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse3_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_sse3_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -10994,7 +11043,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_179)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_188)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_generic_la_SOURCES = test/erasure-code/TestJerasurePluginGeneric.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_generic_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_jerasure_generic_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -11003,7 +11052,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_180)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_189)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodePlugin.cc 
@@ -11013,7 +11062,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_181)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_190)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCode.cc
@@ -11036,7 +11085,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_183)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_192)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_jerasure_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodePluginJerasure.cc
 
@@ -11045,7 +11094,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_184)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_193)
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_isa_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeIsa.cc
@@ -11057,7 +11106,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	libisa.la \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(LIBERASURE_CODE) \
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__append_185)
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__append_194)
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_isa_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodePluginIsa.cc
@@ -11068,7 +11117,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(LIBERASURE_CODE) \
-@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__append_187)
+@ENABLE_SERVER_TRUE@@WITH_BETTER_YASM_ELF64_TRUE@@WITH_OSD_TRUE@	$(am__append_196)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_lrc_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeLrc.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${lrc_sources}
@@ -11078,7 +11127,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_188)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_197)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_lrc_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodePluginLrc.cc
 
@@ -11087,7 +11136,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_190)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_199)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeShec.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${shec_sources}
@@ -11108,7 +11157,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_191)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_200)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_all_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeShec_all.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${shec_sources}
@@ -11129,7 +11178,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_192)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_201)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_thread_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeShec_thread.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${shec_sources}
@@ -11150,7 +11199,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_193)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_202)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_shec_arguments_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeShec_arguments.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${shec_sources}
@@ -11171,7 +11220,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_194)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_203)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_plugin_shec_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@        test/erasure-code/TestErasureCodePluginShec.cc
 
@@ -11180,7 +11229,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_195)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_204)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_neon_la_SOURCES = test/erasure-code/TestShecPluginNEON.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_neon_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_neon_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -11189,7 +11238,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_196)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_205)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse4_la_SOURCES = test/erasure-code/TestShecPluginSSE4.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse4_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse4_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -11198,7 +11247,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_197)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_206)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse3_la_SOURCES = test/erasure-code/TestShecPluginSSE3.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse3_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_sse3_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -11207,7 +11256,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_198)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_207)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_generic_la_SOURCES = test/erasure-code/TestShecPluginGeneric.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_generic_la_CFLAGS = ${AM_CFLAGS}
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libec_test_shec_generic_la_CXXFLAGS = ${AM_CXXFLAGS}
@@ -11216,7 +11265,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${AM_LDFLAGS} -module \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-avoid-version -shared \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	-rpath /nowhere \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_199)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_208)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_erasure_code_example_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	erasure-code/ErasureCode.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/erasure-code/TestErasureCodeExample.cc
@@ -11234,7 +11283,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(PTHREAD_LIBS) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(EXTRALIBS) \
-@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_201)
+@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_210)
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@simple_client_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/simple_client.cc \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/simple_dispatcher.cc
@@ -11246,7 +11295,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(PTHREAD_LIBS) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(EXTRALIBS) \
-@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_202)
+@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_211)
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@xio_server_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/xio_server.cc \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/xio_dispatcher.cc
@@ -11258,7 +11307,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(PTHREAD_LIBS) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(EXTRALIBS) \
-@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_204)
+@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_213)
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@xio_client_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/xio_client.cc \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	test/messenger/xio_dispatcher.cc
@@ -11270,7 +11319,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(PTHREAD_LIBS) \
 @ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(EXTRALIBS) \
-@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_205)
+@ENABLE_SERVER_TRUE@@ENABLE_XIO_TRUE@	$(am__append_214)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@libceph_example_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	compressor/Compressor.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/compressor/compressor_plugin_example.cc
@@ -11288,7 +11337,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_208)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_217)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_snappy_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/compressor/test_compression_snappy.cc \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	${snappy_sources}
@@ -11298,7 +11347,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_209)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_218)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_snappy_LDFLAGS = -lsnappy
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_plugin_snappy_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/compressor/test_compression_plugin_snappy.cc \
@@ -11310,7 +11359,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBCOMPRESSOR) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_210)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_219)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_plugin_snappy_LDFLAGS = -lsnappy
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_zlib_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/compressor/test_compression_zlib.cc \
@@ -11321,7 +11370,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBCOMMON) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_211)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_220)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_zlib_LDFLAGS = -lz
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_plugin_zlib_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	test/compressor/test_compression_plugin_zlib.cc \
@@ -11333,7 +11382,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBCOMPRESSOR) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_212)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_221)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_compression_plugin_zlib_LDFLAGS = -lz
 
 # This should use LIBMDS_TYPES once it exists
@@ -11341,23 +11390,17 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@	test/encoding/ceph_dencoder.cc \
 @ENABLE_CLIENT_TRUE@	$(DENCODER_SOURCES)
 
-@ENABLE_CLIENT_TRUE@ceph_dencoder_LDADD = \
-@ENABLE_CLIENT_TRUE@	$(LIBRGW) \
-@ENABLE_CLIENT_TRUE@	$(LIBRADOS) \
-@ENABLE_CLIENT_TRUE@	$(LIBRBD_TYPES) \
-@ENABLE_CLIENT_TRUE@	$(LIBOSD_TYPES) \
-@ENABLE_CLIENT_TRUE@	$(LIBOS_TYPES) \
-@ENABLE_CLIENT_TRUE@	$(LIBMON_TYPES) \
-@ENABLE_CLIENT_TRUE@	$(DENCODER_DEPS) \
-@ENABLE_CLIENT_TRUE@	$(CEPH_GLOBAL)
-
+@ENABLE_CLIENT_TRUE@ceph_dencoder_LDADD = $(LIBRADOS) $(LIBRBD_TYPES) \
+@ENABLE_CLIENT_TRUE@	$(LIBOSD_TYPES) $(LIBOS_TYPES) \
+@ENABLE_CLIENT_TRUE@	$(LIBMON_TYPES) $(DENCODER_DEPS) \
+@ENABLE_CLIENT_TRUE@	$(CEPH_GLOBAL) $(am__append_226)
 
 # These should always use explicit _CFLAGS/_CXXFLAGS so avoid basename conflicts
 @ENABLE_CLIENT_TRUE@ceph_dencoder_CFLAGS = ${AM_CFLAGS} \
-@ENABLE_CLIENT_TRUE@	$(am__append_213)
+@ENABLE_CLIENT_TRUE@	$(am__append_222)
 @ENABLE_CLIENT_TRUE@ceph_dencoder_CXXFLAGS = ${AM_CXXFLAGS} \
-@ENABLE_CLIENT_TRUE@	$(am__append_214) $(am__append_215) \
-@ENABLE_CLIENT_TRUE@	$(am__append_216)
+@ENABLE_CLIENT_TRUE@	$(am__append_223) $(am__append_224) \
+@ENABLE_CLIENT_TRUE@	$(am__append_225)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@libradostest_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados/test.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@	test/librados/TestCase.cc
@@ -11616,14 +11659,19 @@ librbd_types_la_SOURCES = \
 
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@librbd_test_mock_la_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@unittest_librbd_SOURCES = \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@        test/librbd/test_BlockGuard.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@        test/librbd/test_main.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_mock_fixture.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_mock_ExclusiveLock.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_mock_Journal.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/test_mock_ObjectWatcher.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/test_mock_AcquireRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/test_mock_BreakRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/image/test_mock_RefreshRequest.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/image_watcher/test_mock_RewatchRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/journal/test_mock_Replay.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/object_map/test_mock_InvalidateRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	test/librbd/object_map/test_mock_LockRequest.cc \
@@ -11796,7 +11844,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	test/libcephfs/multiclient.cc \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	test/libcephfs/access.cc \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	test/libcephfs/acl.cc \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_238)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_248)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@ceph_test_libcephfs_LDADD = $(LIBRADOS) $(LIBCEPHFS) $(LIBCOMMON) $(UNITTEST_LDADD)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@ceph_test_libcephfs_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@unittest_encoding_SOURCES = test/encoding.cc
@@ -11819,7 +11867,7 @@ librbd_types_la_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	-Wignored-qualifiers \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	-Wold-style-definition \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	-Wtype-limits \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_240)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_250)
 @ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@test_build_librgw_SOURCES = \
 @ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	test/buildtest_skeleton.cc \
 @ENABLE_CLIENT_TRUE@@WITH_BUILD_TESTS_TRUE@@WITH_RADOSGW_TRUE@@WITH_RADOS_TRUE@	$(librgw_la_SOURCES)
@@ -12047,13 +12095,13 @@ librbd_types_la_SOURCES = \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_osdscrub_LDADD =  \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_254)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_264)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_pglog_SOURCES = test/osd/TestPGLog.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_pglog_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_pglog_LDADD = $(LIBOSD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(UNITTEST_LDADD) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_255)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_265)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_hitset_SOURCES = test/osd/hitset.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_hitset_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@unittest_hitset_LDADD = $(LIBOSD) $(UNITTEST_LDADD) $(CEPH_GLOBAL)
@@ -12120,7 +12168,7 @@ UNITTEST_CXXFLAGS = \
 UNITTEST_LDADD = $(top_builddir)/src/gmock/lib/libgmock_main.la \
 	$(top_builddir)/src/gmock/lib/libgmock.la \
 	$(top_builddir)/src/gmock/gtest/lib/libgtest.la \
-	$(PTHREAD_LIBS) $(am__append_263)
+	$(PTHREAD_LIBS) $(am__append_273)
 unittest_addrs_SOURCES = test/test_addrs.cc
 unittest_addrs_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_addrs_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
@@ -12185,7 +12233,7 @@ unittest_str_list_SOURCES = test/test_str_list.cc
 unittest_str_list_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_str_list_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_log_SOURCES = log/test.cc
-unittest_log_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_log_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_log_CXXFLAGS = $(UNITTEST_CXXFLAGS) -O2
 unittest_throttle_SOURCES = test/common/Throttle.cc
 unittest_throttle_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
@@ -12218,7 +12266,7 @@ unittest_bufferlist_SOURCES = test/bufferlist.cc
 unittest_bufferlist_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_bufferlist_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_xlist_SOURCES = test/test_xlist.cc
-unittest_xlist_LDADD = $(UNITTEST_LDADD) $(LIBCOMMON)
+unittest_xlist_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_xlist_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_crc32c_SOURCES = test/common/test_crc32c.cc
 unittest_crc32c_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
@@ -12266,7 +12314,7 @@ unittest_safe_io_SOURCES = test/common/test_safe_io.cc
 unittest_safe_io_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_safe_io_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_heartbeatmap_SOURCES = test/heartbeat_map.cc
-unittest_heartbeatmap_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD) $(CEPH_GLOBAL)
+unittest_heartbeatmap_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_heartbeatmap_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 
 # why does this include rgw/rgw_formats.cc...?
@@ -12283,7 +12331,7 @@ unittest_ipaddr_SOURCES = test/test_ipaddr.cc
 unittest_ipaddr_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_ipaddr_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_texttable_SOURCES = test/test_texttable.cc
-unittest_texttable_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_texttable_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_texttable_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_on_exit_SOURCES = test/on_exit.cc
 unittest_on_exit_LDADD = $(PTHREAD_LIBS)
@@ -12300,7 +12348,7 @@ unittest_interval_set_SOURCES = test/common/test_interval_set.cc
 unittest_interval_set_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_interval_set_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_subprocess_SOURCES = test/test_subprocess.cc
-unittest_subprocess_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_subprocess_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_subprocess_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 unittest_async_compressor_SOURCES = test/common/test_async_compressor.cc
 unittest_async_compressor_CXXFLAGS = $(UNITTEST_CXXFLAGS)
@@ -12363,7 +12411,7 @@ ceph_test_cfuse_cache_invalidate_SOURCES = test/test_cfuse_cache_invalidate.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/action/Snap.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/action/Status.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd/action/Watch.cc \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_268)
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_278)
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@rbd_LDADD = libjournal.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	libcls_journal_client.la \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	libcls_rbd_client.la \
@@ -12374,7 +12422,7 @@ ceph_test_cfuse_cache_invalidate_SOURCES = test/test_cfuse_cache_invalidate.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(BOOST_REGEX_LIBS) \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(BOOST_PROGRAM_OPTIONS_LIBS) \
-@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_270)
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	$(am__append_280)
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@rbd_nbd_SOURCES = tools/rbd_nbd/rbd-nbd.cc
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@rbd_nbd_CXXFLAGS = $(AM_CXXFLAGS)
 @ENABLE_CLIENT_TRUE@@LINUX_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@rbd_nbd_LDADD = $(LIBRBD) $(LIBRADOS) $(CEPH_GLOBAL) $(BOOST_REGEX_LIBS)
@@ -12395,6 +12443,7 @@ ceph_test_cfuse_cache_invalidate_SOURCES = test/test_cfuse_cache_invalidate.cc
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CloseImageRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/CreateImageRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/EventPreprocessor.cc \
+@ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenImageRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc \
 @ENABLE_CLIENT_TRUE@@WITH_RADOS_TRUE@@WITH_RBD_TRUE@	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc \
@@ -12442,7 +12491,7 @@ ceph_test_cfuse_cache_invalidate_SOURCES = test/test_cfuse_cache_invalidate.cc
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBOS) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(BOOST_PROGRAM_OPTIONS_LIBS) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_277)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_287)
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@cephfs_journal_tool_SOURCES = \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/cephfs-journal-tool.cc \
 @ENABLE_CLIENT_TRUE@@ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@@WITH_RADOS_TRUE@	tools/cephfs/JournalTool.cc \
@@ -12548,7 +12597,7 @@ editpaths = sed \
 	-e 's|@@GCOV_PREFIX_STRIP[@][@]|$(GCOV_PREFIX_STRIP)|g'
 
 shell_scripts = ceph-debugpack ceph-post-file ceph-crush-location \
-	$(am__append_312)
+	$(am__append_322)
 doc_DATA = $(srcdir)/sample.ceph.conf sample.fetch_config
 
 # various scripts in $(libexecdir)
@@ -12568,11 +12617,11 @@ AM_TESTS_ENVIRONMENT = export CEPH_ROOT="$(abs_top_srcdir)"; export \
 	PATH="$(abs_srcdir):$$PATH";
 
 # pybind
-python_PYTHON = $(am__append_294) $(am__append_306) $(am__append_311)
+python_PYTHON = $(am__append_304) $(am__append_316) $(am__append_321)
 @ENABLE_CLIENT_TRUE@bash_completiondir = $(sysconfdir)/bash_completion.d
 @ENABLE_CLIENT_TRUE@bash_completion_DATA =  \
 @ENABLE_CLIENT_TRUE@	$(srcdir)/bash_completion/ceph \
-@ENABLE_CLIENT_TRUE@	$(am__append_296) $(am__append_298)
+@ENABLE_CLIENT_TRUE@	$(am__append_306) $(am__append_308)
 @ENABLE_CLIENT_TRUE@ceph_syn_SOURCES = ceph_syn.cc \
 @ENABLE_CLIENT_TRUE@	client/SyntheticClient.cc # uses g_conf.. \
 @ENABLE_CLIENT_TRUE@	needs cleanup
@@ -12601,7 +12650,7 @@ python_PYTHON = $(am__append_294) $(am__append_306) $(am__append_311)
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	1:0:0 \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	-export-symbols-regex \
 @ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	'^ceph_.*' \
-@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_307)
+@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@	$(am__append_317)
 
 # jni library (java source is in src/java)
 @ENABLE_CEPHFS_JAVA_TRUE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@libcephfs_jni_la_SOURCES = \
@@ -12614,7 +12663,7 @@ python_PYTHON = $(am__append_294) $(am__append_306) $(am__append_311)
 @ENABLE_CEPHFS_JAVA_TRUE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@libcephfs_jni_la_CPPFLAGS = $(JDK_CPPFLAGS) $(AM_CPPFLAGS)
 @ENABLE_CEPHFS_JAVA_TRUE@@ENABLE_CLIENT_TRUE@@WITH_CEPHFS_TRUE@@WITH_RADOS_TRUE@libcephfs_jni_la_LDFLAGS = ${AM_LDFLAGS} -version-info 1:0:0
 @ENABLE_SERVER_TRUE@ceph_sbin_SCRIPTS = ceph-create-keys \
-@ENABLE_SERVER_TRUE@	$(am__append_315)
+@ENABLE_SERVER_TRUE@	$(am__append_325)
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@ceph_mon_SOURCES = ceph_mon.cc
 @ENABLE_SERVER_TRUE@@WITH_MON_TRUE@ceph_mon_LDADD = $(LIBMON) $(LIBOS) $(CEPH_GLOBAL) $(LIBCOMMON) $(LIBAUTH) $(LIBCOMMON) $(LIBMON_TYPES)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@ceph_osd_SOURCES = ceph_osd.cc
@@ -12622,7 +12671,7 @@ python_PYTHON = $(am__append_294) $(am__append_306) $(am__append_311)
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOSD) $(LIBOSD_TYPES) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(LIBOS_TYPES) $(LIBOS) \
 @ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(CEPH_GLOBAL) $(LIBCOMMON) \
-@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_317)
+@ENABLE_SERVER_TRUE@@WITH_OSD_TRUE@	$(am__append_327)
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@ceph_mds_SOURCES = ceph_mds.cc
 @ENABLE_SERVER_TRUE@@WITH_MDS_TRUE@ceph_mds_LDADD = $(LIBMDS) $(LIBOSDC) $(CEPH_GLOBAL) $(LIBCOMMON)
 @ENABLE_COVERAGE_TRUE@@ENABLE_SERVER_TRUE@COV_DIR = $(DESTDIR)$(libdir)/ceph/coverage
@@ -13737,12 +13786,12 @@ common/scrub_types.lo: common/$(am__dirstamp) \
 	common/$(DEPDIR)/$(am__dirstamp)
 common/blkdev.lo: common/$(am__dirstamp) \
 	common/$(DEPDIR)/$(am__dirstamp)
+common/util.lo: common/$(am__dirstamp) \
+	common/$(DEPDIR)/$(am__dirstamp)
 common/xattr.lo: common/$(am__dirstamp) \
 	common/$(DEPDIR)/$(am__dirstamp)
 common/ipaddr.lo: common/$(am__dirstamp) \
 	common/$(DEPDIR)/$(am__dirstamp)
-common/util.lo: common/$(am__dirstamp) \
-	common/$(DEPDIR)/$(am__dirstamp)
 common/pick_address.lo: common/$(am__dirstamp) \
 	common/$(DEPDIR)/$(am__dirstamp)
 common/linux_version.lo: common/$(am__dirstamp) \
@@ -15094,6 +15143,18 @@ librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp):
 librbd/exclusive_lock/AcquireRequest.lo:  \
 	librbd/exclusive_lock/$(am__dirstamp) \
 	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+librbd/exclusive_lock/AutomaticPolicy.lo:  \
+	librbd/exclusive_lock/$(am__dirstamp) \
+	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+librbd/exclusive_lock/BreakRequest.lo:  \
+	librbd/exclusive_lock/$(am__dirstamp) \
+	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+librbd/exclusive_lock/GetLockerRequest.lo:  \
+	librbd/exclusive_lock/$(am__dirstamp) \
+	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+librbd/exclusive_lock/ReacquireRequest.lo:  \
+	librbd/exclusive_lock/$(am__dirstamp) \
+	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
 librbd/exclusive_lock/ReleaseRequest.lo:  \
 	librbd/exclusive_lock/$(am__dirstamp) \
 	librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
@@ -15128,6 +15189,9 @@ librbd/image_watcher/Notifier.lo:  \
 librbd/image_watcher/NotifyLockOwner.lo:  \
 	librbd/image_watcher/$(am__dirstamp) \
 	librbd/image_watcher/$(DEPDIR)/$(am__dirstamp)
+librbd/image_watcher/RewatchRequest.lo:  \
+	librbd/image_watcher/$(am__dirstamp) \
+	librbd/image_watcher/$(DEPDIR)/$(am__dirstamp)
 librbd/journal/$(am__dirstamp):
 	@$(MKDIR_P) librbd/journal
 	@: > librbd/journal/$(am__dirstamp)
@@ -15254,6 +15318,9 @@ tools/rbd_mirror/image_replayer/CreateImageRequest.lo:  \
 tools/rbd_mirror/image_replayer/EventPreprocessor.lo:  \
 	tools/rbd_mirror/image_replayer/$(am__dirstamp) \
 	tools/rbd_mirror/image_replayer/$(DEPDIR)/$(am__dirstamp)
+tools/rbd_mirror/image_replayer/IsPrimaryRequest.lo:  \
+	tools/rbd_mirror/image_replayer/$(am__dirstamp) \
+	tools/rbd_mirror/image_replayer/$(DEPDIR)/$(am__dirstamp)
 tools/rbd_mirror/image_replayer/OpenImageRequest.lo:  \
 	tools/rbd_mirror/image_replayer/$(am__dirstamp) \
 	tools/rbd_mirror/image_replayer/$(DEPDIR)/$(am__dirstamp)
@@ -18154,6 +18221,9 @@ test/librados/unittest_librados_config-librados_config.$(OBJEXT):  \
 unittest_librados_config$(EXEEXT): $(unittest_librados_config_OBJECTS) $(unittest_librados_config_DEPENDENCIES) $(EXTRA_unittest_librados_config_DEPENDENCIES) 
 	@rm -f unittest_librados_config$(EXEEXT)
 	$(AM_V_CXXLD)$(unittest_librados_config_LINK) $(unittest_librados_config_OBJECTS) $(unittest_librados_config_LDADD) $(LIBS)
+test/librbd/unittest_librbd-test_BlockGuard.$(OBJEXT):  \
+	test/librbd/$(am__dirstamp) \
+	test/librbd/$(DEPDIR)/$(am__dirstamp)
 test/librbd/unittest_librbd-test_main.$(OBJEXT):  \
 	test/librbd/$(am__dirstamp) \
 	test/librbd/$(DEPDIR)/$(am__dirstamp)
@@ -18178,6 +18248,15 @@ test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp):
 test/librbd/exclusive_lock/unittest_librbd-test_mock_AcquireRequest.$(OBJEXT):  \
 	test/librbd/exclusive_lock/$(am__dirstamp) \
 	test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.$(OBJEXT):  \
+	test/librbd/exclusive_lock/$(am__dirstamp) \
+	test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.$(OBJEXT):  \
+	test/librbd/exclusive_lock/$(am__dirstamp) \
+	test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
+test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.$(OBJEXT):  \
+	test/librbd/exclusive_lock/$(am__dirstamp) \
+	test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
 test/librbd/exclusive_lock/unittest_librbd-test_mock_ReleaseRequest.$(OBJEXT):  \
 	test/librbd/exclusive_lock/$(am__dirstamp) \
 	test/librbd/exclusive_lock/$(DEPDIR)/$(am__dirstamp)
@@ -18190,6 +18269,15 @@ test/librbd/image/$(DEPDIR)/$(am__dirstamp):
 test/librbd/image/unittest_librbd-test_mock_RefreshRequest.$(OBJEXT):  \
 	test/librbd/image/$(am__dirstamp) \
 	test/librbd/image/$(DEPDIR)/$(am__dirstamp)
+test/librbd/image_watcher/$(am__dirstamp):
+	@$(MKDIR_P) test/librbd/image_watcher
+	@: > test/librbd/image_watcher/$(am__dirstamp)
+test/librbd/image_watcher/$(DEPDIR)/$(am__dirstamp):
+	@$(MKDIR_P) test/librbd/image_watcher/$(DEPDIR)
+	@: > test/librbd/image_watcher/$(DEPDIR)/$(am__dirstamp)
+test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.$(OBJEXT):  \
+	test/librbd/image_watcher/$(am__dirstamp) \
+	test/librbd/image_watcher/$(DEPDIR)/$(am__dirstamp)
 test/librbd/journal/unittest_librbd-test_mock_Replay.$(OBJEXT):  \
 	test/librbd/journal/$(am__dirstamp) \
 	test/librbd/journal/$(DEPDIR)/$(am__dirstamp)
@@ -19051,6 +19139,7 @@ mostlyclean-compile:
 	-rm -f test/librbd/*.lo
 	-rm -f test/librbd/exclusive_lock/*.$(OBJEXT)
 	-rm -f test/librbd/image/*.$(OBJEXT)
+	-rm -f test/librbd/image_watcher/*.$(OBJEXT)
 	-rm -f test/librbd/journal/*.$(OBJEXT)
 	-rm -f test/librbd/journal/*.lo
 	-rm -f test/librbd/mock/*.$(OBJEXT)
@@ -19658,6 +19747,10 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/$(DEPDIR)/librbd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/$(DEPDIR)/librbd_la-librbd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/AcquireRequest.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/AutomaticPolicy.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/BreakRequest.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/GetLockerRequest.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/ReacquireRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/ReleaseRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/exclusive_lock/$(DEPDIR)/StandardPolicy.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/image/$(DEPDIR)/CloseRequest.Plo@am__quote@
@@ -19667,6 +19760,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/image/$(DEPDIR)/SetSnapRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/image_watcher/$(DEPDIR)/Notifier.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/image_watcher/$(DEPDIR)/NotifyLockOwner.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@librbd/image_watcher/$(DEPDIR)/RewatchRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/journal/$(DEPDIR)/Replay.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/journal/$(DEPDIR)/StandardPolicy.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@librbd/journal/$(DEPDIR)/Types.Plo@am__quote@
@@ -20286,14 +20380,19 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/librbd_test_la-test_librbd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/librbd_test_la-test_mirroring.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/librbd_test_la-test_support.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_main.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_mock_ExclusiveLock.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_mock_Journal.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_mock_ObjectWatcher.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/$(DEPDIR)/unittest_librbd-test_mock_fixture.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_AcquireRequest.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReleaseRequest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/image/$(DEPDIR)/unittest_librbd-test_mock_RefreshRequest.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/journal/$(DEPDIR)/librbd_test_la-test_Entries.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/journal/$(DEPDIR)/librbd_test_la-test_Replay.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@test/librbd/journal/$(DEPDIR)/unittest_librbd-test_mock_Replay.Po@am__quote@
@@ -20485,6 +20584,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/CloseImageRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/CreateImageRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/EventPreprocessor.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/IsPrimaryRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/OpenImageRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/OpenLocalImageRequest.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@tools/rbd_mirror/image_replayer/$(DEPDIR)/ReplayStatusFormatter.Plo@am__quote@
@@ -29283,6 +29383,20 @@ test/librados/unittest_librados_config-librados_config.obj: test/librados/librad
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librados_config_CXXFLAGS) $(CXXFLAGS) -c -o test/librados/unittest_librados_config-librados_config.obj `if test -f 'test/librados/librados_config.cc'; then $(CYGPATH_W) 'test/librados/librados_config.cc'; else $(CYGPATH_W) '$(srcdir)/test/librados/librados_config.cc'; fi`
 
+test/librbd/unittest_librbd-test_BlockGuard.o: test/librbd/test_BlockGuard.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/unittest_librbd-test_BlockGuard.o -MD -MP -MF test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Tpo -c -o test/librbd/unittest_librbd-test_BlockGuard.o `test -f 'test/librbd/test_BlockGuard.cc' || echo '$(srcdir)/'`test/librbd/test_BlockGuard.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Tpo test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/test_BlockGuard.cc' object='test/librbd/unittest_librbd-test_BlockGuard.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/unittest_librbd-test_BlockGuard.o `test -f 'test/librbd/test_BlockGuard.cc' || echo '$(srcdir)/'`test/librbd/test_BlockGuard.cc
+
+test/librbd/unittest_librbd-test_BlockGuard.obj: test/librbd/test_BlockGuard.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/unittest_librbd-test_BlockGuard.obj -MD -MP -MF test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Tpo -c -o test/librbd/unittest_librbd-test_BlockGuard.obj `if test -f 'test/librbd/test_BlockGuard.cc'; then $(CYGPATH_W) 'test/librbd/test_BlockGuard.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/test_BlockGuard.cc'; fi`
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Tpo test/librbd/$(DEPDIR)/unittest_librbd-test_BlockGuard.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/test_BlockGuard.cc' object='test/librbd/unittest_librbd-test_BlockGuard.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/unittest_librbd-test_BlockGuard.obj `if test -f 'test/librbd/test_BlockGuard.cc'; then $(CYGPATH_W) 'test/librbd/test_BlockGuard.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/test_BlockGuard.cc'; fi`
+
 test/librbd/unittest_librbd-test_main.o: test/librbd/test_main.cc
 @am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/unittest_librbd-test_main.o -MD -MP -MF test/librbd/$(DEPDIR)/unittest_librbd-test_main.Tpo -c -o test/librbd/unittest_librbd-test_main.o `test -f 'test/librbd/test_main.cc' || echo '$(srcdir)/'`test/librbd/test_main.cc
 @am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/$(DEPDIR)/unittest_librbd-test_main.Tpo test/librbd/$(DEPDIR)/unittest_librbd-test_main.Po
@@ -29367,6 +29481,48 @@ test/librbd/exclusive_lock/unittest_librbd-test_mock_AcquireRequest.obj: test/li
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_AcquireRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_AcquireRequest.cc'; then $(CYGPATH_W) 'test/librbd/exclusive_lock/test_mock_AcquireRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/exclusive_lock/test_mock_AcquireRequest.cc'; fi`
 
+test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.o: test/librbd/exclusive_lock/test_mock_BreakRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.o -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_BreakRequest.cc' || echo '$(srcdir)/'`test/librbd/exclusive_ [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_BreakRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_BreakRequest.cc' || echo '$(srcdir)/'`test/librbd/exclusive_lock/test_mock_BreakRequest.cc
+
+test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.obj: test/librbd/exclusive_lock/test_mock_BreakRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.obj -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_BreakRequest.cc'; then $(CYGPATH_W) 'test/librbd/excl [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_BreakRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_BreakRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_BreakRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_BreakRequest.cc'; then $(CYGPATH_W) 'test/librbd/exclusive_lock/test_mock_BreakRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/exclusive_lock/test_mock_BreakRequest.cc'; fi`
+
+test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.o: test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.o -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc' || echo '$(srcdir)/'`test/l [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc' || echo '$(srcdir)/'`test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc
+
+test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.obj: test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.obj -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc'; then $(CYGPATH_W) ' [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_GetLockerRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_GetLockerRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc'; then $(CYGPATH_W) 'test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc'; fi`
+
+test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.o: test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.o -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc' || echo '$(srcdir)/'`test/l [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc' || echo '$(srcdir)/'`test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc
+
+test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.obj: test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.obj -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc'; then $(CYGPATH_W) ' [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReacquireRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc' object='test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_ReacquireRequest.obj `if test -f 'test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc'; then $(CYGPATH_W) 'test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc'; fi`
+
 test/librbd/exclusive_lock/unittest_librbd-test_mock_ReleaseRequest.o: test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
 @am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/exclusive_lock/unittest_librbd-test_mock_ReleaseRequest.o -MD -MP -MF test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReleaseRequest.Tpo -c -o test/librbd/exclusive_lock/unittest_librbd-test_mock_ReleaseRequest.o `test -f 'test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc' || echo '$(srcdir)/'`test/librbd/ex [...]
 @am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReleaseRequest.Tpo test/librbd/exclusive_lock/$(DEPDIR)/unittest_librbd-test_mock_ReleaseRequest.Po
@@ -29395,6 +29551,20 @@ test/librbd/image/unittest_librbd-test_mock_RefreshRequest.obj: test/librbd/imag
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/image/unittest_librbd-test_mock_RefreshRequest.obj `if test -f 'test/librbd/image/test_mock_RefreshRequest.cc'; then $(CYGPATH_W) 'test/librbd/image/test_mock_RefreshRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/image/test_mock_RefreshRequest.cc'; fi`
 
+test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.o: test/librbd/image_watcher/test_mock_RewatchRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.o -MD -MP -MF test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Tpo -c -o test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.o `test -f 'test/librbd/image_watcher/test_mock_RewatchRequest.cc' || echo '$(srcdir)/'`test/librbd/image_ [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Tpo test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/image_watcher/test_mock_RewatchRequest.cc' object='test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.o `test -f 'test/librbd/image_watcher/test_mock_RewatchRequest.cc' || echo '$(srcdir)/'`test/librbd/image_watcher/test_mock_RewatchRequest.cc
+
+test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.obj: test/librbd/image_watcher/test_mock_RewatchRequest.cc
+ at am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.obj -MD -MP -MF test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Tpo -c -o test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.obj `if test -f 'test/librbd/image_watcher/test_mock_RewatchRequest.cc'; then $(CYGPATH_W) 'test/librbd/ [...]
+ at am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Tpo test/librbd/image_watcher/$(DEPDIR)/unittest_librbd-test_mock_RewatchRequest.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='test/librbd/image_watcher/test_mock_RewatchRequest.cc' object='test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@	$(AM_V_CXX at am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -c -o test/librbd/image_watcher/unittest_librbd-test_mock_RewatchRequest.obj `if test -f 'test/librbd/image_watcher/test_mock_RewatchRequest.cc'; then $(CYGPATH_W) 'test/librbd/image_watcher/test_mock_RewatchRequest.cc'; else $(CYGPATH_W) '$(srcdir)/test/librbd/image_watcher/test_mock_RewatchRequest.cc'; fi`
+
 test/librbd/journal/unittest_librbd-test_mock_Replay.o: test/librbd/journal/test_mock_Replay.cc
 @am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(unittest_librbd_CXXFLAGS) $(CXXFLAGS) -MT test/librbd/journal/unittest_librbd-test_mock_Replay.o -MD -MP -MF test/librbd/journal/$(DEPDIR)/unittest_librbd-test_mock_Replay.Tpo -c -o test/librbd/journal/unittest_librbd-test_mock_Replay.o `test -f 'test/librbd/journal/test_mock_Replay.cc' || echo '$(srcdir)/'`test/librbd/journal/test_mock_Replay.cc
 @am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) test/librbd/journal/$(DEPDIR)/unittest_librbd-test_mock_Replay.Tpo test/librbd/journal/$(DEPDIR)/unittest_librbd-test_mock_Replay.Po
@@ -32473,6 +32643,8 @@ distclean-generic:
 	-rm -f test/librbd/exclusive_lock/$(am__dirstamp)
 	-rm -f test/librbd/image/$(DEPDIR)/$(am__dirstamp)
 	-rm -f test/librbd/image/$(am__dirstamp)
+	-rm -f test/librbd/image_watcher/$(DEPDIR)/$(am__dirstamp)
+	-rm -f test/librbd/image_watcher/$(am__dirstamp)
 	-rm -f test/librbd/journal/$(DEPDIR)/$(am__dirstamp)
 	-rm -f test/librbd/journal/$(am__dirstamp)
 	-rm -f test/librbd/mock/$(DEPDIR)/$(am__dirstamp)
@@ -32545,7 +32717,7 @@ clean-am: clean-binPROGRAMS clean-checkLTLIBRARIES clean-checkPROGRAMS \
 	clean-sbinPROGRAMS clean-su_sbinPROGRAMS mostlyclean-am
 
 distclean: distclean-recursive
-	-rm -rf ./$(DEPDIR) arch/$(DEPDIR) auth/$(DEPDIR) auth/cephx/$(DEPDIR) auth/none/$(DEPDIR) auth/unknown/$(DEPDIR) civetweb/src/$(DEPDIR) client/$(DEPDIR) cls/cephfs/$(DEPDIR) cls/hello/$(DEPDIR) cls/journal/$(DEPDIR) cls/lock/$(DEPDIR) cls/log/$(DEPDIR) cls/numops/$(DEPDIR) cls/rbd/$(DEPDIR) cls/refcount/$(DEPDIR) cls/replica_log/$(DEPDIR) cls/rgw/$(DEPDIR) cls/statelog/$(DEPDIR) cls/timeindex/$(DEPDIR) cls/user/$(DEPDIR) cls/version/$(DEPDIR) common/$(DEPDIR) compressor/$(DEPDIR) compr [...]
+	-rm -rf ./$(DEPDIR) arch/$(DEPDIR) auth/$(DEPDIR) auth/cephx/$(DEPDIR) auth/none/$(DEPDIR) auth/unknown/$(DEPDIR) civetweb/src/$(DEPDIR) client/$(DEPDIR) cls/cephfs/$(DEPDIR) cls/hello/$(DEPDIR) cls/journal/$(DEPDIR) cls/lock/$(DEPDIR) cls/log/$(DEPDIR) cls/numops/$(DEPDIR) cls/rbd/$(DEPDIR) cls/refcount/$(DEPDIR) cls/replica_log/$(DEPDIR) cls/rgw/$(DEPDIR) cls/statelog/$(DEPDIR) cls/timeindex/$(DEPDIR) cls/user/$(DEPDIR) cls/version/$(DEPDIR) common/$(DEPDIR) compressor/$(DEPDIR) compr [...]
 	-rm -f Makefile
 distclean-am: clean-am distclean-compile distclean-generic \
 	distclean-hdr distclean-tags
@@ -32603,7 +32775,7 @@ install-ps-am:
 installcheck-am:
 
 maintainer-clean: maintainer-clean-recursive
-	-rm -rf ./$(DEPDIR) arch/$(DEPDIR) auth/$(DEPDIR) auth/cephx/$(DEPDIR) auth/none/$(DEPDIR) auth/unknown/$(DEPDIR) civetweb/src/$(DEPDIR) client/$(DEPDIR) cls/cephfs/$(DEPDIR) cls/hello/$(DEPDIR) cls/journal/$(DEPDIR) cls/lock/$(DEPDIR) cls/log/$(DEPDIR) cls/numops/$(DEPDIR) cls/rbd/$(DEPDIR) cls/refcount/$(DEPDIR) cls/replica_log/$(DEPDIR) cls/rgw/$(DEPDIR) cls/statelog/$(DEPDIR) cls/timeindex/$(DEPDIR) cls/user/$(DEPDIR) cls/version/$(DEPDIR) common/$(DEPDIR) compressor/$(DEPDIR) compr [...]
+	-rm -rf ./$(DEPDIR) arch/$(DEPDIR) auth/$(DEPDIR) auth/cephx/$(DEPDIR) auth/none/$(DEPDIR) auth/unknown/$(DEPDIR) civetweb/src/$(DEPDIR) client/$(DEPDIR) cls/cephfs/$(DEPDIR) cls/hello/$(DEPDIR) cls/journal/$(DEPDIR) cls/lock/$(DEPDIR) cls/log/$(DEPDIR) cls/numops/$(DEPDIR) cls/rbd/$(DEPDIR) cls/refcount/$(DEPDIR) cls/replica_log/$(DEPDIR) cls/rgw/$(DEPDIR) cls/statelog/$(DEPDIR) cls/timeindex/$(DEPDIR) cls/user/$(DEPDIR) cls/version/$(DEPDIR) common/$(DEPDIR) compressor/$(DEPDIR) compr [...]
 	-rm -f Makefile
 maintainer-clean-am: distclean-am maintainer-clean-generic
 
diff --git a/src/ceph-create-keys b/src/ceph-create-keys
index 8031b2f..d1dce42 100755
--- a/src/ceph-create-keys
+++ b/src/ceph-create-keys
@@ -30,7 +30,8 @@ def get_ceph_gid():
     return gid
 
 def wait_for_quorum(cluster, mon_id):
-    while True:
+    wait_count = 600  # 10 minutes
+    while wait_count > 0:
         p = subprocess.Popen(
             args=[
                 'ceph',
@@ -48,11 +49,13 @@ def wait_for_quorum(cluster, mon_id):
         if returncode != 0:
             LOG.info('ceph-mon admin socket not ready yet.')
             time.sleep(1)
+            wait_count -= 1
             continue
 
         if out == '':
             LOG.info('ceph-mon admin socket returned no data')
             time.sleep(1)
+            wait_count -= 1
             continue
 
         try:
@@ -65,10 +68,14 @@ def wait_for_quorum(cluster, mon_id):
         if state not in QUORUM_STATES:
             LOG.info('ceph-mon is not in quorum: %r', state)
             time.sleep(1)
+            wait_count -= 1
             continue
 
         break
 
+    if wait_count == 0:
+        raise SystemExit("ceph-mon was not able to join quorum within 10 minutes")
+
 
 def get_key(cluster, mon_id):
     path = '/etc/ceph/{cluster}.client.admin.keyring'.format(
@@ -86,7 +93,8 @@ def get_key(cluster, mon_id):
         os.makedirs(pathdir)
         os.chmod(pathdir, 0770)
         os.chown(pathdir, get_ceph_uid(), get_ceph_gid())
-    while True:
+    wait_count = 600  # 10 minutes
+    while wait_count > 0:
         try:
             with file(tmp, 'w') as f:
                 os.fchmod(f.fileno(), 0600)
@@ -134,6 +142,7 @@ def get_key(cluster, mon_id):
                 else:
                     LOG.info('Cannot get or create admin key')
                     time.sleep(1)
+                    wait_count -= 1
                     continue
 
             os.rename(tmp, path)
@@ -147,6 +156,10 @@ def get_key(cluster, mon_id):
                 else:
                     raise
 
+    if wait_count == 0:
+        raise SystemExit("Could not get or create the admin key after 10 minutes")
+
+
 def bootstrap_key(cluster, type_):
     path = '/var/lib/ceph/bootstrap-{type}/{cluster}.keyring'.format(
         type=type_,
@@ -176,7 +189,8 @@ def bootstrap_key(cluster, type_):
         os.chmod(pathdir, 0770)
         os.chown(pathdir, get_ceph_uid(), get_ceph_gid())
 
-    while True:
+    wait_count = 600  # 10 minutes
+    while wait_count > 0:
         try:
             with file(tmp, 'w') as f:
                 os.fchmod(f.fileno(), 0600)
@@ -193,6 +207,7 @@ def bootstrap_key(cluster, type_):
                 else:
                     LOG.info('Cannot get or create bootstrap key for %s', type_)
                     time.sleep(1)
+                    wait_count -= 1
                     continue
 
             os.rename(tmp, path)
@@ -205,6 +220,8 @@ def bootstrap_key(cluster, type_):
                     pass
                 else:
                     raise
+    if wait_count == 0:
+        raise SystemExit("Could not get or create %s bootstrap key after 10 minutes" % type_)
 
 
 def parse_args():
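
The ceph-create-keys hunks above replace the previous unbounded "while True" polling
loops with a shared pattern: a wait_count budget of 600 one-second retries (about ten
minutes), after which the tool exits with an error instead of hanging forever. A minimal
sketch of that bounded-retry pattern, for illustration only (poll_fn and describe are
hypothetical names, not part of the patch):

    import time

    def wait_for(poll_fn, attempts=600, delay=1.0, describe="condition"):
        """Call poll_fn once per `delay` seconds; give up after `attempts` tries."""
        remaining = attempts
        while remaining > 0:
            if poll_fn():              # condition satisfied, stop waiting
                return
            time.sleep(delay)
            remaining -= 1
        # budget exhausted: fail loudly instead of looping forever
        raise SystemExit("timed out waiting for %s after %d attempts" % (describe, attempts))
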
diff --git a/src/ceph-disk/ceph_disk/main.py b/src/ceph-disk/ceph_disk/main.py
index 8937427..16420af 100755
--- a/src/ceph-disk/ceph_disk/main.py
+++ b/src/ceph-disk/ceph_disk/main.py
@@ -170,6 +170,7 @@ class Ptype(object):
                 return True
         return False
 
+
 DEFAULT_FS_TYPE = 'xfs'
 SYSFS = '/sys'
 
@@ -267,7 +268,12 @@ class Error(Exception):
 
     def __str__(self):
         doc = _bytes2str(self.__doc__.strip())
-        return ': '.join([doc] + [_bytes2str(a) for a in self.args])
+        try:
+            str_type = basestring
+        except NameError:
+            str_type = str
+        args = [a if isinstance(a, str_type) else str(a) for a in self.args]
+        return ': '.join([doc] + [_bytes2str(a) for a in args])
 
 
 class MountError(Error):
@@ -1385,16 +1391,22 @@ def check_journal_reqs(args):
         'ceph-osd', '--check-allows-journal',
         '-i', '0',
         '--cluster', args.cluster,
+        '--setuser', get_ceph_user(),
+        '--setgroup', get_ceph_group(),
     ])
     _, _, wants_journal = command([
         'ceph-osd', '--check-wants-journal',
         '-i', '0',
         '--cluster', args.cluster,
+        '--setuser', get_ceph_user(),
+        '--setgroup', get_ceph_group(),
     ])
     _, _, needs_journal = command([
         'ceph-osd', '--check-needs-journal',
         '-i', '0',
         '--cluster', args.cluster,
+        '--setuser', get_ceph_user(),
+        '--setgroup', get_ceph_group(),
     ])
     return (not allows_journal, not wants_journal, not needs_journal)
 
@@ -1756,6 +1768,13 @@ class Prepare(object):
             default='/etc/ceph/dmcrypt-keys',
             help='directory where dm-crypt keys are stored',
         )
+        parser.add_argument(
+            '--prepare-key',
+            metavar='PATH',
+            help='bootstrap-osd keyring path template (%(default)s)',
+            default='{statedir}/bootstrap-osd/{cluster}.keyring',
+            dest='prepare_key_template',
+        )
         return parser
 
     @staticmethod
@@ -2277,9 +2296,14 @@ class Lockbox(object):
         key_size = CryptHelpers.get_dmcrypt_keysize(self.args)
         key = open('/dev/urandom', 'rb').read(key_size / 8)
         base64_key = base64.b64encode(key)
+        cluster = self.args.cluster
+        bootstrap = self.args.prepare_key_template.format(cluster=cluster,
+                                                          statedir=STATEDIR)
         command_check_call(
             [
                 'ceph',
+                '--name', 'client.bootstrap-osd',
+                '--keyring', bootstrap,
                 'config-key',
                 'put',
                 'dm-crypt/osd/' + self.args.osd_uuid + '/luks',
@@ -2289,6 +2313,8 @@ class Lockbox(object):
         keyring, stderr, ret = command(
             [
                 'ceph',
+                '--name', 'client.bootstrap-osd',
+                '--keyring', bootstrap,
                 'auth',
                 'get-or-create',
                 'client.osd-lockbox.' + self.args.osd_uuid,
@@ -2882,10 +2908,19 @@ def start_daemon(
                 ],
             )
         elif os.path.exists(os.path.join(path, 'systemd')):
+            # ensure there is no duplicate ceph-osd@.service
+            command_check_call(
+                [
+                    'systemctl',
+                    'disable',
+                    'ceph-osd@{osd_id}'.format(osd_id=osd_id),
+                ],
+            )
             command_check_call(
                 [
                     'systemctl',
                     'enable',
+                    '--runtime',
                     'ceph-osd@{osd_id}'.format(osd_id=osd_id),
                 ],
             )
@@ -2943,6 +2978,7 @@ def stop_daemon(
                 [
                     'systemctl',
                     'disable',
+                    '--runtime',
                     'ceph-osd@{osd_id}'.format(osd_id=osd_id),
                 ],
             )
@@ -4302,6 +4338,8 @@ def main_trigger(args):
         )
         return
 
+    if get_ceph_user() == 'ceph':
+        command_check_call(['chown', 'ceph:ceph', args.dev])
     parttype = get_partition_type(args.dev)
     partid = get_partition_uuid(args.dev)
 
@@ -5008,6 +5046,7 @@ def main_catch(func, args):
 def run():
     main(sys.argv[1:])
 
+
 if __name__ == '__main__':
     main(sys.argv[1:])
     warned_about = {}
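
One small but easy-to-miss ceph-disk change above is in Error.__str__: arguments are now
coerced to strings before being joined, using basestring on Python 2 and falling back to
str on Python 3, so raising an Error with a non-string argument (an exit code, for
example) no longer breaks the error message. The same compatibility idiom in isolation
(ToolError is an illustrative class, not from the patch):

    class ToolError(Exception):
        """Example tool error"""
        def __str__(self):
            try:
                str_type = basestring      # Python 2
            except NameError:
                str_type = str             # Python 3
            args = [a if isinstance(a, str_type) else str(a) for a in self.args]
            return ': '.join([self.__doc__.strip()] + args)

    # str(ToolError('/dev/sdb1', 5)) joins the docstring, '/dev/sdb1' and '5' with ': '
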
diff --git a/src/ceph_mds.cc b/src/ceph_mds.cc
index efe22e6..58bda1b 100644
--- a/src/ceph_mds.cc
+++ b/src/ceph_mds.cc
@@ -136,9 +136,12 @@ int main(int argc, const char **argv)
       "MDS names may not start with a numeric digit." << dendl;
   }
 
+  uint64_t nonce = 0;
+  get_random_bytes((char*)&nonce, sizeof(nonce));
+
   Messenger *msgr = Messenger::create(g_ceph_context, g_conf->ms_type,
 				      entity_name_t::MDS(-1), "mds",
-				      getpid());
+				      nonce);
   if (!msgr)
     exit(1);
   msgr->set_cluster_protocol(CEPH_MDS_PROTOCOL);
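
The ceph_mds.cc hunk switches the Messenger nonce from getpid() to 64 random bits,
presumably so that a restarted MDS that happens to reuse a PID (common in containers) is
still treated as a new messenger instance. The equivalent of drawing that nonce, sketched
in Python purely for illustration (the C++ code uses get_random_bytes):

    import os
    import struct

    def random_nonce():
        # 64 random bits, analogous to filling a uint64_t from get_random_bytes()
        return struct.unpack('<Q', os.urandom(8))[0]
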
diff --git a/src/client/Client.cc b/src/client/Client.cc
index 112bf40..2d3284b 100644
--- a/src/client/Client.cc
+++ b/src/client/Client.cc
@@ -568,8 +568,6 @@ int Client::init()
 	       << cpp_strerror(-ret) << dendl;
   }
 
-  populate_metadata();
-
   client_lock.Lock();
   initialized = true;
   client_lock.Unlock();
@@ -1934,7 +1932,7 @@ MetaSession *Client::_get_or_open_mds_session(mds_rank_t mds)
  * Populate a map of strings with client-identifying metadata,
  * such as the hostname.  Call this once at initialization.
  */
-void Client::populate_metadata()
+void Client::populate_metadata(const std::string &mount_root)
 {
   // Hostname
   struct utsname u;
@@ -1950,7 +1948,9 @@ void Client::populate_metadata()
   metadata["entity_id"] = cct->_conf->name.get_id();
 
   // Our mount position
-  metadata["root"] = cct->_conf->client_mountpoint;
+  if (!mount_root.empty()) {
+    metadata["root"] = mount_root;
+  }
 
   // Ceph version
   metadata["ceph_version"] = pretty_version_to_str();
@@ -4741,6 +4741,9 @@ void Client::handle_cap_flushsnap_ack(MetaSession *session, Inode *in, MClientCa
     } else {
       ldout(cct, 5) << "handle_cap_flushedsnap mds." << mds << " flushed snap follows " << follows
 	      << " on " << *in << dendl;
+      InodeRef tmp_ref;
+      if (in->get_num_ref() == 1)
+	tmp_ref = in; // make sure inode not get freed while erasing item from in->cap_snaps
       in->cap_snaps.erase(follows);
       if (in->flushing_caps == 0 && in->cap_snaps.empty())
 	in->flushing_cap_item.remove_myself();
@@ -5449,6 +5452,12 @@ int Client::mds_command(
     return -ENOENT;
   }
 
+  if (metadata.empty()) {
+    // We are called on an unmounted client, so metadata
+    // won't be initialized yet.
+    populate_metadata("");
+  }
+
   // Send commands to targets
   C_GatherBuilder gather(cct, onfinish);
   for (const auto target_gid : non_laggy) {
@@ -5509,6 +5518,8 @@ void Client::handle_command_reply(MCommandReply *m)
     op.on_finish->complete(m->r);
   }
 
+  commands.erase(opiter);
+
   m->put();
 }
 
@@ -5553,9 +5564,12 @@ int Client::mount(const std::string &mount_root, bool require_mds)
     }
   }
 
+  populate_metadata(mount_root.empty() ? "/" : mount_root);
+
   filepath fp(CEPH_INO_ROOT);
-  if (!mount_root.empty())
+  if (!mount_root.empty()) {
     fp = filepath(mount_root.c_str());
+  }
   while (true) {
     MetaRequest *req = new MetaRequest(CEPH_MDS_OP_GETATTR);
     req->set_filepath(fp);
@@ -5874,7 +5888,7 @@ int Client::_lookup(Inode *dir, const string& dname, int mask,
 
   if (dname == "..") {
     if (dir->dn_set.empty())
-      r = -ENOENT;
+      *target = dir;
     else
       *target = dir->get_first_parent()->dir->parent_inode; //dirs can't be hard-linked
     goto done;
@@ -6089,31 +6103,35 @@ int Client::link(const char *relexisting, const char *relpath)
   tout(cct) << relpath << std::endl;
 
   filepath existing(relexisting);
-  filepath path(relpath);
-  string name = path.last_dentry();
-  path.pop_dentry();
 
   InodeRef in, dir;
   int r = path_walk(existing, &in);
   if (r < 0)
-    goto out;
+    return r;
+  if (std::string(relpath) == "/") {
+    r = -EEXIST;
+    return r;
+  }
+  filepath path(relpath);
+  string name = path.last_dentry();
+  path.pop_dentry();
+
   r = path_walk(path, &dir);
   if (r < 0)
-    goto out;
+    return r;
   if (cct->_conf->client_permissions) {
     if (S_ISDIR(in->mode)) {
       r = -EPERM;
-      goto out;
+      return r;
     }
     r = may_hardlink(in.get());
     if (r < 0)
-      goto out;
+      return r;
     r = may_create(dir.get());
     if (r < 0)
-      goto out;
+      return r;
   }
   r = _link(in.get(), dir.get(), name.c_str());
- out:
   return r;
 }
 
@@ -6123,6 +6141,9 @@ int Client::unlink(const char *relpath)
   tout(cct) << "unlink" << std::endl;
   tout(cct) << relpath << std::endl;
 
+  if (std::string(relpath) == "/")
+    return -EISDIR;
+
   filepath path(relpath);
   string name = path.last_dentry();
   path.pop_dentry();
@@ -6145,6 +6166,9 @@ int Client::rename(const char *relfrom, const char *relto)
   tout(cct) << relfrom << std::endl;
   tout(cct) << relto << std::endl;
 
+  if (std::string(relfrom) == "/" || std::string(relto) == "/")
+    return -EBUSY;
+
   filepath from(relfrom);
   filepath to(relto);
   string fromname = from.last_dentry();
@@ -6183,6 +6207,9 @@ int Client::mkdir(const char *relpath, mode_t mode)
   tout(cct) << mode << std::endl;
   ldout(cct, 10) << "mkdir: " << relpath << dendl;
 
+  if (std::string(relpath) == "/")
+    return -EEXIST;
+
   filepath path(relpath);
   string name = path.last_dentry();
   path.pop_dentry();
@@ -6255,6 +6282,10 @@ int Client::rmdir(const char *relpath)
   Mutex::Locker lock(client_lock);
   tout(cct) << "rmdir" << std::endl;
   tout(cct) << relpath << std::endl;
+
+  if (std::string(relpath) == "/")
+    return -EBUSY;
+
   filepath path(relpath);
   string name = path.last_dentry();
   path.pop_dentry();
@@ -6277,6 +6308,10 @@ int Client::mknod(const char *relpath, mode_t mode, dev_t rdev)
   tout(cct) << relpath << std::endl;
   tout(cct) << mode << std::endl;
   tout(cct) << rdev << std::endl;
+
+  if (std::string(relpath) == "/")
+    return -EEXIST;
+
   filepath path(relpath);
   string name = path.last_dentry();
   path.pop_dentry();
@@ -6301,6 +6336,9 @@ int Client::symlink(const char *target, const char *relpath)
   tout(cct) << target << std::endl;
   tout(cct) << relpath << std::endl;
 
+  if (std::string(relpath) == "/")
+    return -EEXIST;
+
   filepath path(relpath);
   string name = path.last_dentry();
   path.pop_dentry();
@@ -6428,15 +6466,8 @@ int Client::_do_setattr(Inode *in, struct stat *attr, int mask, int uid, int gid
   }
 
   if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
-    if (mask & CEPH_SETATTR_MODE) {
-      in->ctime = ceph_clock_now(cct);
-      in->cap_dirtier_uid = uid;
-      in->cap_dirtier_gid = gid;
-      in->mode = (in->mode & ~07777) | (attr->st_mode & 07777);
-      mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
-      mask &= ~CEPH_SETATTR_MODE;
-      ldout(cct,10) << "changing mode to " << attr->st_mode << dendl;
-    }
+    bool kill_sguid = false;
+
     if (mask & CEPH_SETATTR_UID) {
       in->ctime = ceph_clock_now(cct);
       in->cap_dirtier_uid = uid;
@@ -6444,6 +6475,7 @@ int Client::_do_setattr(Inode *in, struct stat *attr, int mask, int uid, int gid
       in->uid = attr->st_uid;
       mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
       mask &= ~CEPH_SETATTR_UID;
+      kill_sguid = true;
       ldout(cct,10) << "changing uid to " << attr->st_uid << dendl;
     }
     if (mask & CEPH_SETATTR_GID) {
@@ -6453,8 +6485,25 @@ int Client::_do_setattr(Inode *in, struct stat *attr, int mask, int uid, int gid
       in->gid = attr->st_gid;
       mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
       mask &= ~CEPH_SETATTR_GID;
+      kill_sguid = true;
       ldout(cct,10) << "changing gid to " << attr->st_gid << dendl;
     }
+
+    if (mask & CEPH_SETATTR_MODE) {
+      in->ctime = ceph_clock_now(cct);
+      in->cap_dirtier_uid = uid;
+      in->cap_dirtier_gid = gid;
+      in->mode = (in->mode & ~07777) | (attr->st_mode & 07777);
+      mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
+      mask &= ~CEPH_SETATTR_MODE;
+      ldout(cct,10) << "changing mode to " << attr->st_mode << dendl;
+    } else if (kill_sguid && S_ISREG(in->mode)) {
+      /* Must squash the any setuid/setgid bits with an ownership change */
+      in->mode &= ~S_ISUID;
+      if ((in->mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP))
+	in->mode &= ~S_ISGID;
+      mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
+    }
   }
   if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL)) {
     if (mask & (CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME)) {
@@ -7203,16 +7252,14 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p)
   if (dirp->offset == 1) {
     ldout(cct, 15) << " including .." << dendl;
     uint64_t next_off = 2;
-    if (!diri->dn_set.empty()) {
-      InodeRef& in = diri->get_first_parent()->inode;
-      fill_stat(in, &st);
-      fill_dirent(&de, "..", S_IFDIR, st.st_ino, next_off);
-    } else {
-      /* must be at the root (no parent),
-       * so we add the dotdot with a special inode (3) */
-      fill_dirent(&de, "..", S_IFDIR, CEPH_INO_DOTDOT, next_off);
-    }
+    InodeRef in;
+    if (diri->dn_set.empty())
+      in = diri;
+    else
+      in = diri->get_first_parent()->inode;
 
+    fill_stat(in, &st);
+    fill_dirent(&de, "..", S_IFDIR, st.st_ino, next_off);
 
     client_lock.Unlock();
     int r = cb(p, &de, &st, -1, next_off);
@@ -9480,6 +9527,7 @@ int Client::ll_walk(const char* name, Inode **out, struct stat *attr)
   } else {
     assert(in);
     fill_stat(in, attr);
+    _ll_get(in.get());
     *out = in.get();
     return 0;
   }
@@ -9601,13 +9649,6 @@ int Client::ll_getattr(Inode *in, struct stat *attr, int uid, int gid)
   tout(cct) << "ll_getattr" << std::endl;
   tout(cct) << vino.ino.val << std::endl;
 
-  /* special case for dotdot (..) */
-  if (vino.ino.val == CEPH_INO_DOTDOT) {
-    attr->st_mode = S_IFDIR | 0755;
-    attr->st_nlink = 2;
-    return 0;
-  }
-
   int res;
   if (vino.snapid < CEPH_NOSNAP)
     res = 0;
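
Besides the mount-metadata and setattr changes, the Client.cc hunks above add explicit
guards for path operations whose target is the filesystem root, so each call fails with
the errno a local filesystem would return instead of misbehaving. A compact summary of
those guards as a hypothetical lookup table (the real checks are inlined in each method):

    import errno

    # Illustrative summary of the new root-path guards in Client.cc
    ROOT_PATH_ERRORS = {
        'link':    errno.EEXIST,   # new link name may not be "/"
        'unlink':  errno.EISDIR,
        'rename':  errno.EBUSY,    # applies to both source and destination
        'mkdir':   errno.EEXIST,
        'rmdir':   errno.EBUSY,
        'mknod':   errno.EEXIST,
        'symlink': errno.EEXIST,
    }

    def root_guard(op, relpath):
        """Return the negative errno the client now reports for operations on '/'."""
        if relpath == '/' and op in ROOT_PATH_ERRORS:
            return -ROOT_PATH_ERRORS[op]
        return 0
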
diff --git a/src/client/Client.h b/src/client/Client.h
index 7129cab..51376c0 100644
--- a/src/client/Client.h
+++ b/src/client/Client.h
@@ -439,7 +439,7 @@ protected:
 
   // Optional extra metadata about me to send to the MDS
   std::map<std::string, std::string> metadata;
-  void populate_metadata();
+  void populate_metadata(const std::string &mount_root);
 
 
   /* async block write barrier support */
diff --git a/src/cls/Makefile-client.am b/src/cls/Makefile-client.am
index 3e26db0..d0ce2ac 100644
--- a/src/cls/Makefile-client.am
+++ b/src/cls/Makefile-client.am
@@ -34,12 +34,14 @@ libcls_replica_log_client_la_SOURCES = \
 noinst_LTLIBRARIES += libcls_replica_log_client.la
 DENCODER_DEPS += libcls_replica_log_client.la
 
+if WITH_RADOSGW
 libcls_rgw_client_la_SOURCES = \
 	cls/rgw/cls_rgw_client.cc \
 	cls/rgw/cls_rgw_types.cc \
 	cls/rgw/cls_rgw_ops.cc
 noinst_LTLIBRARIES += libcls_rgw_client.la
 DENCODER_DEPS += libcls_rgw_client.la
+endif
 
 libcls_rbd_client_la_SOURCES = \
 	cls/rbd/cls_rbd_client.cc \
@@ -92,9 +94,6 @@ noinst_HEADERS += \
 	cls/replica_log/cls_replica_log_types.h \
 	cls/replica_log/cls_replica_log_ops.h \
 	cls/replica_log/cls_replica_log_client.h \
-	cls/rgw/cls_rgw_client.h \
-	cls/rgw/cls_rgw_ops.h \
-	cls/rgw/cls_rgw_types.h \
 	cls/user/cls_user_client.h \
 	cls/user/cls_user_ops.h \
 	cls/user/cls_user_types.h \
@@ -102,3 +101,10 @@ noinst_HEADERS += \
 	cls/cephfs/cls_cephfs_client.h \
 	cls/journal/cls_journal_client.h \
 	cls/journal/cls_journal_types.h
+
+if WITH_RADOSGW
+noinst_HEADERS += \
+	cls/rgw/cls_rgw_client.h \
+	cls/rgw/cls_rgw_ops.h \
+	cls/rgw/cls_rgw_types.h
+endif
diff --git a/src/cls/Makefile-server.am b/src/cls/Makefile-server.am
index 9b081bc..36ad471 100644
--- a/src/cls/Makefile-server.am
+++ b/src/cls/Makefile-server.am
@@ -60,6 +60,7 @@ libcls_user_la_LIBADD = $(PTHREAD_LIBS) $(EXTRALIBS)
 libcls_user_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
 radoslib_LTLIBRARIES += libcls_user.la
 
+if WITH_RADOSGW
 libcls_rgw_la_SOURCES = \
 	cls/rgw/cls_rgw.cc \
 	cls/rgw/cls_rgw_ops.cc \
@@ -68,6 +69,8 @@ libcls_rgw_la_SOURCES = \
 libcls_rgw_la_LIBADD = libjson_spirit.la $(PTHREAD_LIBS) $(EXTRALIBS)
 libcls_rgw_la_LDFLAGS = ${AM_LDFLAGS} -module -avoid-version -shared -export-symbols-regex '.*__cls_.*'
 radoslib_LTLIBRARIES += libcls_rgw.la
+endif
+
 
 libcls_cephfs_la_SOURCES = cls/cephfs/cls_cephfs.cc
 libcls_cephfs_la_LIBADD = $(PTHREAD_LIBS) $(EXTRALIBS)
diff --git a/src/cls/lock/cls_lock.cc b/src/cls/lock/cls_lock.cc
index 048ec40..95acbf4 100644
--- a/src/cls/lock/cls_lock.cc
+++ b/src/cls/lock/cls_lock.cc
@@ -45,6 +45,7 @@ cls_method_handle_t h_break_lock;
 cls_method_handle_t h_get_info;
 cls_method_handle_t h_list_locks;
 cls_method_handle_t h_assert_locked;
+cls_method_handle_t h_set_cookie;
 
 #define LOCK_PREFIX    "lock."
 
@@ -513,6 +514,94 @@ int assert_locked(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
   return 0;
 }
 
+/**
+ * Update the cookie associated with an object lock
+ *
+ * Input:
+ * @param cls_lock_set_cookie_op request input
+ *
+ * Output:
+ * @param none
+ *
+ * @return 0 on success, -errno on failure.
+ */
+int set_cookie(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  CLS_LOG(20, "set_cookie");
+
+  cls_lock_set_cookie_op op;
+  try {
+    bufferlist::iterator iter = in->begin();
+    ::decode(op, iter);
+  } catch (const buffer::error& err) {
+    return -EINVAL;
+  }
+
+  if (op.type != LOCK_EXCLUSIVE && op.type != LOCK_SHARED) {
+    return -EINVAL;
+  }
+
+  if (op.name.empty()) {
+    return -EINVAL;
+  }
+
+  // see if there's already a locker
+  lock_info_t linfo;
+  int r = read_lock(hctx, op.name, &linfo);
+  if (r < 0) {
+    CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  if (linfo.lockers.empty()) {
+    CLS_LOG(20, "object not locked");
+    return -EBUSY;
+  }
+
+  if (linfo.lock_type != op.type) {
+    CLS_LOG(20, "lock type mismatch: current=%s, assert=%s",
+            cls_lock_type_str(linfo.lock_type), cls_lock_type_str(op.type));
+    return -EBUSY;
+  }
+
+  if (linfo.tag != op.tag) {
+    CLS_LOG(20, "lock tag mismatch: current=%s, assert=%s", linfo.tag.c_str(),
+            op.tag.c_str());
+    return -EBUSY;
+  }
+
+  entity_inst_t inst;
+  r = cls_get_request_origin(hctx, &inst);
+  assert(r == 0);
+
+  locker_id_t id;
+  id.cookie = op.cookie;
+  id.locker = inst.name;
+
+  map<locker_id_t, locker_info_t>::iterator iter = linfo.lockers.find(id);
+  if (iter == linfo.lockers.end()) {
+    CLS_LOG(20, "not locked by client");
+    return -EBUSY;
+  }
+
+  id.cookie = op.new_cookie;
+  if (linfo.lockers.count(id) != 0) {
+    CLS_LOG(20, "lock cookie in-use");
+    return -EBUSY;
+  }
+
+  locker_info_t locker_info(iter->second);
+  linfo.lockers.erase(iter);
+
+  linfo.lockers[id] = locker_info;
+  r = write_lock(hctx, op.name, linfo);
+  if (r < 0) {
+    CLS_ERR("Could not update lock info: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+  return 0;
+}
+
 void __cls_init()
 {
   CLS_LOG(20, "Loaded lock class!");
@@ -536,6 +625,9 @@ void __cls_init()
   cls_register_cxx_method(h_class, "assert_locked",
                           CLS_METHOD_RD | CLS_METHOD_PROMOTE,
                           assert_locked, &h_assert_locked);
+  cls_register_cxx_method(h_class, "set_cookie",
+                          CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+                          set_cookie, &h_set_cookie);
 
   return;
 }
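
The new set_cookie method in cls_lock.cc updates a lock cookie in place: it checks that
the lock type and tag match, that the calling entity currently holds the lock under the
old cookie, and that the new cookie is not already in use, then moves the locker entry to
the new key. The core swap, sketched with a plain dict keyed by (locker, cookie) pairs
(hypothetical helper, not the cls API):

    import errno

    def swap_cookie(lockers, locker, old_cookie, new_cookie):
        """Move a locker entry from (locker, old_cookie) to (locker, new_cookie)."""
        old_key = (locker, old_cookie)
        new_key = (locker, new_cookie)
        if old_key not in lockers:
            return -errno.EBUSY        # caller does not hold the lock under old_cookie
        if new_key in lockers:
            return -errno.EBUSY        # new cookie already in use
        lockers[new_key] = lockers.pop(old_key)
        return 0
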
diff --git a/src/cls/lock/cls_lock_client.cc b/src/cls/lock/cls_lock_client.cc
index 30466fb..fc2790b 100644
--- a/src/cls/lock/cls_lock_client.cc
+++ b/src/cls/lock/cls_lock_client.cc
@@ -189,6 +189,22 @@ namespace rados {
         rados_op->exec("lock", "assert_locked", in);
       }
 
+      void set_cookie(librados::ObjectWriteOperation *rados_op,
+                      const std::string& name, ClsLockType type,
+                      const std::string& cookie, const std::string& tag,
+                      const std::string& new_cookie)
+      {
+        cls_lock_set_cookie_op op;
+        op.name = name;
+        op.type = type;
+        op.cookie = cookie;
+        op.tag = tag;
+        op.new_cookie = new_cookie;
+        bufferlist in;
+        ::encode(op, in);
+        rados_op->exec("lock", "set_cookie", in);
+      }
+
       void Lock::assert_locked_exclusive(ObjectOperation *op)
       {
         assert_locked(op, name, LOCK_EXCLUSIVE, cookie, tag);
diff --git a/src/cls/lock/cls_lock_client.h b/src/cls/lock/cls_lock_client.h
index b60d25e..13fb3f1 100644
--- a/src/cls/lock/cls_lock_client.h
+++ b/src/cls/lock/cls_lock_client.h
@@ -59,6 +59,11 @@ namespace rados {
                                 const std::string& cookie,
                                 const std::string& tag);
 
+      extern void set_cookie(librados::ObjectWriteOperation *rados_op,
+                             const std::string& name, ClsLockType type,
+                             const std::string& cookie, const std::string& tag,
+                             const std::string& new_cookie);
+
       class Lock {
 	std::string name;
 	std::string cookie;
diff --git a/src/cls/lock/cls_lock_ops.cc b/src/cls/lock/cls_lock_ops.cc
index 7de8326..4b9a8a3 100644
--- a/src/cls/lock/cls_lock_ops.cc
+++ b/src/cls/lock/cls_lock_ops.cc
@@ -188,3 +188,24 @@ void cls_lock_assert_op::generate_test_instances(list<cls_lock_assert_op*>& o)
   o.push_back(new cls_lock_assert_op);
 }
 
+void cls_lock_set_cookie_op::dump(Formatter *f) const
+{
+  f->dump_string("name", name);
+  f->dump_string("type", cls_lock_type_str(type));
+  f->dump_string("cookie", cookie);
+  f->dump_string("tag", tag);
+  f->dump_string("new_cookie", new_cookie);
+}
+
+void cls_lock_set_cookie_op::generate_test_instances(list<cls_lock_set_cookie_op*>& o)
+{
+  cls_lock_set_cookie_op *i = new cls_lock_set_cookie_op;
+  i->name = "name";
+  i->type = LOCK_SHARED;
+  i->cookie = "cookie";
+  i->tag = "tag";
+  i->new_cookie = "new cookie";
+  o.push_back(i);
+  o.push_back(new cls_lock_set_cookie_op);
+}
+
diff --git a/src/cls/lock/cls_lock_ops.h b/src/cls/lock/cls_lock_ops.h
index d2076f1..b002f0c 100644
--- a/src/cls/lock/cls_lock_ops.h
+++ b/src/cls/lock/cls_lock_ops.h
@@ -203,4 +203,40 @@ struct cls_lock_assert_op
 };
 WRITE_CLASS_ENCODER(cls_lock_assert_op)
 
+struct cls_lock_set_cookie_op
+{
+  string name;
+  ClsLockType type;
+  string cookie;
+  string tag;
+  string new_cookie;
+
+  cls_lock_set_cookie_op() : type(LOCK_NONE) {}
+
+  void encode(bufferlist &bl) const {
+    ENCODE_START(1, 1, bl);
+    ::encode(name, bl);
+    uint8_t t = (uint8_t)type;
+    ::encode(t, bl);
+    ::encode(cookie, bl);
+    ::encode(tag, bl);
+    ::encode(new_cookie, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(bufferlist::iterator &bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+    ::decode(name, bl);
+    uint8_t t;
+    ::decode(t, bl);
+    type = (ClsLockType)t;
+    ::decode(cookie, bl);
+    ::decode(tag, bl);
+    ::decode(new_cookie, bl);
+    DECODE_FINISH(bl);
+  }
+  void dump(Formatter *f) const;
+  static void generate_test_instances(list<cls_lock_set_cookie_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_set_cookie_op)
+
 #endif
diff --git a/src/cls/rgw/cls_rgw.cc b/src/cls/rgw/cls_rgw.cc
index 46c2a3f..58247b5 100644
--- a/src/cls/rgw/cls_rgw.cc
+++ b/src/cls/rgw/cls_rgw.cc
@@ -28,6 +28,7 @@ cls_method_handle_t h_rgw_bucket_set_tag_timeout;
 cls_method_handle_t h_rgw_bucket_list;
 cls_method_handle_t h_rgw_bucket_check_index;
 cls_method_handle_t h_rgw_bucket_rebuild_index;
+cls_method_handle_t h_rgw_bucket_update_stats;
 cls_method_handle_t h_rgw_bucket_prepare_op;
 cls_method_handle_t h_rgw_bucket_complete_op;
 cls_method_handle_t h_rgw_bucket_link_olh;
@@ -52,9 +53,6 @@ cls_method_handle_t h_rgw_gc_list;
 cls_method_handle_t h_rgw_gc_remove;
 
 
-#define ROUND_BLOCK_SIZE 4096
-
-
 #define BI_PREFIX_CHAR 0x80
 
 #define BI_BUCKET_OBJS_INDEX          0
@@ -72,11 +70,6 @@ static string bucket_index_prefixes[] = { "", /* special handling for the objs l
                                           /* this must be the last index */
                                           "9999_",};
 
-static uint64_t get_rounded_size(uint64_t size)
-{
-  return (size + ROUND_BLOCK_SIZE - 1) & ~(ROUND_BLOCK_SIZE - 1);
-}
-
 static bool bi_is_objs_index(const string& s) {
   return ((unsigned char)s[0] != BI_PREFIX_CHAR);
 }
@@ -540,7 +533,7 @@ static int check_index(cls_method_context_t hctx, struct rgw_bucket_dir_header *
       struct rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
       stats.num_entries++;
       stats.total_size += entry.meta.accounted_size;
-      stats.total_size_rounded += get_rounded_size(entry.meta.accounted_size);
+      stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
 
       start_obj = kiter->first;
     }
@@ -583,6 +576,38 @@ int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferli
   return write_bucket_header(hctx, &calc_header);
 }
 
+int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  // decode request
+  rgw_cls_bucket_update_stats_op op;
+  auto iter = in->begin();
+  try {
+    ::decode(op, iter);
+  } catch (buffer::error& err) {
+    CLS_LOG(1, "ERROR: %s(): failed to decode request\n", __func__);
+    return -EINVAL;
+  }
+
+  struct rgw_bucket_dir_header header;
+  int rc = read_bucket_header(hctx, &header);
+  if (rc < 0) {
+    CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+    return rc;
+  }
+
+  for (auto& s : op.stats) {
+    auto& dest = header.stats[s.first];
+    if (op.absolute) {
+      dest = s.second;
+    } else {
+      dest.total_size += s.second.total_size;
+      dest.total_size_rounded += s.second.total_size_rounded;
+      dest.num_entries += s.second.num_entries;
+    }
+  }
+
+  return write_bucket_header(hctx, &header);
+}
 
 int rgw_bucket_init_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
 {
@@ -713,7 +738,7 @@ static void unaccount_entry(struct rgw_bucket_dir_header& header, struct rgw_buc
   struct rgw_bucket_category_stats& stats = header.stats[entry.meta.category];
   stats.num_entries--;
   stats.total_size -= entry.meta.accounted_size;
-  stats.total_size_rounded -= get_rounded_size(entry.meta.accounted_size);
+  stats.total_size_rounded -= cls_rgw_get_rounded_size(entry.meta.accounted_size);
 }
 
 static void log_entry(const char *func, const char *str, struct rgw_bucket_dir_entry *entry)
@@ -898,7 +923,7 @@ int rgw_bucket_complete_op(cls_method_context_t hctx, bufferlist *in, bufferlist
       entry.tag = op.tag;
       stats.num_entries++;
       stats.total_size += meta.accounted_size;
-      stats.total_size_rounded += get_rounded_size(meta.accounted_size);
+      stats.total_size_rounded += cls_rgw_get_rounded_size(meta.accounted_size);
       bufferlist new_key_bl;
       ::encode(entry, new_key_bl);
       int ret = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
@@ -1928,7 +1953,7 @@ int rgw_dir_suggest_changes(cls_method_context_t hctx, bufferlist *in, bufferlis
         CLS_LOG(10, "total_entries: %" PRId64 " -> %" PRId64 "\n", old_stats.num_entries, old_stats.num_entries - 1);
         old_stats.num_entries--;
         old_stats.total_size -= cur_disk.meta.accounted_size;
-        old_stats.total_size_rounded -= get_rounded_size(cur_disk.meta.accounted_size);
+        old_stats.total_size_rounded -= cls_rgw_get_rounded_size(cur_disk.meta.accounted_size);
         header_changed = true;
       }
       struct rgw_bucket_category_stats& stats =
@@ -1955,7 +1980,7 @@ int rgw_dir_suggest_changes(cls_method_context_t hctx, bufferlist *in, bufferlis
                 cur_change.key.name.c_str(), cur_change.key.instance.c_str(), stats.num_entries, stats.num_entries + 1);
         stats.num_entries++;
         stats.total_size += cur_change.meta.accounted_size;
-        stats.total_size_rounded += get_rounded_size(cur_change.meta.accounted_size);
+        stats.total_size_rounded += cls_rgw_get_rounded_size(cur_change.meta.accounted_size);
         header_changed = true;
         cur_change.index_ver = header.ver;
         bufferlist cur_state_bl;
@@ -2260,6 +2285,11 @@ static int list_plain_entries(cls_method_context_t hctx, const string& name, con
 {
   string filter = name;
   string start_key = marker;
+
+  string first_instance_idx;
+  encode_obj_versioned_data_key(string(), &first_instance_idx);
+  string end_key = first_instance_idx;
+
   int count = 0;
   map<string, bufferlist> keys;
   do {
@@ -2275,6 +2305,11 @@ static int list_plain_entries(cls_method_context_t hctx, const string& name, con
 
     map<string, bufferlist>::iterator iter;
     for (iter = keys.begin(); iter != keys.end(); ++iter) {
+      if (iter->first >= end_key) {
+        /* past the end of plain namespace */
+        return count;
+      }
+
       rgw_cls_bi_entry entry;
       entry.type = PlainIdx;
       entry.idx = iter->first;
@@ -2292,12 +2327,15 @@ static int list_plain_entries(cls_method_context_t hctx, const string& name, con
 
       CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str());
 
-      if (e.key.name != name) {
+      if (!name.empty() && e.key.name != name) {
         return count;
       }
 
       entries->push_back(entry);
       count++;
+      if (count >= (int)max) {
+        return count;
+      }
       start_key = entry.idx;
     }
   } while (!keys.empty());
@@ -2311,13 +2349,20 @@ static int list_instance_entries(cls_method_context_t hctx, const string& name,
   cls_rgw_obj_key key(name);
   string first_instance_idx;
   encode_obj_versioned_data_key(key, &first_instance_idx);
-  string start_key = first_instance_idx;
+  string start_key;
+
+  if (!name.empty()) {
+    start_key = first_instance_idx;
+  } else {
+    start_key = BI_PREFIX_CHAR;
+    start_key.append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
+  }
+  string filter = start_key;
   if (bi_entry_gt(marker, start_key)) {
     start_key = marker;
   }
   int count = 0;
   map<string, bufferlist> keys;
-  string filter = first_instance_idx;
   bool started = true;
   do {
     if (count >= (int)max) {
@@ -2329,13 +2374,13 @@ static int list_instance_entries(cls_method_context_t hctx, const string& name,
     if (started) {
       ret = cls_cxx_map_get_val(hctx, start_key, &keys[start_key]);
       if (ret == -ENOENT) {
-        ret = cls_cxx_map_get_vals(hctx, start_key, filter, BI_GET_NUM_KEYS, &keys);
+        ret = cls_cxx_map_get_vals(hctx, start_key, string(), BI_GET_NUM_KEYS, &keys);
       }
       started = false;
     } else {
-      ret = cls_cxx_map_get_vals(hctx, start_key, filter, BI_GET_NUM_KEYS, &keys);
+      ret = cls_cxx_map_get_vals(hctx, start_key, string(), BI_GET_NUM_KEYS, &keys);
     }
-    CLS_LOG(20, "%s(): start_key=%s keys.size()=%d", __func__, escape_str(start_key).c_str(), (int)keys.size());
+    CLS_LOG(20, "%s(): start_key=%s first_instance_idx=%s keys.size()=%d", __func__, escape_str(start_key).c_str(), escape_str(first_instance_idx).c_str(), (int)keys.size());
     if (ret < 0) {
       return ret;
     }
@@ -2347,6 +2392,10 @@ static int list_instance_entries(cls_method_context_t hctx, const string& name,
       entry.idx = iter->first;
       entry.data = iter->second;
 
+      if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+        return count;
+      }
+
       CLS_LOG(20, "%s(): entry.idx=%s", __func__, escape_str(entry.idx).c_str());
 
       bufferlist::iterator biter = entry.data.begin();
@@ -2359,7 +2408,85 @@ static int list_instance_entries(cls_method_context_t hctx, const string& name,
         return -EIO;
       }
 
-      if (e.key.name != name) {
+      if (!name.empty() && e.key.name != name) {
+        return count;
+      }
+
+      entries->push_back(entry);
+      count++;
+      start_key = entry.idx;
+    }
+  } while (!keys.empty());
+
+  return count;
+}
+
+static int list_olh_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max,
+                            list<rgw_cls_bi_entry> *entries)
+{
+  cls_rgw_obj_key key(name);
+  string first_instance_idx;
+  encode_olh_data_key(key, &first_instance_idx);
+  string start_key;
+
+  if (!name.empty()) {
+    start_key = first_instance_idx;
+  } else {
+    start_key = BI_PREFIX_CHAR;
+    start_key.append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
+  }
+  string filter = start_key;
+  if (bi_entry_gt(marker, start_key)) {
+    start_key = marker;
+  }
+  int count = 0;
+  map<string, bufferlist> keys;
+  bool started = true;
+  do {
+    if (count >= (int)max) {
+      return count;
+    }
+    keys.clear();
+#define BI_GET_NUM_KEYS 128
+    int ret;
+    if (started) {
+      ret = cls_cxx_map_get_val(hctx, start_key, &keys[start_key]);
+      if (ret == -ENOENT) {
+        ret = cls_cxx_map_get_vals(hctx, start_key, string(), BI_GET_NUM_KEYS, &keys);
+      }
+      started = false;
+    } else {
+      ret = cls_cxx_map_get_vals(hctx, start_key, string(), BI_GET_NUM_KEYS, &keys);
+    }
+    CLS_LOG(20, "%s(): start_key=%s first_instance_idx=%s keys.size()=%d", __func__, escape_str(start_key).c_str(), escape_str(first_instance_idx).c_str(), (int)keys.size());
+    if (ret < 0) {
+      return ret;
+    }
+
+    map<string, bufferlist>::iterator iter;
+    for (iter = keys.begin(); iter != keys.end(); ++iter) {
+      rgw_cls_bi_entry entry;
+      entry.type = OLHIdx;
+      entry.idx = iter->first;
+      entry.data = iter->second;
+
+      if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+        return count;
+      }
+
+      CLS_LOG(20, "%s(): entry.idx=%s", __func__, escape_str(entry.idx).c_str());
+
+      bufferlist::iterator biter = entry.data.begin();
+
+      rgw_bucket_olh_entry e;
+      try {
+        ::decode(e, biter);
+      } catch (buffer::error& err) {
+        CLS_LOG(0, "ERROR: %s(): failed to decode buffer (size=%d)", __func__, entry.data.length());
+        return -EIO;
+      }
+
+      if (!name.empty() && e.key.name != name) {
         return count;
       }
 
@@ -2388,31 +2515,37 @@ static int rgw_bi_list_op(cls_method_context_t hctx, bufferlist *in, bufferlist
 
   string filter = op.name;
 #define MAX_BI_LIST_ENTRIES 1000
-  int32_t max = (op.max < MAX_BI_LIST_ENTRIES ? op.max : MAX_BI_LIST_ENTRIES);
+  int32_t max = (op.max < MAX_BI_LIST_ENTRIES ? op.max : MAX_BI_LIST_ENTRIES) + 1; /* one extra entry for identifying truncation */
   string start_key = op.marker;
-  int ret = list_plain_entries(hctx, op.name, op.marker, max, &op_ret.entries);
+  int ret = list_plain_entries(hctx, op.name, op.marker, max, &op_ret.entries);
   if (ret < 0) {
     CLS_LOG(0, "ERROR: %s(): list_plain_entries retured ret=%d", __func__, ret);
     return ret;
   }
   int count = ret;
 
+  CLS_LOG(20, "found %d plain entries", count);
+
   ret = list_instance_entries(hctx, op.name, op.marker, max - count, &op_ret.entries);
   if (ret < 0) {
     CLS_LOG(0, "ERROR: %s(): list_instance_entries retured ret=%d", __func__, ret);
     return ret;
   }
 
-  cls_rgw_obj_key key(op.name);
-  rgw_cls_bi_entry entry;
-  encode_olh_data_key(key, &entry.idx);
-  ret = cls_cxx_map_get_val(hctx, entry.idx, &entry.data);
-  if (ret < 0 && ret != -ENOENT) {
-    CLS_LOG(0, "ERROR: %s(): cls_cxx_map_get_val retured ret=%d", __func__, ret);
+  count += ret;
+
+  ret = list_olh_entries(hctx, op.name, op.marker, max - count, &op_ret.entries);
+  if (ret < 0) {
+    CLS_LOG(0, "ERROR: %s(): list_olh_entries retured ret=%d", __func__, ret);
     return ret;
-  } else if (ret >= 0) {
-    entry.type = OLHIdx;
-    op_ret.entries.push_back(entry);
+  }
+
+  count += ret;
+
+  op_ret.is_truncated = (count >= max);
+  while (count >= max) {
+    op_ret.entries.pop_back();
+    count--;
   }
 
   ::encode(op_ret, *out);
@@ -3233,6 +3366,7 @@ void __cls_init()
   cls_register_cxx_method(h_class, "bucket_list", CLS_METHOD_RD, rgw_bucket_list, &h_rgw_bucket_list);
   cls_register_cxx_method(h_class, "bucket_check_index", CLS_METHOD_RD, rgw_bucket_check_index, &h_rgw_bucket_check_index);
   cls_register_cxx_method(h_class, "bucket_rebuild_index", CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_rebuild_index, &h_rgw_bucket_rebuild_index);
+  cls_register_cxx_method(h_class, "bucket_update_stats", CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_update_stats, &h_rgw_bucket_update_stats);
   cls_register_cxx_method(h_class, "bucket_prepare_op", CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_prepare_op, &h_rgw_bucket_prepare_op);
   cls_register_cxx_method(h_class, "bucket_complete_op", CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_complete_op, &h_rgw_bucket_complete_op);
   cls_register_cxx_method(h_class, "bucket_link_olh", CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_link_olh, &h_rgw_bucket_link_olh);
diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc
index 1cf1156..c144860 100644
--- a/src/cls/rgw/cls_rgw_client.cc
+++ b/src/cls/rgw/cls_rgw_client.cc
@@ -138,6 +138,17 @@ int CLSRGWIssueSetTagTimeout::issue_op(int shard_id, const string& oid)
   return issue_bucket_set_tag_timeout_op(io_ctx, oid, tag_timeout, &manager);
 }
 
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o, bool absolute,
+                                 const map<uint8_t, rgw_bucket_category_stats>& stats)
+{
+  struct rgw_cls_bucket_update_stats_op call;
+  call.absolute = absolute;
+  call.stats = stats;
+  bufferlist in;
+  ::encode(call, in);
+  o.exec("rgw", "bucket_update_stats", in);
+}
+
 void cls_rgw_bucket_prepare_op(ObjectWriteOperation& o, RGWModifyOp op, string& tag,
                                const cls_rgw_obj_key& key, const string& locator, bool log_op,
                                uint16_t bilog_flags)
@@ -277,6 +288,15 @@ int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry&
   return 0;
 }
 
+void cls_rgw_bi_put(ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry)
+{
+  bufferlist in, out;
+  struct rgw_cls_bi_put_op call;
+  call.entry = entry;
+  ::encode(call, in);
+  op.exec("rgw", "bi_put", in);
+}
+
 int cls_rgw_bi_list(librados::IoCtx& io_ctx, const string oid,
                    const string& name, const string& marker, uint32_t max,
                    list<rgw_cls_bi_entry> *entries, bool *is_truncated)
diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h
index 1b02a5e..4331cca 100644
--- a/src/cls/rgw/cls_rgw_client.h
+++ b/src/cls/rgw/cls_rgw_client.h
@@ -304,6 +304,9 @@ public:
     CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
 };
 
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o, bool absolute,
+                                 const map<uint8_t, rgw_bucket_category_stats>& stats);
+
 void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, string& tag,
                                const cls_rgw_obj_key& key, const string& locator, bool log_op,
                                uint16_t bilog_op);
@@ -324,6 +327,7 @@ int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
                    BIIndexType index_type, cls_rgw_obj_key& key,
                    rgw_cls_bi_entry *entry);
 int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry& entry);
+void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry);
 int cls_rgw_bi_list(librados::IoCtx& io_ctx, const string oid,
                    const string& name, const string& marker, uint32_t max,
                    list<rgw_cls_bi_entry> *entries, bool *is_truncated);
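
A sketch of how the new cls_rgw_bucket_update_stats() call might be used to apply a stats delta to a single bucket index shard; the shard object name and category value are placeholders:

    #include "cls/rgw/cls_rgw_client.h"
    #include "include/rados/librados.hpp"

    // Account one 1-byte object against category 0 of a bucket index shard.
    int bump_shard_stats(librados::IoCtx& index_ctx, const std::string& shard_oid)
    {
      std::map<uint8_t, rgw_bucket_category_stats> deltas;
      rgw_bucket_category_stats& s = deltas[0];            // category 0: placeholder
      s.num_entries = 1;
      s.total_size = 1;
      s.total_size_rounded = cls_rgw_get_rounded_size(1);  // 4096

      librados::ObjectWriteOperation op;
      // absolute=false: values are added to the stored stats rather than replacing them
      cls_rgw_bucket_update_stats(op, false, deltas);
      return index_ctx.operate(shard_oid, &op);
    }
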
diff --git a/src/cls/rgw/cls_rgw_ops.cc b/src/cls/rgw/cls_rgw_ops.cc
index 2cf3619..0b3701b 100644
--- a/src/cls/rgw/cls_rgw_ops.cc
+++ b/src/cls/rgw/cls_rgw_ops.cc
@@ -343,6 +343,29 @@ void rgw_cls_check_index_ret::dump(Formatter *f) const
   ::encode_json("calculated_header", calculated_header, f);
 }
 
+void rgw_cls_bucket_update_stats_op::generate_test_instances(list<rgw_cls_bucket_update_stats_op*>& o)
+{
+  rgw_cls_bucket_update_stats_op *r = new rgw_cls_bucket_update_stats_op;
+  r->absolute = true;
+  rgw_bucket_category_stats& s = r->stats[0];
+  s.total_size = 1;
+  s.total_size_rounded = 4096;
+  s.num_entries = 1;
+  o.push_back(r);
+
+  o.push_back(new rgw_cls_bucket_update_stats_op);
+}
+
+void rgw_cls_bucket_update_stats_op::dump(Formatter *f) const
+{
+  ::encode_json("absolute", absolute, f);
+  map<int, rgw_bucket_category_stats> s;
+  for (auto& entry : stats) {
+    s[(int)entry.first] = entry.second;
+  }
+  ::encode_json("stats", s, f);
+}
+
 void cls_rgw_bi_log_list_op::dump(Formatter *f) const
 {
   f->dump_string("marker", marker);
diff --git a/src/cls/rgw/cls_rgw_ops.h b/src/cls/rgw/cls_rgw_ops.h
index 15a638a..90241eb 100644
--- a/src/cls/rgw/cls_rgw_ops.h
+++ b/src/cls/rgw/cls_rgw_ops.h
@@ -443,6 +443,30 @@ struct rgw_cls_check_index_ret
 };
 WRITE_CLASS_ENCODER(rgw_cls_check_index_ret)
 
+struct rgw_cls_bucket_update_stats_op
+{
+  bool absolute{false};
+  map<uint8_t, rgw_bucket_category_stats> stats;
+
+  rgw_cls_bucket_update_stats_op() {}
+
+  void encode(bufferlist &bl) const {
+    ENCODE_START(1, 1, bl);
+    ::encode(absolute, bl);
+    ::encode(stats, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(bufferlist::iterator &bl) {
+    DECODE_START(1, bl);
+    ::decode(absolute, bl);
+    ::decode(stats, bl);
+    DECODE_FINISH(bl);
+  }
+  void dump(Formatter *f) const;
+  static void generate_test_instances(list<rgw_cls_bucket_update_stats_op *>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_bucket_update_stats_op)
+
 struct rgw_cls_obj_remove_op {
   list<string> keep_attr_prefixes;
 
diff --git a/src/cls/rgw/cls_rgw_types.cc b/src/cls/rgw/cls_rgw_types.cc
index 79a5272..27a413e 100644
--- a/src/cls/rgw/cls_rgw_types.cc
+++ b/src/cls/rgw/cls_rgw_types.cc
@@ -237,6 +237,38 @@ void rgw_cls_bi_entry::dump(Formatter *f) const
   dump_bi_entry(data, type, f);
 }
 
+bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key, uint8_t *category, rgw_bucket_category_stats *accounted_stats)
+{
+  bool account = false;
+  bufferlist::iterator iter = data.begin();
+  switch (type) {
+    case PlainIdx:
+    case InstanceIdx:
+      {
+        rgw_bucket_dir_entry entry;
+        ::decode(entry, iter);
+        *key = entry.key;
+        *category = entry.meta.category;
+        accounted_stats->num_entries++;
+        accounted_stats->total_size += entry.meta.accounted_size;
+        accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+        account = true;
+      }
+      break;
+    case OLHIdx:
+      {
+        rgw_bucket_olh_entry entry;
+        ::decode(entry, iter);
+        *key = entry.key;
+      }
+      break;
+    default:
+      break;
+  }
+
+  return account;
+}
+
 void rgw_bucket_olh_entry::dump(Formatter *f) const
 {
   encode_json("key", key, f);
diff --git a/src/cls/rgw/cls_rgw_types.h b/src/cls/rgw/cls_rgw_types.h
index cf143ce..c7942ac 100644
--- a/src/cls/rgw/cls_rgw_types.h
+++ b/src/cls/rgw/cls_rgw_types.h
@@ -49,6 +49,13 @@ enum RGWCheckMTimeType {
   CLS_RGW_CHECK_TIME_MTIME_GE = 4,
 };
 
+#define ROUND_BLOCK_SIZE 4096
+
+static inline uint64_t cls_rgw_get_rounded_size(uint64_t size)
+{
+  return (size + ROUND_BLOCK_SIZE - 1) & ~(ROUND_BLOCK_SIZE - 1);
+}
+
 struct rgw_bucket_pending_info {
   RGWPendingState state;
   ceph::real_time timestamp;
@@ -361,6 +368,8 @@ enum BIIndexType {
   OLHIdx        = 3,
 };
 
+struct rgw_bucket_category_stats;
+
 struct rgw_cls_bi_entry {
   BIIndexType type;
   string idx;
@@ -388,6 +397,8 @@ struct rgw_cls_bi_entry {
 
   void dump(Formatter *f) const;
   void decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key = NULL);
+
+  bool get_info(cls_rgw_obj_key *key, uint8_t *category, rgw_bucket_category_stats *accounted_stats);
 };
 WRITE_CLASS_ENCODER(rgw_cls_bi_entry)
 
@@ -886,6 +897,10 @@ struct cls_rgw_obj_chain {
   static void generate_test_instances(list<cls_rgw_obj_chain*>& ls) {
     ls.push_back(new cls_rgw_obj_chain);
   }
+
+  bool empty() {
+    return objs.empty();
+  }
 };
 WRITE_CLASS_ENCODER(cls_rgw_obj_chain)
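
The cls_rgw_get_rounded_size() helper added near the top of this header rounds sizes up to the 4 KiB accounting block used by the bucket stats; for illustration:

    #include <cassert>
    #include "cls/rgw/cls_rgw_types.h"

    int main()
    {
      assert(cls_rgw_get_rounded_size(0)    == 0);
      assert(cls_rgw_get_rounded_size(1)    == 4096);
      assert(cls_rgw_get_rounded_size(4096) == 4096);
      assert(cls_rgw_get_rounded_size(4097) == 8192);
      return 0;
    }
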
 
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 86df75d..d7ab284 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -82,12 +82,14 @@ libcommon_internal_la_SOURCES = \
 
 common/PluginRegistry.cc: ./ceph_ver.h
 
+libcommon_internal_la_SOURCES += \
+	common/ceph_json.cc \
+	common/util.cc
+
 if ENABLE_SERVER
 libcommon_internal_la_SOURCES += \
 	common/xattr.c \
 	common/ipaddr.cc \
-	common/ceph_json.cc \
-	common/util.cc \
 	common/pick_address.cc
 endif
 
diff --git a/src/common/ceph_hash.cc b/src/common/ceph_hash.cc
index c581806..c738e31 100644
--- a/src/common/ceph_hash.cc
+++ b/src/common/ceph_hash.cc
@@ -82,7 +82,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
  */
 unsigned ceph_str_hash_linux(const char *str, unsigned length)
 {
-	unsigned long hash = 0;
+	unsigned hash = 0;
 
 	while (length--) {
 		unsigned char c = *str++;
diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index 3795f0e..789e041 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -748,12 +748,14 @@ OPTION(osd_recovery_delay_start, OPT_FLOAT, 0)
 OPTION(osd_recovery_max_active, OPT_INT, 3)
 OPTION(osd_recovery_max_single_start, OPT_INT, 1)
 OPTION(osd_recovery_max_chunk, OPT_U64, 8<<20)  // max size of push chunk
+OPTION(osd_recovery_max_omap_entries_per_chunk, OPT_U64, 64000) // max number of omap entries per chunk; 0 to disable limit
 OPTION(osd_copyfrom_max_chunk, OPT_U64, 8<<20)   // max size of a COPYFROM chunk
 OPTION(osd_push_per_object_cost, OPT_U64, 1000)  // push cost per object
 OPTION(osd_max_push_cost, OPT_U64, 8<<20)  // max size of push message
 OPTION(osd_max_push_objects, OPT_U64, 10)  // max objects in single push op
 OPTION(osd_recovery_forget_lost_objects, OPT_BOOL, false)   // off for now
 OPTION(osd_max_scrubs, OPT_INT, 1)
+OPTION(osd_scrub_during_recovery, OPT_BOOL, true) // Allow new scrubs to start while recovery is active on the OSD
 OPTION(osd_scrub_begin_hour, OPT_INT, 0)
 OPTION(osd_scrub_end_hour, OPT_INT, 24)
 OPTION(osd_scrub_load_threshold, OPT_FLOAT, 0.5)
@@ -800,6 +802,8 @@ OPTION(osd_debug_skip_full_check_in_backfill_reservation, OPT_BOOL, false)
 OPTION(osd_debug_reject_backfill_probability, OPT_DOUBLE, 0)
 OPTION(osd_debug_inject_copyfrom_error, OPT_BOOL, false)  // inject failure during copyfrom completion
 OPTION(osd_debug_randomize_hobject_sort_order, OPT_BOOL, false)
+OPTION(osd_enxio_on_misdirected_op, OPT_BOOL, false)
+OPTION(osd_debug_verify_cached_snaps, OPT_BOOL, false)
 OPTION(osd_enable_op_tracker, OPT_BOOL, true) // enable/disable OSD op tracking
 OPTION(osd_num_op_tracker_shard, OPT_U32, 32) // The number of shards for holding the ops
 OPTION(osd_op_history_size, OPT_U32, 20)    // Max number of completed ops to track
@@ -1173,6 +1177,7 @@ OPTION(rbd_tracing, OPT_BOOL, false) // true if LTTng-UST tracepoints should be
 OPTION(rbd_validate_pool, OPT_BOOL, true) // true if empty pools should be validated for RBD compatibility
 OPTION(rbd_validate_names, OPT_BOOL, true) // true if image specs should be validated
 OPTION(rbd_mirroring_resync_after_disconnect, OPT_BOOL, false) // automatically start image resync after mirroring is disconnected due to being laggy
+OPTION(rbd_auto_exclusive_lock_until_manual_request, OPT_BOOL, true) // whether to automatically acquire/release exclusive lock until it is explicitly requested, i.e. before we know the user of librbd is properly using the lock API
 
 /*
  * The following options change the behavior for librbd's image creation methods that
@@ -1284,6 +1289,8 @@ OPTION(rgw_keystone_token_cache_size, OPT_INT, 10000)  // max number of entries
 OPTION(rgw_keystone_revocation_interval, OPT_INT, 15 * 60)  // seconds between tokens revocation check
 OPTION(rgw_keystone_verify_ssl, OPT_BOOL, true) // should we try to verify keystone's ssl
 OPTION(rgw_keystone_implicit_tenants, OPT_BOOL, false)  // create new users in their own tenants of the same name
+OPTION(rgw_cross_domain_policy, OPT_STR, "<allow-access-from domain=\"*\" secure=\"false\" />")
+OPTION(rgw_healthcheck_disabling_path, OPT_STR, "") // path that existence causes the healthcheck to respond 503
 OPTION(rgw_s3_auth_use_rados, OPT_BOOL, true)  // should we try to use the internal credentials for s3?
 OPTION(rgw_s3_auth_use_keystone, OPT_BOOL, false)  // should we try to use keystone for s3?
 
@@ -1327,6 +1334,9 @@ OPTION(rgw_nfs_lru_lanes, OPT_INT, 5)
 OPTION(rgw_nfs_lru_lane_hiwat, OPT_INT, 911)
 OPTION(rgw_nfs_fhcache_partitions, OPT_INT, 3)
 OPTION(rgw_nfs_fhcache_size, OPT_INT, 2017) /* 3*2017=6051 */
+OPTION(rgw_nfs_namespace_expire_secs, OPT_INT, 300) /* namespace invalidate
+						     * timer */
+OPTION(rgw_nfs_max_gc, OPT_INT, 300) /* max gc events per cycle */
 OPTION(rgw_nfs_write_completion_interval_s, OPT_INT, 10) /* stateless (V3)
 							  * commit
 							  * delay */
@@ -1335,6 +1345,7 @@ OPTION(rgw_zone, OPT_STR, "") // zone name
 OPTION(rgw_zone_root_pool, OPT_STR, ".rgw.root")    // pool where zone specific info is stored
 OPTION(rgw_default_zone_info_oid, OPT_STR, "default.zone")  // oid where default zone info is stored
 OPTION(rgw_region, OPT_STR, "") // region name
+OPTION(rgw_region_root_pool, OPT_STR, ".rgw.root")  // pool where all region info is stored
 OPTION(rgw_default_region_info_oid, OPT_STR, "default.region")  // oid where default region info is stored
 OPTION(rgw_zonegroup, OPT_STR, "") // zone group name
 OPTION(rgw_zonegroup_root_pool, OPT_STR, ".rgw.root")  // pool where all zone group info is stored
@@ -1421,6 +1432,7 @@ OPTION(rgw_objexp_hints_num_shards, OPT_U32, 127) // maximum number of parts in
 OPTION(rgw_objexp_chunk_size, OPT_U32, 100) // maximum number of entries in a single operation when processing objexp data
 
 OPTION(rgw_enable_static_website, OPT_BOOL, false) // enable static website feature
+OPTION(rgw_log_http_headers, OPT_STR, "" ) // list of HTTP headers to log when seen, ignores case (e.g., http_x_forwarded_for
 
 OPTION(rgw_num_async_rados_threads, OPT_INT, 32) // num of threads to use for async rados operations
 OPTION(rgw_md_notify_interval_msec, OPT_INT, 200) // metadata changes notification interval to followers
diff --git a/src/common/util.cc b/src/common/util.cc
index 2c60cb2..ee4b84e 100644
--- a/src/common/util.cc
+++ b/src/common/util.cc
@@ -115,47 +115,72 @@ int get_fs_stats(ceph_data_stats_t &stats, const char *path)
   return 0;
 }
 
-static bool lsb_release_set(char *buf, const char *prefix,
+static char* value_sanitize(char *value)
+{
+  while (isspace(*value) || *value == '"')
+    value++;
+
+  char* end = value + strlen(value) - 1;
+  while (end > value && (isspace(*end) || *end == '"'))
+    end--;
+
+  *(end + 1) = '\0';
+
+  return value;
+}
+
+static bool value_set(char *buf, const char *prefix,
 			    map<string, string> *pm, const char *key)
 {
   if (strncmp(buf, prefix, strlen(prefix))) {
     return false;
   }
 
-  if (buf[strlen(buf)-1] == '\n')
-    buf[strlen(buf)-1] = '\0';
-
-  char *value = buf + strlen(prefix) + 1;
-  (*pm)[key] = value;
+  (*pm)[key] = value_sanitize(buf + strlen(prefix));
   return true;
 }
 
-static void lsb_release_parse(map<string, string> *m, CephContext *cct)
+static void file_values_parse(const map<string, string>& kvm, FILE *fp, map<string, string> *m, CephContext *cct) {
+  char buf[512];
+  while (fgets(buf, sizeof(buf) - 1, fp) != NULL) {
+    for (auto& kv : kvm) {
+      if (value_set(buf, kv.second.c_str(), m, kv.first.c_str()))
+        continue;
+    }
+  }
+}
+
+static bool os_release_parse(map<string, string> *m, CephContext *cct)
 {
-  FILE *fp = popen("lsb_release -idrc", "r");
+  static const map<string, string> kvm = {
+    { "distro", "ID=" },
+    { "distro_description", "PRETTY_NAME=" },
+    { "distro_version", "VERSION_ID=" }
+  };
+
+  FILE *fp = fopen("/etc/os-release", "r");
   if (!fp) {
     int ret = -errno;
-    lderr(cct) << "lsb_release_parse - failed to call lsb_release binary with error: " << cpp_strerror(ret) << dendl;
-    return;
+    lderr(cct) << "os_release_parse - failed to open /etc/os-release: " << cpp_strerror(ret) << dendl;
+    return false;
   }
 
-  char buf[512];
-  while (fgets(buf, sizeof(buf) - 1, fp) != NULL) {
-    if (lsb_release_set(buf, "Distributor ID:", m, "distro"))
-      continue;
-    if (lsb_release_set(buf, "Description:", m, "distro_description"))
-      continue;
-    if (lsb_release_set(buf, "Release:", m, "distro_version"))
-      continue;
-    if (lsb_release_set(buf, "Codename:", m, "distro_codename"))
-      continue;
-
-    lderr(cct) << "unhandled output: " << buf << dendl;
+  file_values_parse(kvm, fp, m, cct);
+
+  fclose(fp);
+
+  return true;
+}
+
+static void distro_detect(map<string, string> *m, CephContext *cct)
+{
+  if (!os_release_parse(m, cct)) {
+    lderr(cct) << "distro_detect - /etc/os-release is required" << dendl;
   }
 
-  if (pclose(fp)) {
-    int ret = -errno;
-    lderr(cct) << "lsb_release_parse - pclose failed: " << cpp_strerror(ret) << dendl;
+  for (const char* rk: {"distro", "distro_version"}) {
+    if (m->find(rk) == m->end())
+      lderr(cct) << "distro_detect - can't detect " << rk << dendl;
   }
 }
 
@@ -218,7 +243,7 @@ void collect_sys_info(map<string, string> *m, CephContext *cct)
   }
 
   // distro info
-  lsb_release_parse(m, cct);
+  distro_detect(m, cct);
 }
 
 void dump_services(Formatter* f, const map<string, list<int> >& services, const char* type)
diff --git a/src/crush/CrushCompiler.cc b/src/crush/CrushCompiler.cc
index 22e5913..b1e4a27 100644
--- a/src/crush/CrushCompiler.cc
+++ b/src/crush/CrushCompiler.cc
@@ -746,8 +746,8 @@ int CrushCompiler::parse_crush(iter_t const& i)
 { 
   find_used_bucket_ids(i);
 
-  int r = 0;
   for (iter_t p = i->children.begin(); p != i->children.end(); p++) {
+    int r = 0;
     switch (p->value.id().to_long()) {
     case crush_grammar::_tunable:
       r = parse_tunable(p);
@@ -767,11 +767,11 @@ int CrushCompiler::parse_crush(iter_t const& i)
     default:
       assert(0);
     }
+    if (r < 0) {
+      return r;
+    }
   }
 
-  if (r < 0)
-    return r;
-
   //err << "max_devices " << crush.get_max_devices() << std::endl;
   crush.finalize();
   
diff --git a/src/global/global_init.cc b/src/global/global_init.cc
index 38636df..daff81a 100644
--- a/src/global/global_init.cc
+++ b/src/global/global_init.cc
@@ -36,6 +36,10 @@
 #include <errno.h>
 #include <deque>
 
+#ifdef HAVE_SYS_PRCTL_H
+#include <sys/prctl.h>
+#endif
+
 #define dout_subsys ceph_subsys_
 
 static void global_init_set_globals(CephContext *cct)
@@ -270,6 +274,12 @@ void global_init(std::vector < const char * > *alt_def_args,
     }
   }
 
+#if defined(HAVE_SYS_PRCTL_H)
+  if (prctl(PR_SET_DUMPABLE, 1) == -1) {
+    cerr << "warning: unable to set dumpable flag: " << cpp_strerror(errno) << std::endl;
+  }
+#endif
+
   // Expand metavariables. Invoke configuration observers. Open log file.
   g_conf->apply_changes(NULL);
 
diff --git a/src/include/ceph_fs.h b/src/include/ceph_fs.h
index e8f5f2f..3f11286 100644
--- a/src/include/ceph_fs.h
+++ b/src/include/ceph_fs.h
@@ -28,7 +28,6 @@
 
 #define CEPH_INO_ROOT   1
 #define CEPH_INO_CEPH   2       /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3	/* used by ceph fuse for parent (..) */
 #define CEPH_INO_LOST_AND_FOUND 4	/* reserved ino for use in recovery */
 
 /* arbitrary limit on max # of monitors (cluster of 3 is typical) */
diff --git a/src/include/filepath.h b/src/include/filepath.h
index 15c3783..b4c3307 100644
--- a/src/include/filepath.h
+++ b/src/include/filepath.h
@@ -130,6 +130,7 @@ class filepath {
 
   const string& last_dentry() const {
     if (bits.empty() && path.length() > 0) parse_bits();
+    assert(!bits.empty());
     return bits[ bits.size()-1 ];
   }
 
diff --git a/src/include/rados/rgw_file.h b/src/include/rados/rgw_file.h
index 02d7634..9eab48f 100644
--- a/src/include/rados/rgw_file.h
+++ b/src/include/rados/rgw_file.h
@@ -26,7 +26,7 @@ extern "C" {
 
 #define LIBRGW_FILE_VER_MAJOR 1
 #define LIBRGW_FILE_VER_MINOR 1
-#define LIBRGW_FILE_VER_EXTRA 0
+#define LIBRGW_FILE_VER_EXTRA 1
 
 #define LIBRGW_FILE_VERSION(maj, min, extra) ((maj << 16) + (min << 8) + extra)
 #define LIBRGW_FILE_VERSION_CODE LIBRGW_FILE_VERSION(LIBRGW_FILE_VER_MAJOR, LIBRGW_FILE_VER_MINOR, LIBRGW_FILE_VER_EXTRA)
@@ -116,6 +116,16 @@ int rgw_mount(librgw_t rgw, const char *uid, const char *key,
 	      uint32_t flags);
 
 /*
+ register invalidate callbacks
+*/
+#define RGW_REG_INVALIDATE_FLAG_NONE    0x0000
+
+typedef void (*rgw_fh_callback_t)(void *handle, struct rgw_fh_hk fh_hk);
+
+int rgw_register_invalidate(struct rgw_fs *rgw_fs, rgw_fh_callback_t cb,
+			    void *arg, uint32_t flags);
+
+/*
  detach rgw namespace
 */
 #define RGW_UMOUNT_FLAG_NONE    0x0000
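
A sketch of how a consumer of the rgw_file API (an NFS gateway, say) might hook the new invalidate callback, assuming the registered arg pointer is handed back as the callback's first parameter; the cache type and eviction logic are hypothetical:

    #include "include/rados/rgw_file.h"

    struct HandleCache { /* caller-owned file handle cache (hypothetical) */ };

    static void on_invalidate(void *arg, struct rgw_fh_hk fh_hk)
    {
      HandleCache *cache = static_cast<HandleCache*>(arg);
      (void)cache; (void)fh_hk;   // a real cache would evict the entry keyed by fh_hk
    }

    int register_invalidate_cb(struct rgw_fs *fs, HandleCache *cache)
    {
      return rgw_register_invalidate(fs, on_invalidate, cache,
                                     RGW_REG_INVALIDATE_FLAG_NONE);
    }
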
diff --git a/src/include/rbd/librbd.h b/src/include/rbd/librbd.h
index c636494..1aad578 100644
--- a/src/include/rbd/librbd.h
+++ b/src/include/rbd/librbd.h
@@ -32,7 +32,7 @@ extern "C" {
 
 #define LIBRBD_VER_MAJOR 0
 #define LIBRBD_VER_MINOR 1
-#define LIBRBD_VER_EXTRA 10
+#define LIBRBD_VER_EXTRA 11
 
 #define LIBRBD_VERSION(maj, min, extra) ((maj << 16) + (min << 8) + extra)
 
@@ -42,6 +42,7 @@ extern "C" {
 #define LIBRBD_SUPPORTS_AIO_FLUSH 1
 #define LIBRBD_SUPPORTS_INVALIDATE 1
 #define LIBRBD_SUPPORTS_AIO_OPEN 1
+#define LIBRBD_SUPPORTS_LOCKING 1
 
 #if __GNUC__ >= 4
   #define CEPH_RBD_API    __attribute__ ((visibility ("default")))
@@ -86,9 +87,9 @@ typedef struct {
   uint64_t obj_size;
   uint64_t num_objs;
   int order;
-  char block_name_prefix[RBD_MAX_BLOCK_NAME_SIZE];
-  int64_t parent_pool;			      /* deprecated */
-  char parent_name[RBD_MAX_IMAGE_NAME_SIZE];  /* deprecated */
+  char block_name_prefix[RBD_MAX_BLOCK_NAME_SIZE]; /* deprecated */
+  int64_t parent_pool;			           /* deprecated */
+  char parent_name[RBD_MAX_IMAGE_NAME_SIZE];       /* deprecated */
 } rbd_image_info_t;
 
 typedef enum {
@@ -134,6 +135,11 @@ typedef struct {
   bool up;
 } rbd_mirror_image_status_t;
 
+typedef enum {
+  RBD_LOCK_MODE_EXCLUSIVE = 0,
+  RBD_LOCK_MODE_SHARED = 1,
+} rbd_lock_mode_t;
+
 CEPH_RBD_API void rbd_version(int *major, int *minor, int *extra);
 
 /* image options */
@@ -287,6 +293,10 @@ CEPH_RBD_API int rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit);
 CEPH_RBD_API int rbd_get_stripe_count(rbd_image_t image,
                                       uint64_t *stripe_count);
 CEPH_RBD_API int rbd_get_overlap(rbd_image_t image, uint64_t *overlap);
+CEPH_RBD_API int rbd_get_id(rbd_image_t image, char *id, size_t id_len);
+CEPH_RBD_API int rbd_get_block_name_prefix(rbd_image_t image,
+                                           char *prefix, size_t prefix_len);
+CEPH_RBD_API int64_t rbd_get_data_pool_id(rbd_image_t image);
 CEPH_RBD_API int rbd_get_parent_info(rbd_image_t image,
 			             char *parent_poolname, size_t ppoolnamelen,
 			             char *parent_name, size_t pnamelen,
@@ -297,6 +307,16 @@ CEPH_RBD_API int rbd_set_image_notification(rbd_image_t image, int fd, int type)
 
 /* exclusive lock feature */
 CEPH_RBD_API int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner);
+CEPH_RBD_API int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode);
+CEPH_RBD_API int rbd_lock_release(rbd_image_t image);
+CEPH_RBD_API int rbd_lock_get_owners(rbd_image_t image,
+                                     rbd_lock_mode_t *lock_mode,
+                                     char **lock_owners,
+                                     size_t *max_lock_owners);
+CEPH_RBD_API void rbd_lock_get_owners_cleanup(char **lock_owners,
+                                              size_t lock_owner_count);
+CEPH_RBD_API int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
+                                const char *lock_owner);
 
 /* object map feature */
 CEPH_RBD_API int rbd_rebuild_object_map(rbd_image_t image,
diff --git a/src/include/rbd/librbd.hpp b/src/include/rbd/librbd.hpp
index 4dd4e6b..b3f192c 100644
--- a/src/include/rbd/librbd.hpp
+++ b/src/include/rbd/librbd.hpp
@@ -197,6 +197,9 @@ public:
   int resize(uint64_t size);
   int resize_with_progress(uint64_t size, ProgressContext& pctx);
   int stat(image_info_t &info, size_t infosize);
+  int get_id(std::string *id);
+  std::string get_block_name_prefix();
+  int64_t get_data_pool_id();
   int parent_info(std::string *parent_poolname, std::string *parent_name,
 		      std::string *parent_snapname);
   int old_format(uint8_t *old);
@@ -209,6 +212,11 @@ public:
 
   /* exclusive lock feature */
   int is_exclusive_lock_owner(bool *is_owner);
+  int lock_acquire(rbd_lock_mode_t lock_mode);
+  int lock_release();
+  int lock_get_owners(rbd_lock_mode_t *lock_mode,
+                      std::list<std::string> *lock_owners);
+  int lock_break(rbd_lock_mode_t lock_mode, const std::string &lock_owner);
 
   /* object map feature */
   int rebuild_object_map(ProgressContext &prog_ctx);
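
The new managed-lock C++ API added above might be exercised roughly as follows; the image name is a placeholder and error handling is abbreviated:

    #include "include/rbd/librbd.hpp"
    #include <iostream>
    #include <list>
    #include <string>

    int demo_lock(librados::IoCtx& io_ctx)
    {
      librbd::RBD rbd;
      librbd::Image image;
      int r = rbd.open(io_ctx, image, "test-image");    // placeholder image name
      if (r < 0)
        return r;

      r = image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE);
      if (r == 0) {
        rbd_lock_mode_t mode;
        std::list<std::string> owners;
        if (image.lock_get_owners(&mode, &owners) == 0 && !owners.empty())
          std::cout << "lock owner: " << owners.front() << std::endl;
        image.lock_release();
      }
      return image.close();
    }
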
diff --git a/src/include/str_list.h b/src/include/str_list.h
index 4ba0cad..12cf128 100644
--- a/src/include/str_list.h
+++ b/src/include/str_list.h
@@ -91,4 +91,12 @@ inline std::string str_join(const std::vector<std::string>& v, std::string sep)
   return r;
 }
 
+static inline std::vector<std::string> get_str_vec(const std::string& str)
+{
+  std::vector<std::string> str_vec;
+  const char *delims = ";,= \t";
+  get_str_vec(str, delims, str_vec);
+  return std::move(str_vec);
+}
+
 #endif
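
The new single-argument get_str_vec() overload splits on the default delimiter set ";,= \t"; a quick illustration:

    #include "include/str_list.h"
    #include <cassert>

    int main()
    {
      std::vector<std::string> v = get_str_vec("a;b, c\td=e");
      // five tokens: "a", "b", "c", "d", "e" (empty fields are skipped)
      assert(v.size() == 5);
      assert(v.front() == "a" && v.back() == "e");
      return 0;
    }
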
diff --git a/src/include/utime.h b/src/include/utime.h
index d08d804..da73f47 100644
--- a/src/include/utime.h
+++ b/src/include/utime.h
@@ -148,7 +148,7 @@ public:
   utime_t round_to_minute() {
     struct tm bdt;
     time_t tt = sec();
-    gmtime_r(&tt, &bdt);
+    localtime_r(&tt, &bdt);
     bdt.tm_sec = 0;
     tt = mktime(&bdt);
     return utime_t(tt, 0);
@@ -157,7 +157,7 @@ public:
   utime_t round_to_hour() {
     struct tm bdt;
     time_t tt = sec();
-    gmtime_r(&tt, &bdt);
+    localtime_r(&tt, &bdt);
     bdt.tm_sec = 0;
     bdt.tm_min = 0;
     tt = mktime(&bdt);
diff --git a/src/journal/FutureImpl.cc b/src/journal/FutureImpl.cc
index 1597c73..e46a3c9 100644
--- a/src/journal/FutureImpl.cc
+++ b/src/journal/FutureImpl.cc
@@ -39,13 +39,12 @@ void FutureImpl::flush(Context *on_safe) {
         m_contexts.push_back(on_safe);
       }
 
-      prev_future = prepare_flush(&flush_handlers);
+      prev_future = prepare_flush(&flush_handlers, m_lock);
     }
   }
 
   // instruct prior futures to flush as well
   while (prev_future) {
-    Mutex::Locker locker(prev_future->m_lock);
     prev_future = prev_future->prepare_flush(&flush_handlers);
   }
 
@@ -62,6 +61,12 @@ void FutureImpl::flush(Context *on_safe) {
 }
 
 FutureImplPtr FutureImpl::prepare_flush(FlushHandlers *flush_handlers) {
+  Mutex::Locker locker(m_lock);
+  return prepare_flush(flush_handlers, m_lock);
+}
+
+FutureImplPtr FutureImpl::prepare_flush(FlushHandlers *flush_handlers,
+                                        Mutex &lock) {
   assert(m_lock.is_locked());
 
   if (m_flush_state == FLUSH_STATE_NONE) {
diff --git a/src/journal/FutureImpl.h b/src/journal/FutureImpl.h
index 0054272..96d2d24 100644
--- a/src/journal/FutureImpl.h
+++ b/src/journal/FutureImpl.h
@@ -113,6 +113,7 @@ private:
   Contexts m_contexts;
 
   FutureImplPtr prepare_flush(FlushHandlers *flush_handlers);
+  FutureImplPtr prepare_flush(FlushHandlers *flush_handlers, Mutex &lock);
 
   void consistent(int r);
   void finish_unlock();
diff --git a/src/journal/JournalMetadata.cc b/src/journal/JournalMetadata.cc
index 7b09586..967529d 100644
--- a/src/journal/JournalMetadata.cc
+++ b/src/journal/JournalMetadata.cc
@@ -848,6 +848,8 @@ void JournalMetadata::handle_watch_reset() {
   if (r < 0) {
     if (r == -ENOENT) {
       ldout(m_cct, 5) << __func__ << ": journal header not found" << dendl;
+    } else if (r == -EBLACKLISTED) {
+      ldout(m_cct, 5) << __func__ << ": client blacklisted" << dendl;
     } else {
       lderr(m_cct) << __func__ << ": failed to watch journal: "
                    << cpp_strerror(r) << dendl;
@@ -871,6 +873,8 @@ void JournalMetadata::handle_watch_notify(uint64_t notify_id, uint64_t cookie) {
 void JournalMetadata::handle_watch_error(int err) {
   if (err == -ENOTCONN) {
     ldout(m_cct, 5) << "journal watch error: header removed" << dendl;
+  } else if (err == -EBLACKLISTED) {
+    lderr(m_cct) << "journal watch error: client blacklisted" << dendl;
   } else {
     lderr(m_cct) << "journal watch error: " << cpp_strerror(err) << dendl;
   }
diff --git a/src/librados/RadosClient.cc b/src/librados/RadosClient.cc
index 38dcb09..51bc0be 100644
--- a/src/librados/RadosClient.cc
+++ b/src/librados/RadosClient.cc
@@ -758,6 +758,12 @@ int librados::RadosClient::blacklist_add(const string& client_address,
   cmds.push_back(cmd.str());
   bufferlist inbl;
   int r = mon_command(cmds, inbl, NULL, NULL);
+  if (r < 0) {
+    return r;
+  }
+
+  // ensure we have the latest osd map epoch before proceeding
+  r = wait_for_latest_osdmap();
   return r;
 }
 
diff --git a/src/librados/librados.cc b/src/librados/librados.cc
index 05dfac5..1dbb43f 100644
--- a/src/librados/librados.cc
+++ b/src/librados/librados.cc
@@ -5514,7 +5514,7 @@ librados::ObjectCursor librados::IoCtx::object_list_begin()
 {
   hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_begin());
   ObjectCursor oc;
-  oc.c_cursor = (rados_object_list_cursor)h;
+  oc.set((rados_object_list_cursor)h);
   return oc;
 }
 
@@ -5523,7 +5523,7 @@ librados::ObjectCursor librados::IoCtx::object_list_end()
 {
   hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_end());
   librados::ObjectCursor oc;
-  oc.c_cursor = (rados_object_list_cursor)h;
+  oc.set((rados_object_list_cursor)h);
   return oc;
 }
 
diff --git a/src/librbd/AioImageRequestWQ.cc b/src/librbd/AioImageRequestWQ.cc
index a3e8aac..26e265e 100644
--- a/src/librbd/AioImageRequestWQ.cc
+++ b/src/librbd/AioImageRequestWQ.cc
@@ -6,6 +6,7 @@
 #include "librbd/AioCompletion.h"
 #include "librbd/AioImageRequest.h"
 #include "librbd/ExclusiveLock.h"
+#include "librbd/exclusive_lock/Policy.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/ImageState.h"
 #include "librbd/internal.h"
@@ -443,6 +444,17 @@ void AioImageRequestWQ::queue(AioImageRequest<> *req) {
 
   assert(m_image_ctx.owner_lock.is_locked());
   bool write_op = req->is_write_op();
+  bool lock_required = (write_op && is_lock_required()) ||
+    (!write_op && m_require_lock_on_read);
+
+  if (lock_required && !m_image_ctx.get_exclusive_lock_policy()->may_auto_request_lock()) {
+    lderr(cct) << "op requires exclusive lock" << dendl;
+    req->fail(-EROFS);
+    delete req;
+    finish_in_flight_op();
+    return;
+  }
+
   if (write_op) {
     m_queued_writes.inc();
   } else {
@@ -451,8 +463,7 @@ void AioImageRequestWQ::queue(AioImageRequest<> *req) {
 
   ThreadPool::PointerWQ<AioImageRequest<> >::queue(req);
 
-  if ((write_op && is_lock_required()) ||
-      (!write_op && m_require_lock_on_read)) {
+  if (lock_required) {
     m_image_ctx.exclusive_lock->request_lock(nullptr);
   }
 }
@@ -462,6 +473,7 @@ void AioImageRequestWQ::handle_refreshed(int r, AioImageRequest<> *req) {
   ldout(cct, 15) << "resuming IO after image refresh: r=" << r << ", "
                  << "req=" << req << dendl;
   if (r < 0) {
+    process_finish();
     req->fail(r);
     finish_queued_op(req);
     delete req;
diff --git a/src/librbd/AioObjectRequest.cc b/src/librbd/AioObjectRequest.cc
index cf76176..36ec0e7 100644
--- a/src/librbd/AioObjectRequest.cc
+++ b/src/librbd/AioObjectRequest.cc
@@ -454,12 +454,10 @@ void AbstractAioObjectWrite::send() {
 void AbstractAioObjectWrite::send_pre() {
   assert(m_ictx->owner_lock.is_locked());
 
-  bool write = false;
   {
     RWLock::RLocker snap_lock(m_ictx->snap_lock);
     if (m_ictx->object_map == nullptr) {
       m_object_exist = true;
-      write = true;
     } else {
       // should have been flushed prior to releasing lock
       assert(m_ictx->exclusive_lock->is_lock_owner());
@@ -469,27 +467,20 @@ void AbstractAioObjectWrite::send_pre() {
       pre_object_map_update(&new_state);
 
       RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
-      if (m_ictx->object_map->update_required(m_object_no, new_state)) {
-        ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
-                               << m_object_off << "~" << m_object_len
-                               << dendl;
-        m_state = LIBRBD_AIO_WRITE_PRE;
-
-        Context *ctx = util::create_context_callback<AioObjectRequest>(this);
-        bool updated = m_ictx->object_map->aio_update(m_object_no, new_state,
-                                                      {}, ctx);
-        assert(updated);
-      } else {
-        write = true;
+      ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
+                             << m_object_off << "~" << m_object_len
+                             << dendl;
+      m_state = LIBRBD_AIO_WRITE_PRE;
+
+      if (m_ictx->object_map->aio_update<AioObjectRequest>(
+            CEPH_NOSNAP, m_object_no, new_state, {}, this)) {
+        return;
       }
     }
   }
 
-  // avoid possible recursive lock attempts
-  if (write) {
-    // no object map update required
-    send_write();
-  }
+  // no object map update required
+  send_write();
 }
 
 bool AbstractAioObjectWrite::send_post() {
@@ -503,20 +494,16 @@ bool AbstractAioObjectWrite::send_post() {
   assert(m_ictx->exclusive_lock->is_lock_owner());
 
   RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
-  if (!m_ictx->object_map->update_required(m_object_no, OBJECT_NONEXISTENT)) {
-    return true;
-  }
-
   ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
                          << m_object_off << "~" << m_object_len << dendl;
   m_state = LIBRBD_AIO_WRITE_POST;
 
-  Context *ctx = util::create_context_callback<AioObjectRequest>(this);
-  bool updated = m_ictx->object_map->aio_update(m_object_no,
-                                                OBJECT_NONEXISTENT,
-      			                  OBJECT_PENDING, ctx);
-  assert(updated);
-  return false;
+  if (m_ictx->object_map->aio_update<AioObjectRequest>(
+        CEPH_NOSNAP, m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING, this)) {
+    return false;
+  }
+
+  return true;
 }
 
 void AbstractAioObjectWrite::send_write() {
diff --git a/src/librbd/BlockGuard.h b/src/librbd/BlockGuard.h
new file mode 100644
index 0000000..36eb8be
--- /dev/null
+++ b/src/librbd/BlockGuard.h
@@ -0,0 +1,172 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_IO_BLOCK_GUARD_H
+#define CEPH_LIBRBD_IO_BLOCK_GUARD_H
+
+#include "include/int_types.h"
+#include "common/dout.h"
+#include "common/Mutex.h"
+#include <boost/intrusive/list.hpp>
+#include <boost/intrusive/set.hpp>
+#include <deque>
+#include <list>
+#include "include/assert.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::BlockGuard: " << this << " " \
+                           <<  __func__ << ": "
+
+namespace librbd {
+
+struct BlockExtent {
+  uint64_t block_start = 0;
+  uint64_t block_end = 0;
+
+  BlockExtent() {
+  }
+  BlockExtent(uint64_t block_start, uint64_t block_end)
+    : block_start(block_start), block_end(block_end) {
+  }
+};
+
+struct BlockGuardCell {
+};
+
+/**
+ * Helper class to restrict and order concurrent IO to the same block. The
+ * definition of a block is dependent upon the user of this class. It might
+ * represent a backing object, 512 byte sectors, etc.
+ */
+template <typename BlockOperation>
+class BlockGuard {
+private:
+  struct DetainedBlockExtent;
+
+public:
+  typedef std::list<BlockOperation> BlockOperations;
+
+  BlockGuard(CephContext *cct)
+    : m_cct(cct), m_lock("librbd::BlockGuard::m_lock") {
+  }
+
+  BlockGuard(const BlockGuard&) = delete;
+  BlockGuard &operator=(const BlockGuard&) = delete;
+
+  /**
+   * Detain future IO for a range of blocks. the guard will assume
+   * ownership of the provided operation if the operation is blocked.
+   * @return 0 upon success and IO can be issued
+   *         >0 if the IO is blocked,
+   *         <0 upon error
+   */
+  int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
+             BlockGuardCell **cell) {
+    Mutex::Locker locker(m_lock);
+    ldout(m_cct, 20) << "block_start=" << block_extent.block_start << ", "
+                     << "block_end=" << block_extent.block_end << ", "
+                     << "free_slots=" << m_free_detained_block_extents.size()
+                     << dendl;
+
+    DetainedBlockExtent *detained_block_extent;
+    auto it = m_detained_block_extents.find(block_extent);
+    if (it != m_detained_block_extents.end()) {
+      // request against an already detained block
+      detained_block_extent = &(*it);
+      if (block_operation != nullptr) {
+        detained_block_extent->block_operations.emplace_back(
+          std::move(*block_operation));
+      }
+
+      // alert the caller that the IO was detained
+      *cell = nullptr;
+      return detained_block_extent->block_operations.size();
+    } else {
+      if (!m_free_detained_block_extents.empty()) {
+        detained_block_extent = &m_free_detained_block_extents.front();
+        detained_block_extent->block_operations.clear();
+        m_free_detained_block_extents.pop_front();
+      } else {
+        ldout(m_cct, 20) << "no free detained block cells" << dendl;
+        m_detained_block_extent_pool.emplace_back();
+        detained_block_extent = &m_detained_block_extent_pool.back();
+      }
+
+      detained_block_extent->block_extent = block_extent;
+      m_detained_block_extents.insert(*detained_block_extent);
+      *cell = reinterpret_cast<BlockGuardCell*>(detained_block_extent);
+      return 0;
+    }
+  }
+
+  /**
+   * Release any detained IO operations from the provided cell.
+   */
+  void release(BlockGuardCell *cell, BlockOperations *block_operations) {
+    Mutex::Locker locker(m_lock);
+
+    assert(cell != nullptr);
+    auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
+      *cell);
+    ldout(m_cct, 20) << "block_start="
+                     << detained_block_extent.block_extent.block_start << ", "
+                     << "block_end="
+                     << detained_block_extent.block_extent.block_end << ", "
+                     << "pending_ops="
+                     << (detained_block_extent.block_operations.empty() ?
+                          0 : detained_block_extent.block_operations.size() - 1)
+                     << dendl;
+
+    *block_operations = std::move(detained_block_extent.block_operations);
+    m_detained_block_extents.erase(detained_block_extent.block_extent);
+    m_free_detained_block_extents.push_back(detained_block_extent);
+  }
+
+private:
+  struct DetainedBlockExtent : public boost::intrusive::list_base_hook<>,
+                               public boost::intrusive::set_base_hook<> {
+    DetainedBlockExtent() {
+    }
+    DetainedBlockExtent(const BlockExtent &block_extent)
+      : block_extent(block_extent) {
+    }
+
+    BlockExtent block_extent;
+    BlockOperations block_operations;
+  };
+
+  struct DetainedBlockExtentCompare {
+    bool operator()(const DetainedBlockExtent &lhs,
+                    const DetainedBlockExtent &rhs) const {
+      // check for range overlap (lhs < rhs)
+      if (lhs.block_extent.block_end <= rhs.block_extent.block_start) {
+        return true;
+      }
+      return false;
+    }
+  };
+
+  typedef std::deque<DetainedBlockExtent> DetainedBlockExtentsPool;
+  typedef boost::intrusive::list<DetainedBlockExtent> DetainedBlockExtents;
+  typedef boost::intrusive::set<
+    DetainedBlockExtent,
+    boost::intrusive::compare<DetainedBlockExtentCompare> >
+      BlockExtentToDetainedBlockExtents;
+
+  CephContext *m_cct;
+
+  Mutex m_lock;
+  DetainedBlockExtentsPool m_detained_block_extent_pool;
+  DetainedBlockExtents m_free_detained_block_extents;
+  BlockExtentToDetainedBlockExtents m_detained_block_extents;
+
+};
+
+} // namespace librbd
+
+#undef dout_subsys
+#undef dout_prefix
+#define dout_prefix *_dout
+
+#endif // CEPH_LIBRBD_IO_BLOCK_GUARD_H
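
A sketch of the detain/release flow the new BlockGuard helper is meant to support; the IO payload type and block range below are made up for illustration:

    #include "librbd/BlockGuard.h"

    struct PendingIO {                 // hypothetical per-IO payload
      uint64_t block_start;
      uint64_t block_end;
    };

    void demo(CephContext *cct)
    {
      librbd::BlockGuard<PendingIO> guard(cct);

      PendingIO io = {0, 4};
      librbd::BlockGuardCell *cell = nullptr;
      int r = guard.detain({io.block_start, io.block_end}, &io, &cell);
      if (r == 0) {
        // cell owns blocks [0,4): dispatch the IO, then release the cell
        librbd::BlockGuard<PendingIO>::BlockOperations waiters;
        guard.release(cell, &waiters);
        // re-dispatch every waiter that queued up behind this cell
      } else if (r > 0) {
        // an overlapping IO already holds the range; 'io' was queued behind it
      }
    }
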
diff --git a/src/librbd/CMakeLists.txt b/src/librbd/CMakeLists.txt
index 8254a2e..c013cf0 100644
--- a/src/librbd/CMakeLists.txt
+++ b/src/librbd/CMakeLists.txt
@@ -27,6 +27,10 @@ set(librbd_internal_srcs
   Operations.cc
   Utils.cc
   exclusive_lock/AcquireRequest.cc
+  exclusive_lock/AutomaticPolicy.cc
+  exclusive_lock/BreakRequest.cc
+  exclusive_lock/GetLockerRequest.cc
+  exclusive_lock/ReacquireRequest.cc
   exclusive_lock/ReleaseRequest.cc
   exclusive_lock/StandardPolicy.cc
   image/CloseRequest.cc
@@ -36,6 +40,7 @@ set(librbd_internal_srcs
   image/SetSnapRequest.cc
   image_watcher/Notifier.cc
   image_watcher/NotifyLockOwner.cc
+  image_watcher/RewatchRequest.cc
   journal/Replay.cc
   journal/StandardPolicy.cc
   object_map/InvalidateRequest.cc
diff --git a/src/librbd/CopyupRequest.cc b/src/librbd/CopyupRequest.cc
index b95544b..d9750b6 100644
--- a/src/librbd/CopyupRequest.cc
+++ b/src/librbd/CopyupRequest.cc
@@ -46,9 +46,8 @@ public:
       RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
       assert(m_image_ctx.exclusive_lock->is_lock_owner());
       assert(m_image_ctx.object_map != nullptr);
-      bool sent = m_image_ctx.object_map->aio_update(m_object_no, OBJECT_EXISTS,
-                                                     boost::optional<uint8_t>(),
-                                                     this);
+      bool sent = m_image_ctx.object_map->aio_update<Context>(
+        CEPH_NOSNAP, m_object_no, OBJECT_EXISTS, {}, this);
       return (sent ? 0 : 1);
     }
 
@@ -64,8 +63,9 @@ public:
       return 1;
     }
 
-    m_image_ctx.object_map->aio_update(snap_id, m_object_no, m_object_no + 1,
-                                       state, boost::optional<uint8_t>(), this);
+    bool sent = m_image_ctx.object_map->aio_update<Context>(
+      snap_id, m_object_no, state, {}, this);
+    assert(sent);
     return 0;
   }
 
diff --git a/src/librbd/DiffIterate.cc b/src/librbd/DiffIterate.cc
index 9b0a3ac..c3b9511 100644
--- a/src/librbd/DiffIterate.cc
+++ b/src/librbd/DiffIterate.cc
@@ -131,6 +131,9 @@ private:
     ldout(cct, 20) << "  diff " << diff << " end_exists=" << end_exists
                    << dendl;
     if (diff.empty()) {
+      if (m_diff_context.from_snap_id == 0 && !end_exists) {
+        compute_parent_overlap(diffs);
+      }
       return;
     } else if (m_diff_context.whole_object) {
       // provide the full object extents to the callback
@@ -261,7 +264,7 @@ int DiffIterate::execute() {
     RWLock::RLocker l(m_image_ctx.snap_lock);
     RWLock::RLocker l2(m_image_ctx.parent_lock);
     uint64_t overlap = end_size;
-    m_image_ctx.get_parent_overlap(from_snap_id, &overlap);
+    m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &overlap);
     r = 0;
     if (m_image_ctx.parent && overlap > 0) {
       ldout(cct, 10) << " first getting parent diff" << dendl;
@@ -382,8 +385,8 @@ int DiffIterate::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
     }
 
     BitVector<2> object_map;
-    std::string oid(ObjectMap::object_map_name(m_image_ctx.id,
-                                               current_snap_id));
+    std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id,
+                                                 current_snap_id));
     r = cls_client::object_map_load(&m_image_ctx.md_ctx, oid, &object_map);
     if (r < 0) {
       lderr(cct) << "diff_object_map: failed to load object map " << oid
diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc
index 4c0501c..3fedf8d 100644
--- a/src/librbd/ExclusiveLock.cc
+++ b/src/librbd/ExclusiveLock.cc
@@ -7,9 +7,11 @@
 #include "common/errno.h"
 #include "librbd/AioImageRequestWQ.h"
 #include "librbd/ImageCtx.h"
+#include "librbd/ImageState.h"
 #include "librbd/ImageWatcher.h"
 #include "librbd/Utils.h"
 #include "librbd/exclusive_lock/AcquireRequest.h"
+#include "librbd/exclusive_lock/ReacquireRequest.h"
 #include "librbd/exclusive_lock/ReleaseRequest.h"
 #include <sstream>
 
@@ -25,10 +27,10 @@ namespace {
 
 const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto";
 
-template <typename I>
-struct C_SendReleaseRequest : public Context {
-  ReleaseRequest<I>* request;
-  explicit C_SendReleaseRequest(ReleaseRequest<I>* request) : request(request) {
+template <typename R>
+struct C_SendRequest : public Context {
+  R* request;
+  explicit C_SendRequest(R* request) : request(request) {
   }
   virtual void finish(int r) override {
     request->send();
@@ -60,6 +62,7 @@ bool ExclusiveLock<I>::is_lock_owner() const {
   switch (m_state) {
   case STATE_LOCKED:
   case STATE_POST_ACQUIRING:
+  case STATE_REACQUIRING:
   case STATE_PRE_RELEASING:
   case STATE_PRE_SHUTTING_DOWN:
     lock_owner = true;
@@ -135,7 +138,7 @@ void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
   }
 
   // if stalled in request state machine -- abort
-  handle_lock_released();
+  handle_peer_notification(0);
 }
 
 template <typename I>
@@ -195,36 +198,62 @@ void ExclusiveLock<I>::release_lock(Context *on_released) {
 }
 
 template <typename I>
-void ExclusiveLock<I>::handle_watch_registered() {
-  Mutex::Locker locker(m_lock);
-  if (m_state != STATE_WAITING_FOR_REGISTER) {
-    return;
+void ExclusiveLock<I>::reacquire_lock(Context *on_reacquired) {
+  {
+    Mutex::Locker locker(m_lock);
+    assert(m_image_ctx.owner_lock.is_locked());
+
+    if (m_state == STATE_WAITING_FOR_REGISTER) {
+      // restart the acquire lock process now that watch is valid
+      ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": "
+                                 << "woke up waiting acquire" << dendl;
+      Action active_action = get_active_action();
+      assert(active_action == ACTION_TRY_LOCK ||
+             active_action == ACTION_REQUEST_LOCK);
+      execute_next_action();
+    } else if (!is_shutdown() &&
+               (m_state == STATE_LOCKED ||
+                m_state == STATE_ACQUIRING ||
+                m_state == STATE_POST_ACQUIRING ||
+                m_state == STATE_WAITING_FOR_PEER)) {
+      // interlock the lock operation with other image state ops
+      ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
+      execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
+      return;
+    }
   }
 
-  ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
-  Action active_action = get_active_action();
-  assert(active_action == ACTION_TRY_LOCK ||
-         active_action == ACTION_REQUEST_LOCK);
-  execute_next_action();
+  // ignore request if shutdown or not in a lock-related state
+  if (on_reacquired != nullptr) {
+    on_reacquired->complete(0);
+  }
 }
 
 template <typename I>
-void ExclusiveLock<I>::handle_lock_released() {
-  Mutex::Locker locker(m_lock);
-  if (m_state != STATE_WAITING_FOR_PEER) {
-    return;
+void ExclusiveLock<I>::handle_peer_notification(int r) {
+  {
+    Mutex::Locker locker(m_lock);
+    if (m_state != STATE_WAITING_FOR_PEER) {
+      return;
+    }
+
+    ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
+    assert(get_active_action() == ACTION_REQUEST_LOCK);
+
+    if (r >= 0) {
+      execute_next_action();
+      return;
+    }
   }
 
-  ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
-  assert(get_active_action() == ACTION_REQUEST_LOCK);
-  execute_next_action();
+  handle_acquire_lock(r);
 }
 
 template <typename I>
 void ExclusiveLock<I>::assert_header_locked(librados::ObjectWriteOperation *op) {
   Mutex::Locker locker(m_lock);
   rados::cls::lock::assert_locked(op, RBD_LOCK_NAME, LOCK_EXCLUSIVE,
-                                  encode_lock_cookie(), WATCHER_LOCK_TAG);
+                                  m_cookie, WATCHER_LOCK_TAG);
 }
 
 template <typename I>
@@ -256,6 +285,7 @@ bool ExclusiveLock<I>::is_transition_state() const {
   case STATE_WAITING_FOR_PEER:
   case STATE_WAITING_FOR_REGISTER:
   case STATE_POST_ACQUIRING:
+  case STATE_REACQUIRING:
   case STATE_PRE_RELEASING:
   case STATE_RELEASING:
   case STATE_PRE_SHUTTING_DOWN:
@@ -309,6 +339,9 @@ void ExclusiveLock<I>::execute_next_action() {
   case ACTION_REQUEST_LOCK:
     send_acquire_lock();
     break;
+  case ACTION_REACQUIRE_LOCK:
+    send_reacquire_lock();
+    break;
   case ACTION_RELEASE_LOCK:
     send_release_lock();
     break;
@@ -384,15 +417,15 @@ void ExclusiveLock<I>::send_acquire_lock() {
     return;
   }
 
+  m_cookie = encode_lock_cookie();
+
   using el = ExclusiveLock<I>;
   AcquireRequest<I>* req = AcquireRequest<I>::create(
-    m_image_ctx, encode_lock_cookie(),
+    m_image_ctx, m_cookie,
     util::create_context_callback<el, &el::handle_acquiring_lock>(this),
     util::create_context_callback<el, &el::handle_acquire_lock>(this));
-
-  m_lock.Unlock();
-  req->send();
-  m_lock.Lock();
+  m_image_ctx.op_work_queue->queue(new C_SendRequest<AcquireRequest<I> >(req),
+                                   0);
 }
 
 template <typename I>
@@ -424,12 +457,13 @@ void ExclusiveLock<I>::handle_acquire_lock(int r) {
   {
     m_lock.Lock();
     assert(m_state == STATE_ACQUIRING ||
-           m_state == STATE_POST_ACQUIRING);
+           m_state == STATE_POST_ACQUIRING ||
+           m_state == STATE_WAITING_FOR_PEER);
 
     Action action = get_active_action();
     assert(action == ACTION_TRY_LOCK || action == ACTION_REQUEST_LOCK);
     if (action == ACTION_REQUEST_LOCK && r < 0 && r != -EBLACKLISTED &&
-        r != -EPERM) {
+        r != -EPERM && r != -EROFS) {
       m_state = STATE_WAITING_FOR_PEER;
       m_lock.Unlock();
 
@@ -456,6 +490,93 @@ void ExclusiveLock<I>::handle_acquire_lock(int r) {
 }
 
 template <typename I>
+void ExclusiveLock<I>::send_reacquire_lock() {
+  assert(m_lock.is_locked());
+
+  CephContext *cct = m_image_ctx.cct;
+  if (m_state != STATE_LOCKED) {
+    complete_active_action(m_state, 0);
+    return;
+  }
+
+  m_watch_handle = m_image_ctx.image_watcher->get_watch_handle();
+  if (m_watch_handle == 0) {
+    // watch (re)failed while recovering
+    lderr(cct) << this << " " << __func__ << ": "
+               << "aborting reacquire due to invalid watch handle" << dendl;
+    complete_active_action(STATE_LOCKED, 0);
+    return;
+  }
+
+  m_new_cookie = encode_lock_cookie();
+  if (m_cookie == m_new_cookie) {
+    ldout(cct, 10) << this << " " << __func__ << ": "
+                   << "skipping reacquire since cookie still valid" << dendl;
+    complete_active_action(STATE_LOCKED, 0);
+    return;
+  }
+
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+  m_state = STATE_REACQUIRING;
+
+  using el = ExclusiveLock<I>;
+  ReacquireRequest<I>* req = ReacquireRequest<I>::create(
+    m_image_ctx, m_cookie, m_new_cookie,
+    util::create_context_callback<el, &el::handle_reacquire_lock>(this));
+  req->send();
+}
+
+template <typename I>
+void ExclusiveLock<I>::handle_reacquire_lock(int r) {
+  Mutex::Locker locker(m_lock);
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
+
+  assert(m_state == STATE_REACQUIRING);
+  if (r < 0) {
+    if (r == -EOPNOTSUPP) {
+      ldout(cct, 10) << this << " " << __func__ << ": "
+                     << "updating lock is not supported" << dendl;
+    } else {
+      lderr(cct) << this << " " << __func__ << ": "
+                 << "failed to update lock cookie: " << cpp_strerror(r)
+                 << dendl;
+    }
+
+    if (!is_shutdown()) {
+      // queue a release and re-acquire of the lock since cookie cannot
+      // be updated on older OSDs
+      execute_action(ACTION_RELEASE_LOCK, nullptr);
+
+      assert(!m_actions_contexts.empty());
+      ActionContexts &action_contexts(m_actions_contexts.front());
+
+      // reacquire completes when the request lock completes
+      Contexts contexts;
+      std::swap(contexts, action_contexts.second);
+      if (contexts.empty()) {
+        execute_action(ACTION_REQUEST_LOCK, nullptr);
+      } else {
+        for (auto ctx : contexts) {
+          ctx = new FunctionContext([ctx, r](int acquire_ret_val) {
+              if (acquire_ret_val >= 0) {
+                acquire_ret_val = r;
+              }
+              ctx->complete(acquire_ret_val);
+            });
+          execute_action(ACTION_REQUEST_LOCK, ctx);
+        }
+      }
+    }
+  } else {
+    m_cookie = m_new_cookie;
+  }
+
+  complete_active_action(STATE_LOCKED, 0);
+}
+
+template <typename I>
 void ExclusiveLock<I>::send_release_lock() {
   assert(m_lock.is_locked());
   if (m_state == STATE_UNLOCKED) {
@@ -468,12 +589,12 @@ void ExclusiveLock<I>::send_release_lock() {
 
   using el = ExclusiveLock<I>;
   ReleaseRequest<I>* req = ReleaseRequest<I>::create(
-    m_image_ctx, encode_lock_cookie(),
+    m_image_ctx, m_cookie,
     util::create_context_callback<el, &el::handle_releasing_lock>(this),
-    util::create_context_callback<el, &el::handle_release_lock>(this));
-
-  // send in alternate thread context to avoid re-entrant locking
-  m_image_ctx.op_work_queue->queue(new C_SendReleaseRequest<I>(req), 0);
+    util::create_context_callback<el, &el::handle_release_lock>(this),
+    false);
+  m_image_ctx.op_work_queue->queue(new C_SendRequest<ReleaseRequest<I> >(req),
+                                   0);
 }
 
 template <typename I>
@@ -504,6 +625,7 @@ void ExclusiveLock<I>::handle_release_lock(int r) {
       lock_request_needed = m_image_ctx.aio_work_queue->is_lock_request_needed();
       m_lock.Lock();
 
+      m_cookie = "";
       m_watch_handle = 0;
     }
     complete_active_action(r < 0 ? STATE_LOCKED : STATE_UNLOCKED, r);
@@ -540,14 +662,15 @@ void ExclusiveLock<I>::send_shutdown_release() {
   std::string cookie;
   {
     Mutex::Locker locker(m_lock);
-    cookie = encode_lock_cookie();
+    cookie = m_cookie;
   }
 
   using el = ExclusiveLock<I>;
   ReleaseRequest<I>* req = ReleaseRequest<I>::create(
     m_image_ctx, cookie,
     util::create_context_callback<el, &el::handle_shutdown_releasing>(this),
-    util::create_context_callback<el, &el::handle_shutdown_released>(this));
+    util::create_context_callback<el, &el::handle_shutdown_released>(this),
+    true);
   req->send();
 }
 
diff --git a/src/librbd/ExclusiveLock.h b/src/librbd/ExclusiveLock.h
index e82da20..531567a 100644
--- a/src/librbd/ExclusiveLock.h
+++ b/src/librbd/ExclusiveLock.h
@@ -41,8 +41,9 @@ public:
   void request_lock(Context *on_locked);
   void release_lock(Context *on_released);
 
-  void handle_watch_registered();
-  void handle_lock_released();
+  void reacquire_lock(Context *on_reacquired = nullptr);
+
+  void handle_peer_notification(int r);
 
   void assert_header_locked(librados::ObjectWriteOperation *op);
 
@@ -51,6 +52,8 @@ public:
 private:
 
   /**
+   * @verbatim
+   *
    * <start>                              * * > WAITING_FOR_REGISTER --------\
    *    |                                 * (watch not registered)           |
    *    |                                 *                                  |
@@ -69,11 +72,21 @@ private:
    *                            |          (release_lock)           v
    *                      PRE_RELEASING <------------------------ LOCKED
    *
+   * <LOCKED state>
+   *    |
+   *    v
+   * REACQUIRING -------------------------------------> <finish>
+   *    .                                                 ^
+   *    .                                                 |
+   *    . . . > <RELEASE action> ---> <ACQUIRE action> ---/
+   *
    * <UNLOCKED/LOCKED states>
    *    |
    *    |
    *    v
    * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
+   *
+   * @endverbatim
    */
   enum State {
     STATE_UNINITIALIZED,
@@ -84,16 +97,18 @@ private:
     STATE_POST_ACQUIRING,
     STATE_WAITING_FOR_PEER,
     STATE_WAITING_FOR_REGISTER,
+    STATE_REACQUIRING,
     STATE_PRE_RELEASING,
     STATE_RELEASING,
     STATE_PRE_SHUTTING_DOWN,
     STATE_SHUTTING_DOWN,
-    STATE_SHUTDOWN,
+    STATE_SHUTDOWN
   };
 
   enum Action {
     ACTION_TRY_LOCK,
     ACTION_REQUEST_LOCK,
+    ACTION_REACQUIRE_LOCK,
     ACTION_RELEASE_LOCK,
     ACTION_SHUT_DOWN
   };
@@ -130,6 +145,8 @@ private:
 
   mutable Mutex m_lock;
   State m_state;
+  std::string m_cookie;
+  std::string m_new_cookie;
   uint64_t m_watch_handle;
 
   ActionsContexts m_actions_contexts;
@@ -156,6 +173,9 @@ private:
   void handle_acquiring_lock(int r);
   void handle_acquire_lock(int r);
 
+  void send_reacquire_lock();
+  void handle_reacquire_lock(int r);
+
   void send_release_lock();
   void handle_releasing_lock(int r);
   void handle_release_lock(int r);
diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc
index 4226d42..d3cb29f 100644
--- a/src/librbd/ImageCtx.cc
+++ b/src/librbd/ImageCtx.cc
@@ -15,6 +15,7 @@
 #include "librbd/AsyncOperation.h"
 #include "librbd/AsyncRequest.h"
 #include "librbd/ExclusiveLock.h"
+#include "librbd/exclusive_lock/AutomaticPolicy.h"
 #include "librbd/exclusive_lock/StandardPolicy.h"
 #include "librbd/internal.h"
 #include "librbd/ImageCtx.h"
@@ -92,44 +93,42 @@ struct C_ShutDownCache : public Context {
 struct C_InvalidateCache : public Context {
   ImageCtx *image_ctx;
   bool purge_on_error;
-  bool reentrant_safe;
   Context *on_finish;
 
-  C_InvalidateCache(ImageCtx *_image_ctx, bool _purge_on_error,
-                    bool _reentrant_safe, Context *_on_finish)
-    : image_ctx(_image_ctx), purge_on_error(_purge_on_error),
-      reentrant_safe(_reentrant_safe), on_finish(_on_finish) {
+  C_InvalidateCache(ImageCtx *_image_ctx, bool _purge_on_error, Context *_on_finish)
+    : image_ctx(_image_ctx), purge_on_error(_purge_on_error), on_finish(_on_finish) {
   }
   virtual void finish(int r) {
-    assert(image_ctx->cache_lock.is_locked());
-    CephContext *cct = image_ctx->cct;
-
-    if (r == -EBLACKLISTED) {
-      lderr(cct) << "Blacklisted during flush!  Purging cache..." << dendl;
-      image_ctx->object_cacher->purge_set(image_ctx->object_set);
-    } else if (r != 0 && purge_on_error) {
-      lderr(cct) << "invalidate cache encountered error "
-                 << cpp_strerror(r) << " !Purging cache..." << dendl;
-      image_ctx->object_cacher->purge_set(image_ctx->object_set);
-    } else if (r != 0) {
-      lderr(cct) << "flush_cache returned " << r << dendl;
-    }
+    {
+      RWLock::RLocker owner_Locker(image_ctx->owner_lock);
+      Mutex::Locker cache_locker(image_ctx->cache_lock);
+      CephContext *cct = image_ctx->cct;
+
+      if (r == -EBLACKLISTED) {
+	lderr(cct) << "Blacklisted during flush!  Purging cache..." << dendl;
+	image_ctx->object_cacher->purge_set(image_ctx->object_set);
+      } else if (r != 0 && purge_on_error) {
+	lderr(cct) << "invalidate cache encountered error "
+		   << cpp_strerror(r) << " !Purging cache..." << dendl;
+	image_ctx->object_cacher->purge_set(image_ctx->object_set);
+      } else if (r != 0) {
+	lderr(cct) << "flush_cache returned " << r << dendl;
+      }
 
-    loff_t unclean = image_ctx->object_cacher->release_set(
-      image_ctx->object_set);
-    if (unclean == 0) {
-      r = 0;
-    } else {
-      lderr(cct) << "could not release all objects from cache: "
-                 << unclean << " bytes remain" << dendl;
-      r = -EBUSY;
+      loff_t unclean = image_ctx->object_cacher->release_set(
+	image_ctx->object_set);
+      if (unclean == 0) {
+	r = 0;
+      } else {
+	lderr(cct) << "could not release all objects from cache: "
+		   << unclean << " bytes remain" << dendl;
+	if (r == 0) {
+	  r = -EBUSY;
+	}
+      }
     }
 
-    if (reentrant_safe) {
-      on_finish->complete(r);
-    } else {
-      image_ctx->op_work_queue->queue(on_finish, r);
-    }
+    on_finish->complete(r);
   }
 
 };
@@ -191,7 +190,11 @@ struct C_InvalidateCache : public Context {
                                   cct->_conf->rbd_op_thread_timeout,
                                   thread_pool_singleton);
 
-    exclusive_lock_policy = new exclusive_lock::StandardPolicy(this);
+    if (cct->_conf->rbd_auto_exclusive_lock_until_manual_request) {
+      exclusive_lock_policy = new exclusive_lock::AutomaticPolicy(this);
+    } else {
+      exclusive_lock_policy = new exclusive_lock::StandardPolicy(this);
+    }
     journal_policy = new journal::StandardPolicy(this);
   }
 
@@ -737,8 +740,10 @@ struct C_InvalidateCache : public Context {
     object_cacher->release_set(object_set);
     cache_lock.Unlock();
 
-    C_ShutDownCache *shut_down = new C_ShutDownCache(this, on_finish);
-    flush_cache(new C_InvalidateCache(this, true, false, shut_down));
+    Context *shut_down = util::create_async_context_callback(
+      *this, new C_ShutDownCache(this, on_finish));
+    flush_cache(util::create_async_context_callback(
+      *this, new C_InvalidateCache(this, true, shut_down)));
   }
 
   int ImageCtx::invalidate_cache(bool purge_on_error) {
@@ -752,13 +757,14 @@ struct C_InvalidateCache : public Context {
     cache_lock.Unlock();
 
     C_SaferCond ctx;
-    flush_cache(new C_InvalidateCache(this, purge_on_error, true, &ctx));
+    flush_cache(util::create_async_context_callback(
+      *this, new C_InvalidateCache(this, purge_on_error, &ctx)));
 
     int result = ctx.wait();
     return result;
   }
 
-  void ImageCtx::invalidate_cache(Context *on_finish) {
+  void ImageCtx::invalidate_cache(bool purge_on_error, Context *on_finish) {
     if (object_cacher == NULL) {
       op_work_queue->queue(on_finish, 0);
       return;
@@ -768,7 +774,9 @@ struct C_InvalidateCache : public Context {
     object_cacher->release_set(object_set);
     cache_lock.Unlock();
 
-    flush_cache(new C_InvalidateCache(this, false, false, on_finish));
+    on_finish = util::create_async_context_callback(*this, on_finish);
+    flush_cache(util::create_async_context_callback(
+      *this, new C_InvalidateCache(this, purge_on_error, on_finish)));
   }
 
   void ImageCtx::clear_nonexistence_cache() {
@@ -778,6 +786,11 @@ struct C_InvalidateCache : public Context {
     object_cacher->clear_nonexistence(object_set);
   }
 
+  bool ImageCtx::is_cache_empty() {
+    Mutex::Locker locker(cache_lock);
+    return object_cacher->set_is_empty(object_set);
+  }
+
   void ImageCtx::register_watch(Context *on_finish) {
     assert(image_watcher == NULL);
     image_watcher = new ImageWatcher<>(*this);
@@ -995,8 +1008,8 @@ struct C_InvalidateCache : public Context {
     return new ExclusiveLock<ImageCtx>(*this);
   }
 
-  ObjectMap *ImageCtx::create_object_map(uint64_t snap_id) {
-    return new ObjectMap(*this, snap_id);
+  ObjectMap<ImageCtx> *ImageCtx::create_object_map(uint64_t snap_id) {
+    return new ObjectMap<ImageCtx>(*this, snap_id);
   }
 
   Journal<ImageCtx> *ImageCtx::create_journal() {
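
ImageCtx now picks between the new exclusive_lock::AutomaticPolicy and the existing StandardPolicy based on the rbd_auto_exclusive_lock_until_manual_request config option, and wraps the cache-invalidation contexts in async callbacks so the old reentrant_safe flag is no longer needed. A small standalone sketch of the config-driven policy selection follows; the Policy interface and its single method are placeholders chosen for illustration, not librbd's exclusive_lock::Policy.

    // Standalone sketch of selecting an exclusive-lock policy from a
    // config flag; the Policy interface here is a placeholder.
    #include <iostream>
    #include <memory>

    struct Policy {
      virtual ~Policy() = default;
      virtual bool may_auto_request_lock() const = 0;
    };

    struct StandardPolicy : Policy {
      bool may_auto_request_lock() const override { return false; }
    };

    struct AutomaticPolicy : Policy {
      bool may_auto_request_lock() const override { return true; }
    };

    std::unique_ptr<Policy> make_policy(bool auto_until_manual_request) {
      if (auto_until_manual_request) {
        return std::make_unique<AutomaticPolicy>();
      }
      return std::make_unique<StandardPolicy>();
    }

    int main() {
      // Stand-in for cct->_conf->rbd_auto_exclusive_lock_until_manual_request.
      bool rbd_auto_exclusive_lock_until_manual_request = true;
      auto policy = make_policy(rbd_auto_exclusive_lock_until_manual_request);
      std::cout << "auto lock requests allowed: "
                << policy->may_auto_request_lock() << "\n";
    }
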
diff --git a/src/librbd/ImageCtx.h b/src/librbd/ImageCtx.h
index b8a3bf6..3cbb083 100644
--- a/src/librbd/ImageCtx.h
+++ b/src/librbd/ImageCtx.h
@@ -45,7 +45,7 @@ namespace librbd {
   template <typename> class ImageWatcher;
   template <typename> class Journal;
   class LibrbdAdminSocketHook;
-  class ObjectMap;
+  template <typename> class ObjectMap;
   template <typename> class Operations;
   class LibrbdWriteback;
 
@@ -141,7 +141,7 @@ namespace librbd {
     Operations<ImageCtx> *operations;
 
     ExclusiveLock<ImageCtx> *exclusive_lock;
-    ObjectMap *object_map;
+    ObjectMap<ImageCtx> *object_map;
 
     xlist<operation::ResizeRequest<ImageCtx>*> resize_reqs;
 
@@ -268,9 +268,10 @@ namespace librbd {
     void user_flushed();
     void flush_cache(Context *onfinish);
     void shut_down_cache(Context *on_finish);
-    int invalidate_cache(bool purge_on_error=false);
-    void invalidate_cache(Context *on_finish);
+    int invalidate_cache(bool purge_on_error);
+    void invalidate_cache(bool purge_on_error, Context *on_finish);
     void clear_nonexistence_cache();
+    bool is_cache_empty();
     void register_watch(Context *on_finish);
     uint64_t prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
 				  uint64_t overlap);
@@ -287,7 +288,7 @@ namespace librbd {
     void apply_metadata(const std::map<std::string, bufferlist> &meta);
 
     ExclusiveLock<ImageCtx> *create_exclusive_lock();
-    ObjectMap *create_object_map(uint64_t snap_id);
+    ObjectMap<ImageCtx> *create_object_map(uint64_t snap_id);
     Journal<ImageCtx> *create_journal();
 
     void clear_pending_completions();
diff --git a/src/librbd/ImageState.cc b/src/librbd/ImageState.cc
index 5b450b0..28cf427 100644
--- a/src/librbd/ImageState.cc
+++ b/src/librbd/ImageState.cc
@@ -16,7 +16,7 @@
 
 #define dout_subsys ceph_subsys_rbd
 #undef dout_prefix
-#define dout_prefix *_dout << "librbd::ImageState: "
+#define dout_prefix *_dout << "librbd::ImageState: " << this << " "
 
 namespace librbd {
 
@@ -234,7 +234,8 @@ ImageState<I>::ImageState(I *image_ctx)
   : m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED),
     m_lock(util::unique_lock_name("librbd::ImageState::m_lock", this)),
     m_last_refresh(0), m_refresh_seq(0),
-    m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)) {
+    m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)),
+    m_skip_open_parent_image(false) {
 }
 
 template <typename I>
@@ -244,19 +245,20 @@ ImageState<I>::~ImageState() {
 }
 
 template <typename I>
-int ImageState<I>::open() {
+int ImageState<I>::open(bool skip_open_parent) {
   C_SaferCond ctx;
-  open(&ctx);
+  open(skip_open_parent, &ctx);
   return ctx.wait();
 }
 
 template <typename I>
-void ImageState<I>::open(Context *on_finish) {
+void ImageState<I>::open(bool skip_open_parent, Context *on_finish) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
   m_lock.Lock();
   assert(m_state == STATE_UNINITIALIZED);
+  m_skip_open_parent_image = skip_open_parent;
 
   Action action(ACTION_TYPE_OPEN);
   action.refresh_seq = m_refresh_seq;
@@ -318,18 +320,6 @@ template <typename I>
 void ImageState<I>::refresh(Context *on_finish) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
-  refresh(false, on_finish);
-}
-
-template <typename I>
-void ImageState<I>::acquire_lock_refresh(Context *on_finish) {
-  CephContext *cct = m_image_ctx->cct;
-  ldout(cct, 20) << __func__ << dendl;
-  refresh(true, on_finish);
-}
-
-template <typename I>
-void ImageState<I>::refresh(bool acquiring_lock, Context *on_finish) {
 
   m_lock.Lock();
   if (is_closed()) {
@@ -340,7 +330,6 @@ void ImageState<I>::refresh(bool acquiring_lock, Context *on_finish) {
 
   Action action(ACTION_TYPE_REFRESH);
   action.refresh_seq = m_refresh_seq;
-  action.refresh_acquiring_lock = acquiring_lock;
   execute_action_unlock(action, on_finish);
 }
 
@@ -378,6 +367,37 @@ void ImageState<I>::snap_set(const std::string &snap_name, Context *on_finish) {
 }
 
 template <typename I>
+void ImageState<I>::prepare_lock(Context *on_ready) {
+  CephContext *cct = m_image_ctx->cct;
+  ldout(cct, 10) << __func__ << dendl;
+
+  m_lock.Lock();
+  if (is_closed()) {
+    m_lock.Unlock();
+    on_ready->complete(-ESHUTDOWN);
+    return;
+  }
+
+  Action action(ACTION_TYPE_LOCK);
+  action.on_ready = on_ready;
+  execute_action_unlock(action, nullptr);
+}
+
+template <typename I>
+void ImageState<I>::handle_prepare_lock_complete() {
+  CephContext *cct = m_image_ctx->cct;
+  ldout(cct, 10) << __func__ << dendl;
+
+  m_lock.Lock();
+  if (m_state != STATE_PREPARING_LOCK) {
+    m_lock.Unlock();
+    return;
+  }
+
+  complete_action_unlock(STATE_OPEN, 0);
+}
+
+template <typename I>
 int ImageState<I>::register_update_watcher(UpdateWatchCtx *watcher,
 					 uint64_t *handle) {
   CephContext *cct = m_image_ctx->cct;
@@ -426,6 +446,7 @@ bool ImageState<I>::is_transition_state() const {
   case STATE_CLOSING:
   case STATE_REFRESHING:
   case STATE_SETTING_SNAP:
+  case STATE_PREPARING_LOCK:
     break;
   }
   return true;
@@ -479,6 +500,9 @@ void ImageState<I>::execute_next_action_unlock() {
   case ACTION_TYPE_SET_SNAP:
     send_set_snap_unlock();
     return;
+  case ACTION_TYPE_LOCK:
+    send_prepare_lock_unlock();
+    return;
   }
   assert(false);
 }
@@ -533,7 +557,7 @@ void ImageState<I>::send_open_unlock() {
     *m_image_ctx, create_context_callback<
       ImageState<I>, &ImageState<I>::handle_open>(this));
   image::OpenRequest<I> *req = image::OpenRequest<I>::create(
-    m_image_ctx, ctx);
+    m_image_ctx, m_skip_open_parent_image, ctx);
 
   m_lock.Unlock();
   req->send();
@@ -598,7 +622,7 @@ void ImageState<I>::send_refresh_unlock() {
     *m_image_ctx, create_context_callback<
       ImageState<I>, &ImageState<I>::handle_refresh>(this));
   image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
-    *m_image_ctx, action_context.refresh_acquiring_lock, ctx);
+    *m_image_ctx, false, false, ctx);
 
   m_lock.Unlock();
   req->send();
@@ -663,6 +687,30 @@ void ImageState<I>::handle_set_snap(int r) {
   complete_action_unlock(STATE_OPEN, r);
 }
 
+template <typename I>
+void ImageState<I>::send_prepare_lock_unlock() {
+  CephContext *cct = m_image_ctx->cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+
+  assert(m_lock.is_locked());
+  m_state = STATE_PREPARING_LOCK;
+
+  assert(!m_actions_contexts.empty());
+  ActionContexts &action_contexts(m_actions_contexts.front());
+  assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
+
+  Context *on_ready = action_contexts.first.on_ready;
+  m_lock.Unlock();
+
+  if (on_ready == nullptr) {
+    complete_action_unlock(STATE_OPEN, 0);
+    return;
+  }
+
+  // wake up the lock handler now that it's safe to proceed
+  on_ready->complete(0);
+}
+
 } // namespace librbd
 
 template class librbd::ImageState<librbd::ImageCtx>;
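
ImageState gains a LOCK action: prepare_lock() parks the state machine in PREPARING_LOCK and completes the caller's on_ready context once it is safe to transition the exclusive lock, and handle_prepare_lock_complete() later returns the state machine to OPEN. Below is a hedged standalone model of that handshake; the tiny state machine and the std::function callback are stand-ins, not the ImageState API.

    // Minimal model of the prepare_lock()/handle_prepare_lock_complete()
    // handshake; types and method bodies are simplified stand-ins.
    #include <functional>
    #include <iostream>

    enum class State { OPEN, PREPARING_LOCK };

    class ImageStateModel {
    public:
      // Parks the state machine and tells the lock code it may proceed.
      void prepare_lock(std::function<void(int)> on_ready) {
        state_ = State::PREPARING_LOCK;
        on_ready(0);                          // "safe to proceed" signal
      }

      // Called by the lock code once its transition has finished.
      void handle_prepare_lock_complete() {
        if (state_ != State::PREPARING_LOCK) {
          return;                             // nothing was being prepared
        }
        state_ = State::OPEN;
      }

      State state() const { return state_; }

    private:
      State state_ = State::OPEN;
    };

    int main() {
      ImageStateModel image_state;
      image_state.prepare_lock([&](int r) {
        std::cout << "lock transition may start, r=" << r << "\n";
        // ... perform the exclusive-lock acquire/release/reacquire here ...
        image_state.handle_prepare_lock_complete();
      });
      std::cout << "back to OPEN: "
                << (image_state.state() == State::OPEN) << "\n";
    }
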
diff --git a/src/librbd/ImageState.h b/src/librbd/ImageState.h
index bad4277..b6e7ce6 100644
--- a/src/librbd/ImageState.h
+++ b/src/librbd/ImageState.h
@@ -25,8 +25,8 @@ public:
   ImageState(ImageCtxT *image_ctx);
   ~ImageState();
 
-  int open();
-  void open(Context *on_finish);
+  int open(bool skip_open_parent);
+  void open(bool skip_open_parent, Context *on_finish);
 
   int close();
   void close(Context *on_finish);
@@ -38,10 +38,12 @@ public:
   int refresh();
   int refresh_if_required();
   void refresh(Context *on_finish);
-  void acquire_lock_refresh(Context *on_finish);
 
   void snap_set(const std::string &snap_name, Context *on_finish);
 
+  void prepare_lock(Context *on_ready);
+  void handle_prepare_lock_complete();
+
   int register_update_watcher(UpdateWatchCtx *watcher, uint64_t *handle);
   int unregister_update_watcher(uint64_t handle);
   void flush_update_watchers(Context *on_finish);
@@ -55,21 +57,23 @@ private:
     STATE_OPENING,
     STATE_CLOSING,
     STATE_REFRESHING,
-    STATE_SETTING_SNAP
+    STATE_SETTING_SNAP,
+    STATE_PREPARING_LOCK
   };
 
   enum ActionType {
     ACTION_TYPE_OPEN,
     ACTION_TYPE_CLOSE,
     ACTION_TYPE_REFRESH,
-    ACTION_TYPE_SET_SNAP
+    ACTION_TYPE_SET_SNAP,
+    ACTION_TYPE_LOCK
   };
 
   struct Action {
     ActionType action_type;
     uint64_t refresh_seq = 0;
-    bool refresh_acquiring_lock = false;
     std::string snap_name;
+    Context *on_ready = nullptr;
 
     Action(ActionType action_type) : action_type(action_type) {
     }
@@ -79,10 +83,11 @@ private:
       }
       switch (action_type) {
       case ACTION_TYPE_REFRESH:
-        return (refresh_seq == action.refresh_seq &&
-                refresh_acquiring_lock == action.refresh_acquiring_lock);
+        return (refresh_seq == action.refresh_seq);
       case ACTION_TYPE_SET_SNAP:
         return snap_name == action.snap_name;
+      case ACTION_TYPE_LOCK:
+        return false;
       default:
         return true;
       }
@@ -104,11 +109,11 @@ private:
 
   ImageUpdateWatchers *m_update_watchers;
 
+  bool m_skip_open_parent_image;
+
   bool is_transition_state() const;
   bool is_closed() const;
 
-  void refresh(bool acquiring_lock, Context *on_finish);
-
   void append_context(const Action &action, Context *context);
   void execute_next_action_unlock();
   void execute_action_unlock(const Action &action, Context *context);
@@ -126,6 +131,8 @@ private:
   void send_set_snap_unlock();
   void handle_set_snap(int r);
 
+  void send_prepare_lock_unlock();
+
 };
 
 } // namespace librbd
diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc
index 38a42a9..9d386c2 100644
--- a/src/librbd/ImageWatcher.cc
+++ b/src/librbd/ImageWatcher.cc
@@ -13,6 +13,7 @@
 #include "librbd/exclusive_lock/Policy.h"
 #include "librbd/image_watcher/Notifier.h"
 #include "librbd/image_watcher/NotifyLockOwner.h"
+#include "librbd/image_watcher/RewatchRequest.h"
 #include "include/encoding.h"
 #include "common/errno.h"
 #include "common/WorkQueue.h"
@@ -125,31 +126,42 @@ void ImageWatcher<I>::handle_register_watch(int r) {
 
 template <typename I>
 void ImageWatcher<I>::unregister_watch(Context *on_finish) {
-  ldout(m_image_ctx.cct, 10) << this << " unregistering image watcher" << dendl;
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " unregistering image watcher" << dendl;
 
   cancel_async_requests();
 
-  C_Gather *g = new C_Gather(m_image_ctx.cct, create_async_context_callback(
-          m_image_ctx, on_finish));
-  m_task_finisher->cancel_all(g->new_sub());
-
+  C_Gather *gather_ctx = nullptr;
   {
-    RWLock::WLocker l(m_watch_lock);
-    if (m_watch_state == WATCH_STATE_REGISTERED) {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    if (m_watch_state == WATCH_STATE_REWATCHING) {
+      ldout(cct, 10) << this << " delaying unregister until rewatch completed"
+                     << dendl;
+
+      assert(m_unregister_watch_ctx == nullptr);
+      m_unregister_watch_ctx = new FunctionContext([this, on_finish](int r) {
+          unregister_watch(on_finish);
+        });
+      return;
+    }
+
+    gather_ctx = new C_Gather(m_image_ctx.cct, create_async_context_callback(
+      m_image_ctx, on_finish));
+    if (m_watch_state == WATCH_STATE_REGISTERED ||
+        m_watch_state == WATCH_STATE_ERROR) {
       m_watch_state = WATCH_STATE_UNREGISTERED;
 
       librados::AioCompletion *aio_comp = create_rados_safe_callback(
-        new C_UnwatchAndFlush(m_image_ctx.md_ctx, g->new_sub()));
+        new C_UnwatchAndFlush(m_image_ctx.md_ctx, gather_ctx->new_sub()));
       int r = m_image_ctx.md_ctx.aio_unwatch(m_watch_handle, aio_comp);
       assert(r == 0);
       aio_comp->release();
-      g->activate();
-      return;
-    } else if (m_watch_state == WATCH_STATE_ERROR) {
-      m_watch_state = WATCH_STATE_UNREGISTERED;
     }
   }
-  g->activate();
+
+  assert(gather_ctx != nullptr);
+  m_task_finisher->cancel_all(gather_ctx->new_sub());
+  gather_ctx->activate();
 }
 
 template <typename I>
@@ -481,7 +493,10 @@ void ImageWatcher<I>::handle_request_lock(int r) {
                               << dendl;
 
     // treat this as a dead client -- so retest acquiring the lock
-    m_image_ctx.exclusive_lock->handle_lock_released();
+    m_image_ctx.exclusive_lock->handle_peer_notification(0);
+  } else if (r == -EROFS) {
+    ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
+    m_image_ctx.exclusive_lock->handle_peer_notification(r);
   } else if (r < 0) {
     lderr(m_image_ctx.cct) << this << " error requesting lock: "
                            << cpp_strerror(r) << dendl;
@@ -624,6 +639,11 @@ bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
   }
 
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  if (m_image_ctx.exclusive_lock != nullptr) {
+    // potentially wake up the exclusive lock state machine now that
+    // a lock owner has advertised itself
+    m_image_ctx.exclusive_lock->handle_peer_notification(0);
+  }
   if (cancel_async_requests &&
       (m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner())) {
@@ -661,7 +681,7 @@ bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
   if (m_image_ctx.exclusive_lock != nullptr &&
       !m_image_ctx.exclusive_lock->is_lock_owner()) {
     m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
-    m_image_ctx.exclusive_lock->handle_lock_released();
+    m_image_ctx.exclusive_lock->handle_peer_notification(0);
   }
   return true;
 }
@@ -675,25 +695,24 @@ bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
   }
 
   RWLock::RLocker l(m_image_ctx.owner_lock);
-  if (m_image_ctx.exclusive_lock != nullptr) {
-    int r;
-    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
-      // need to send something back so the client can detect a missing leader
-      ::encode(ResponseMessage(0), ack_ctx->out);
-
-      {
-        Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
-        if (!m_owner_client_id.is_valid()) {
-	  return true;
-        }
+  if (m_image_ctx.exclusive_lock != nullptr &&
+      m_image_ctx.exclusive_lock->is_lock_owner()) {
+    int r = 0;
+    bool accept_request = m_image_ctx.exclusive_lock->accept_requests(&r);
+
+    if (accept_request) {
+      assert(r == 0);
+      Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+      if (!m_owner_client_id.is_valid()) {
+        return true;
       }
 
       ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
                                  << dendl;
-      m_image_ctx.get_exclusive_lock_policy()->lock_requested(payload.force);
-    } else if (r < 0) {
-      ::encode(ResponseMessage(r), ack_ctx->out);
+      r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
+        payload.force);
     }
+    ::encode(ResponseMessage(r), ack_ctx->out);
   }
   return true;
 }
@@ -996,11 +1015,10 @@ void ImageWatcher<I>::handle_error(uint64_t handle, int err) {
 
   RWLock::WLocker l(m_watch_lock);
   if (m_watch_state == WATCH_STATE_REGISTERED) {
-    m_image_ctx.md_ctx.unwatch2(m_watch_handle);
     m_watch_state = WATCH_STATE_ERROR;
 
     FunctionContext *ctx = new FunctionContext(
-      boost::bind(&ImageWatcher<I>::reregister_watch, this));
+      boost::bind(&ImageWatcher<I>::rewatch, this));
     m_task_finisher->queue(TASK_CODE_REREGISTER_WATCH, ctx);
   }
 }
@@ -1012,62 +1030,51 @@ void ImageWatcher<I>::acknowledge_notify(uint64_t notify_id, uint64_t handle,
 }
 
 template <typename I>
-void ImageWatcher<I>::reregister_watch() {
+void ImageWatcher<I>::rewatch() {
   ldout(m_image_ctx.cct, 10) << this << " re-registering image watch" << dendl;
 
-  bool releasing_lock = false;
-  C_SaferCond release_lock_ctx;
-  {
-    RWLock::WLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.exclusive_lock != nullptr) {
-      releasing_lock = true;
-      m_image_ctx.exclusive_lock->release_lock(&release_lock_ctx);
-    }
+  RWLock::WLocker l(m_watch_lock);
+  if (m_watch_state != WATCH_STATE_ERROR) {
+    return;
   }
+  m_watch_state = WATCH_STATE_REWATCHING;
 
-  int r;
-  if (releasing_lock) {
-    r = release_lock_ctx.wait();
-    if (r == -EBLACKLISTED) {
-      lderr(m_image_ctx.cct) << this << " client blacklisted" << dendl;
-      return;
-    }
+  Context *ctx = create_context_callback<
+    ImageWatcher<I>, &ImageWatcher<I>::handle_rewatch>(this);
+  RewatchRequest<I> *req = RewatchRequest<I>::create(m_image_ctx, m_watch_lock,
+                                                     &m_watch_ctx,
+                                                     &m_watch_handle, ctx);
+  req->send();
+}
 
-    assert(r == 0);
+template <typename I>
+void ImageWatcher<I>::handle_rewatch(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
+
+  WatchState next_watch_state = WATCH_STATE_REGISTERED;
+  if (r < 0) {
+    // only EBLACKLISTED or ENOENT can be returned
+    assert(r == -EBLACKLISTED || r == -ENOENT);
+    next_watch_state = WATCH_STATE_UNREGISTERED;
   }
 
+  Context *unregister_watch_ctx = nullptr;
   {
-    RWLock::WLocker l(m_watch_lock);
-    if (m_watch_state != WATCH_STATE_ERROR) {
-      return;
-    }
+    RWLock::WLocker watch_locker(m_watch_lock);
+    assert(m_watch_state == WATCH_STATE_REWATCHING);
+    m_watch_state = next_watch_state;
 
-    r = m_image_ctx.md_ctx.watch2(m_image_ctx.header_oid,
-                                  &m_watch_handle, &m_watch_ctx);
-    if (r < 0) {
-      lderr(m_image_ctx.cct) << this << " failed to re-register image watch: "
-                             << cpp_strerror(r) << dendl;
-      if (r != -ESHUTDOWN) {
-        FunctionContext *ctx = new FunctionContext(boost::bind(
-          &ImageWatcher<I>::reregister_watch, this));
-        m_task_finisher->add_event_after(TASK_CODE_REREGISTER_WATCH,
-                                         RETRY_DELAY_SECONDS, ctx);
-      }
-      return;
-    }
+    std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
 
-    m_watch_state = WATCH_STATE_REGISTERED;
+    // image might have been updated while we didn't have an active watch
+    handle_payload(HeaderUpdatePayload(), nullptr);
   }
 
-  // if the exclusive lock state machine was paused waiting for the
-  // watch to be re-registered, wake it up
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
-  if (m_image_ctx.exclusive_lock != nullptr) {
-    m_image_ctx.exclusive_lock->handle_watch_registered();
+  // wake up pending unregister request
+  if (unregister_watch_ctx != nullptr) {
+    unregister_watch_ctx->complete(0);
   }
-
-  handle_payload(HeaderUpdatePayload(), NULL);
 }
 
 template <typename I>
diff --git a/src/librbd/ImageWatcher.h b/src/librbd/ImageWatcher.h
index e72eea7..deab294 100644
--- a/src/librbd/ImageWatcher.h
+++ b/src/librbd/ImageWatcher.h
@@ -64,7 +64,8 @@ private:
   enum WatchState {
     WATCH_STATE_UNREGISTERED,
     WATCH_STATE_REGISTERED,
-    WATCH_STATE_ERROR
+    WATCH_STATE_ERROR,
+    WATCH_STATE_REWATCHING
   };
 
   enum TaskCode {
@@ -226,6 +227,7 @@ private:
   WatchCtx m_watch_ctx;
   uint64_t m_watch_handle;
   WatchState m_watch_state;
+  Context *m_unregister_watch_ctx = nullptr;
 
   TaskFinisher<Task> *m_task_finisher;
 
@@ -310,7 +312,8 @@ private:
   void handle_error(uint64_t cookie, int err);
   void acknowledge_notify(uint64_t notify_id, uint64_t handle, bufferlist &out);
 
-  void reregister_watch();
+  void rewatch();
+  void handle_rewatch(int r);
 };
 
 } // namespace librbd
diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc
index f2b37c1..5686e17 100644
--- a/src/librbd/Journal.cc
+++ b/src/librbd/Journal.cc
@@ -170,6 +170,168 @@ public:
   }
 };
 
+template <typename I>
+struct C_IsTagOwner : public Context {
+  I *image_ctx;
+  bool *is_tag_owner;
+  Context *on_finish;
+
+  typedef ::journal::Journaler Journaler;
+  Journaler *journaler;
+  cls::journal::Client client;
+  journal::ImageClientMeta client_meta;
+  uint64_t tag_tid;
+  journal::TagData tag_data;
+
+  C_IsTagOwner(I *image_ctx, bool *is_tag_owner, Context *on_finish)
+    : image_ctx(image_ctx), is_tag_owner(is_tag_owner), on_finish(on_finish),
+      journaler(new Journaler(image_ctx->md_ctx, image_ctx->id,
+                              Journal<>::IMAGE_CLIENT_ID, {})) {
+  }
+
+  virtual void finish(int r) {
+    CephContext *cct = image_ctx->cct;
+
+    ldout(cct, 20) << this << " C_IsTagOwner::" << __func__ << ": r=" << r
+		   << dendl;
+    if (r < 0) {
+      lderr(cct) << this << " C_IsTagOwner::" << __func__ << ": "
+                 << "failed to get tag owner: " << cpp_strerror(r) << dendl;
+    } else {
+      *is_tag_owner = (tag_data.mirror_uuid == Journal<>::LOCAL_MIRROR_UUID);
+    }
+
+    Journaler *journaler = this->journaler;
+    Context *on_finish = this->on_finish;
+    FunctionContext *ctx = new FunctionContext(
+      [journaler, on_finish](int r) {
+	on_finish->complete(r);
+	delete journaler;
+      });
+    image_ctx->op_work_queue->queue(ctx, r);
+  }
+};
+
+template <typename J>
+struct GetTagsRequest {
+  CephContext *cct;
+  J *journaler;
+  cls::journal::Client *client;
+  journal::ImageClientMeta *client_meta;
+  uint64_t *tag_tid;
+  journal::TagData *tag_data;
+  Context *on_finish;
+
+  Mutex lock;
+
+  GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client,
+                 journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
+                 journal::TagData *tag_data, Context *on_finish)
+    : cct(cct), journaler(journaler), client(client), client_meta(client_meta),
+      tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish), lock("lock") {
+  }
+
+  /**
+   * @verbatim
+   *
+   * <start>
+   *    |
+   *    v
+   * GET_CLIENT * * * * * * * * * * * *
+   *    |                             *
+   *    v                             *
+   * GET_TAGS * * * * * * * * * * * * * (error)
+   *    |                             *
+   *    v                             *
+   * <finish> * * * * * * * * * * * * *
+   *
+   * @endverbatim
+   */
+
+  void send() {
+    send_get_client();
+  }
+
+  void send_get_client() {
+    ldout(cct, 20) << __func__ << dendl;
+
+    FunctionContext *ctx = new FunctionContext(
+      [this](int r) {
+        handle_get_client(r);
+      });
+    journaler->get_client(Journal<ImageCtx>::IMAGE_CLIENT_ID, client, ctx);
+  }
+
+  void handle_get_client(int r) {
+    ldout(cct, 20) << __func__ << ": r=" << r << dendl;
+
+    if (r < 0) {
+      complete(r);
+      return;
+    }
+
+    librbd::journal::ClientData client_data;
+    bufferlist::iterator bl_it = client->data.begin();
+    try {
+      ::decode(client_data, bl_it);
+    } catch (const buffer::error &err) {
+      lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
+                 << "failed to decode client data" << dendl;
+      complete(-EBADMSG);
+      return;
+    }
+
+    journal::ImageClientMeta *image_client_meta =
+      boost::get<journal::ImageClientMeta>(&client_data.client_meta);
+    if (image_client_meta == nullptr) {
+      lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
+                 << "failed to get client meta" << dendl;
+      complete(-EINVAL);
+      return;
+    }
+    *client_meta = *image_client_meta;
+
+    send_get_tags();
+  }
+
+  void send_get_tags() {
+    ldout(cct, 20) << __func__ << dendl;
+
+    FunctionContext *ctx = new FunctionContext(
+      [this](int r) {
+        handle_get_tags(r);
+      });
+    C_DecodeTags *tags_ctx = new C_DecodeTags(cct, &lock, tag_tid, tag_data,
+                                              ctx);
+    journaler->get_tags(client_meta->tag_class, &tags_ctx->tags, tags_ctx);
+  }
+
+  void handle_get_tags(int r) {
+    ldout(cct, 20) << __func__ << ": r=" << r << dendl;
+
+    complete(r);
+  }
+
+  void complete(int r) {
+    on_finish->complete(r);
+    delete this;
+  }
+};
+
+template <typename J>
+void get_tags(CephContext *cct, J *journaler,
+              cls::journal::Client *client,
+              journal::ImageClientMeta *client_meta,
+              uint64_t *tag_tid, journal::TagData *tag_data,
+              Context *on_finish) {
+  ldout(cct, 20) << __func__ << dendl;
+
+  GetTagsRequest<J> *req =
+    new GetTagsRequest<J>(cct, journaler, client, client_meta, tag_tid,
+                          tag_data, on_finish);
+  req->send();
+}
+
 template <typename J>
 int open_journaler(CephContext *cct, J *journaler,
                    cls::journal::Client *client,
@@ -514,6 +676,19 @@ int Journal<I>::is_tag_owner(IoCtx& io_ctx, std::string& image_id,
 }
 
 template <typename I>
+void Journal<I>::is_tag_owner(I *image_ctx, bool *is_tag_owner,
+                              Context *on_finish) {
+  CephContext *cct = image_ctx->cct;
+  ldout(cct, 20) << __func__ << dendl;
+
+  C_IsTagOwner<I> *is_tag_owner_ctx =  new C_IsTagOwner<I>(
+    image_ctx, is_tag_owner, on_finish);
+  get_tags(cct, is_tag_owner_ctx->journaler, &is_tag_owner_ctx->client,
+	   &is_tag_owner_ctx->client_meta, &is_tag_owner_ctx->tag_tid,
+	   &is_tag_owner_ctx->tag_data, is_tag_owner_ctx);
+}
+
+template <typename I>
 int Journal<I>::get_tag_owner(I *image_ctx, std::string *mirror_uuid) {
   return get_tag_owner(image_ctx->md_ctx, image_ctx->id, mirror_uuid);
 }
@@ -530,14 +705,17 @@ int Journal<I>::get_tag_owner(IoCtx& io_ctx, std::string& image_id,
   journal::ImageClientMeta client_meta;
   uint64_t tag_tid;
   journal::TagData tag_data;
-  int r = open_journaler(cct, &journaler, &client, &client_meta, &tag_tid,
-                         &tag_data);
-  if (r >= 0) {
-    *mirror_uuid = tag_data.mirror_uuid;
+  C_SaferCond get_tags_ctx;
+  get_tags(cct, &journaler, &client, &client_meta, &tag_tid,
+	   &tag_data, &get_tags_ctx);
+
+  int r = get_tags_ctx.wait();
+  if (r < 0) {
+    return r;
   }
 
-  journaler.shut_down();
-  return r;
+  *mirror_uuid = tag_data.mirror_uuid;
+  return 0;
 }
 
 template <typename I>
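
Journal.cc adds a self-deleting GetTagsRequest that chains get_client into get_tags through FunctionContext callbacks, and get_tag_owner()/is_tag_owner() now wait on that chain rather than the synchronous open_journaler() path. Here is a hedged standalone sketch of that callback-chained, self-deleting request pattern; the two steps are placeholders and do not use the journal::Journaler API.

    // Standalone sketch of a self-deleting request that chains two async
    // steps with callbacks; the step bodies are placeholders.
    #include <functional>
    #include <iostream>

    class GetTagsSketch {
    public:
      explicit GetTagsSketch(std::function<void(int)> on_finish)
        : on_finish_(std::move(on_finish)) {}

      void send() { send_get_client(); }

    private:
      void send_get_client() {
        // Pretend the client lookup completed successfully.
        handle_get_client(0);
      }

      void handle_get_client(int r) {
        if (r < 0) {
          complete(r);
          return;
        }
        send_get_tags();
      }

      void send_get_tags() {
        // Pretend the tag fetch completed successfully.
        handle_get_tags(0);
      }

      void handle_get_tags(int r) { complete(r); }

      void complete(int r) {
        on_finish_(r);
        delete this;                    // the request owns its own lifetime
      }

      std::function<void(int)> on_finish_;
    };

    int main() {
      // Analogous to waiting on a C_SaferCond in get_tag_owner().
      (new GetTagsSketch([](int r) {
         std::cout << "get_tags finished, r=" << r << "\n";
       }))->send();
    }
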
diff --git a/src/librbd/Journal.h b/src/librbd/Journal.h
index 61107d5..1563979 100644
--- a/src/librbd/Journal.h
+++ b/src/librbd/Journal.h
@@ -106,6 +106,8 @@ public:
   static int is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner);
   static int is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
                           bool *is_tag_owner);
+  static void is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner,
+                           Context *on_finish);
   static int get_tag_owner(ImageCtxT *image_ctx, std::string *mirror_uuid);
   static int get_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
                            std::string *mirror_uuid);
diff --git a/src/librbd/LibrbdWriteback.cc b/src/librbd/LibrbdWriteback.cc
index 977b0b3..f4eabc8 100644
--- a/src/librbd/LibrbdWriteback.cc
+++ b/src/librbd/LibrbdWriteback.cc
@@ -122,7 +122,12 @@ namespace librbd {
 
     virtual void complete(int r) {
       if (request_sent || r < 0) {
-        commit_io_event_extent(r);
+        if (request_sent && r == 0) {
+          // only commit IO events that are safely recorded to the backing image
+          // since the cache will retry all IOs that fail
+          commit_io_event_extent(0);
+        }
+
         req_comp->complete(r);
         delete this;
       } else {
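
The LibrbdWriteback change only commits the journal IO event when the write was actually sent and succeeded, because the cache layer retries failed writes itself. A tiny sketch of that gating, with placeholder function names standing in for the journaling calls:

    // Sketch of the "commit only on confirmed success" gating; the
    // functions are placeholders, not librbd's journaling interface.
    #include <iostream>

    void commit_io_event_extent() { std::cout << "journal event committed\n"; }
    void complete_request(int r)  { std::cout << "request completed r=" << r << "\n"; }

    void handle_write_complete(bool request_sent, int r) {
      if (request_sent && r == 0) {
        // Safely recorded to the backing image: commit the journal event.
        commit_io_event_extent();
      }
      // Failed writes are simply completed; the cache retries them, so
      // their journal events must stay uncommitted.
      complete_request(r);
    }

    int main() {
      handle_write_complete(true, 0);    // success path commits the event
      handle_write_complete(true, -5);   // error path leaves it uncommitted
    }
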
diff --git a/src/librbd/Makefile.am b/src/librbd/Makefile.am
index d6bc358..aa6cdc2 100644
--- a/src/librbd/Makefile.am
+++ b/src/librbd/Makefile.am
@@ -32,6 +32,10 @@ librbd_internal_la_SOURCES = \
 	librbd/Operations.cc \
 	librbd/Utils.cc \
 	librbd/exclusive_lock/AcquireRequest.cc \
+	librbd/exclusive_lock/AutomaticPolicy.cc \
+	librbd/exclusive_lock/BreakRequest.cc \
+	librbd/exclusive_lock/GetLockerRequest.cc \
+	librbd/exclusive_lock/ReacquireRequest.cc \
 	librbd/exclusive_lock/ReleaseRequest.cc \
 	librbd/exclusive_lock/StandardPolicy.cc \
 	librbd/image/CloseRequest.cc \
@@ -41,6 +45,7 @@ librbd_internal_la_SOURCES = \
 	librbd/image/SetSnapRequest.cc \
 	librbd/image_watcher/Notifier.cc \
 	librbd/image_watcher/NotifyLockOwner.cc \
+	librbd/image_watcher/RewatchRequest.cc \
 	librbd/journal/Replay.cc \
 	librbd/journal/StandardPolicy.cc \
 	librbd/object_map/InvalidateRequest.cc \
@@ -97,6 +102,7 @@ noinst_HEADERS += \
 	librbd/AsyncObjectThrottle.h \
 	librbd/AsyncOperation.h \
 	librbd/AsyncRequest.h \
+        librbd/BlockGuard.h \
 	librbd/CopyupRequest.h \
 	librbd/DiffIterate.h \
 	librbd/ExclusiveLock.h \
@@ -117,9 +123,14 @@ noinst_HEADERS += \
 	librbd/Utils.h \
 	librbd/WatchNotifyTypes.h \
 	librbd/exclusive_lock/AcquireRequest.h \
+	librbd/exclusive_lock/AutomaticPolicy.h \
+	librbd/exclusive_lock/BreakRequest.h \
+	librbd/exclusive_lock/GetLockerRequest.h \
 	librbd/exclusive_lock/Policy.h \
+	librbd/exclusive_lock/ReacquireRequest.h \
 	librbd/exclusive_lock/ReleaseRequest.h \
 	librbd/exclusive_lock/StandardPolicy.h \
+	librbd/exclusive_lock/Types.h \
 	librbd/image/CloseRequest.h \
 	librbd/image/OpenRequest.h \
 	librbd/image/RefreshParentRequest.h \
@@ -127,6 +138,7 @@ noinst_HEADERS += \
 	librbd/image/SetSnapRequest.h \
 	librbd/image_watcher/Notifier.h \
 	librbd/image_watcher/NotifyLockOwner.h \
+	librbd/image_watcher/RewatchRequest.h \
 	librbd/journal/DisabledPolicy.h \
 	librbd/journal/Policy.h \
 	librbd/journal/Replay.h \
diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc
index b5d659e..c59366e 100644
--- a/src/librbd/ObjectMap.cc
+++ b/src/librbd/ObjectMap.cc
@@ -1,6 +1,8 @@
 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
+
 #include "librbd/ObjectMap.h"
+#include "librbd/BlockGuard.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/object_map/RefreshRequest.h"
@@ -25,21 +27,30 @@
 
 #define dout_subsys ceph_subsys_rbd
 #undef dout_prefix
-#define dout_prefix *_dout << "librbd::ObjectMap: "
+#define dout_prefix *_dout << "librbd::ObjectMap: " << this << " " << __func__ \
+                           << ": "
 
 namespace librbd {
 
-ObjectMap::ObjectMap(ImageCtx &image_ctx, uint64_t snap_id)
-  : m_image_ctx(image_ctx), m_snap_id(snap_id)
-{
+template <typename I>
+ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
+  : m_image_ctx(image_ctx), m_snap_id(snap_id),
+    m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
+}
+
+template <typename I>
+ObjectMap<I>::~ObjectMap() {
+  delete m_update_guard;
 }
 
-int ObjectMap::remove(librados::IoCtx &io_ctx, const std::string &image_id) {
+template <typename I>
+int ObjectMap<I>::remove(librados::IoCtx &io_ctx, const std::string &image_id) {
   return io_ctx.remove(object_map_name(image_id, CEPH_NOSNAP));
 }
 
-std::string ObjectMap::object_map_name(const std::string &image_id,
-				       uint64_t snap_id) {
+template <typename I>
+std::string ObjectMap<I>::object_map_name(const std::string &image_id,
+				          uint64_t snap_id) {
   std::string oid(RBD_OBJECT_MAP_PREFIX + image_id);
   if (snap_id != CEPH_NOSNAP) {
     std::stringstream snap_suffix;
@@ -50,26 +61,30 @@ std::string ObjectMap::object_map_name(const std::string &image_id,
   return oid;
 }
 
-bool ObjectMap::is_compatible(const file_layout_t& layout, uint64_t size) {
+template <typename I>
+bool ObjectMap<I>::is_compatible(const file_layout_t& layout, uint64_t size) {
   uint64_t object_count = Striper::get_num_objects(layout, size);
   return (object_count <= cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT);
 }
 
-ceph::BitVector<2u>::Reference ObjectMap::operator[](uint64_t object_no)
+template <typename I>
+ceph::BitVector<2u>::Reference ObjectMap<I>::operator[](uint64_t object_no)
 {
   assert(m_image_ctx.object_map_lock.is_wlocked());
   assert(object_no < m_object_map.size());
   return m_object_map[object_no];
 }
 
-uint8_t ObjectMap::operator[](uint64_t object_no) const
+template <typename I>
+uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
 {
   assert(m_image_ctx.object_map_lock.is_locked());
   assert(object_no < m_object_map.size());
   return m_object_map[object_no];
 }
 
-bool ObjectMap::object_may_exist(uint64_t object_no) const
+template <typename I>
+bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
 {
   assert(m_image_ctx.snap_lock.is_locked());
 
@@ -84,13 +99,13 @@ bool ObjectMap::object_may_exist(uint64_t object_no) const
   RWLock::RLocker l(m_image_ctx.object_map_lock);
   uint8_t state = (*this)[object_no];
   bool exists = (state != OBJECT_NONEXISTENT);
-  ldout(m_image_ctx.cct, 20) << &m_image_ctx << " object_may_exist: "
-			     << "object_no=" << object_no << " r=" << exists
+  ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r=" << exists
 			     << dendl;
   return exists;
 }
 
-bool ObjectMap::update_required(uint64_t object_no, uint8_t new_state) {
+template <typename I>
+bool ObjectMap<I>::update_required(uint64_t object_no, uint8_t new_state) {
   assert(m_image_ctx.object_map_lock.is_wlocked());
   uint8_t state = (*this)[object_no];
 
@@ -102,24 +117,26 @@ bool ObjectMap::update_required(uint64_t object_no, uint8_t new_state) {
   return true;
 }
 
-void ObjectMap::open(Context *on_finish) {
-  object_map::RefreshRequest<> *req = new object_map::RefreshRequest<>(
+template <typename I>
+void ObjectMap<I>::open(Context *on_finish) {
+  auto req = object_map::RefreshRequest<I>::create(
     m_image_ctx, &m_object_map, m_snap_id, on_finish);
   req->send();
 }
 
-void ObjectMap::close(Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::close(Context *on_finish) {
   if (m_snap_id != CEPH_NOSNAP) {
     m_image_ctx.op_work_queue->queue(on_finish, 0);
     return;
   }
 
-  object_map::UnlockRequest<> *req = new object_map::UnlockRequest<>(
-    m_image_ctx, on_finish);
+  auto req = object_map::UnlockRequest<I>::create(m_image_ctx, on_finish);
   req->send();
 }
 
-void ObjectMap::rollback(uint64_t snap_id, Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
   assert(m_image_ctx.snap_lock.is_locked());
   assert(m_image_ctx.object_map_lock.is_wlocked());
 
@@ -128,7 +145,8 @@ void ObjectMap::rollback(uint64_t snap_id, Context *on_finish) {
   req->send();
 }
 
-void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
   assert(m_image_ctx.snap_lock.is_locked());
   assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
   assert(snap_id != CEPH_NOSNAP);
@@ -139,7 +157,8 @@ void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) {
   req->send();
 }
 
-void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
   assert(m_image_ctx.snap_lock.is_wlocked());
   assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
   assert(snap_id != CEPH_NOSNAP);
@@ -150,7 +169,8 @@ void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) {
   req->send();
 }
 
-void ObjectMap::aio_save(Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::aio_save(Context *on_finish) {
   assert(m_image_ctx.owner_lock.is_locked());
   assert(m_image_ctx.snap_lock.is_locked());
   assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
@@ -171,8 +191,9 @@ void ObjectMap::aio_save(Context *on_finish) {
   comp->release();
 }
 
-void ObjectMap::aio_resize(uint64_t new_size, uint8_t default_object_state,
-			   Context *on_finish) {
+template <typename I>
+void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
+			      Context *on_finish) {
   assert(m_image_ctx.owner_lock.is_locked());
   assert(m_image_ctx.snap_lock.is_locked());
   assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
@@ -187,61 +208,110 @@ void ObjectMap::aio_resize(uint64_t new_size, uint8_t default_object_state,
   req->send();
 }
 
-bool ObjectMap::aio_update(uint64_t object_no, uint8_t new_state,
-			   const boost::optional<uint8_t> &current_state,
-			   Context *on_finish)
-{
-  return aio_update(object_no, object_no + 1, new_state, current_state,
-		    on_finish);
+template <typename I>
+void ObjectMap<I>::detained_aio_update(UpdateOperation &&op) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 20) << dendl;
+
+  assert(m_image_ctx.snap_lock.is_locked());
+  assert(m_image_ctx.object_map_lock.is_wlocked());
+
+  BlockGuardCell *cell;
+  int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
+                                &op, &cell);
+  if (r < 0) {
+    lderr(cct) << "failed to detain object map update: " << cpp_strerror(r)
+               << dendl;
+    m_image_ctx.op_work_queue->queue(op.on_finish, r);
+    return;
+  } else if (r > 0) {
+    ldout(cct, 20) << "detaining object map update due to in-flight update: "
+                   << "start=" << op.start_object_no << ", "
+		   << "end=" << op.end_object_no << ", "
+                   << (op.current_state ?
+                         stringify(static_cast<uint32_t>(*op.current_state)) :
+                         "")
+		   << "->" << static_cast<uint32_t>(op.new_state) << dendl;
+    return;
+  }
+
+  ldout(cct, 20) << "in-flight update cell: " << cell << dendl;
+  Context *on_finish = op.on_finish;
+  Context *ctx = new FunctionContext([this, cell, on_finish](int r) {
+      handle_detained_aio_update(cell, r, on_finish);
+    });
+  aio_update(CEPH_NOSNAP, op.start_object_no, op.end_object_no, op.new_state,
+             op.current_state, ctx);
 }
 
-bool ObjectMap::aio_update(uint64_t start_object_no, uint64_t end_object_no,
-			   uint8_t new_state,
-                           const boost::optional<uint8_t> &current_state,
-                           Context *on_finish)
-{
+template <typename I>
+void ObjectMap<I>::handle_detained_aio_update(BlockGuardCell *cell, int r,
+                                              Context *on_finish) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 20) << "cell=" << cell << ", r=" << r << dendl;
+
+  typename UpdateGuard::BlockOperations block_ops;
+  m_update_guard->release(cell, &block_ops);
+
+  {
+    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+    RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+    for (auto &op : block_ops) {
+      detained_aio_update(std::move(op));
+    }
+  }
+
+  on_finish->complete(r);
+}
+
+template <typename I>
+void ObjectMap<I>::aio_update(uint64_t snap_id, uint64_t start_object_no,
+                              uint64_t end_object_no, uint8_t new_state,
+                              const boost::optional<uint8_t> &current_state,
+                              Context *on_finish) {
   assert(m_image_ctx.snap_lock.is_locked());
   assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
-  assert(m_image_ctx.owner_lock.is_locked());
+  assert(snap_id != CEPH_NOSNAP || m_image_ctx.owner_lock.is_locked());
   assert(m_image_ctx.image_watcher != NULL);
   assert(m_image_ctx.exclusive_lock == nullptr ||
          m_image_ctx.exclusive_lock->is_lock_owner());
-  assert(m_image_ctx.object_map_lock.is_wlocked());
+  assert(snap_id != CEPH_NOSNAP || m_image_ctx.object_map_lock.is_wlocked());
   assert(start_object_no < end_object_no);
 
   CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 20) << &m_image_ctx << " aio_update: start=" << start_object_no
-		 << ", end=" << end_object_no << ", "
+  ldout(cct, 20) << "start=" << start_object_no << ", "
+		 << "end=" << end_object_no << ", "
                  << (current_state ?
                        stringify(static_cast<uint32_t>(*current_state)) : "")
 		 << "->" << static_cast<uint32_t>(new_state) << dendl;
-  if (end_object_no > m_object_map.size()) {
-    ldout(cct, 20) << "skipping update of invalid object map" << dendl;
-    return false;
-  }
+  if (snap_id == CEPH_NOSNAP) {
+    if (end_object_no > m_object_map.size()) {
+      ldout(cct, 20) << "skipping update of invalid object map" << dendl;
+      m_image_ctx.op_work_queue->queue(on_finish, 0);
+      return;
+    }
 
-  for (uint64_t object_no = start_object_no; object_no < end_object_no;
-       ++object_no) {
-    uint8_t state = m_object_map[object_no];
-    if ((!current_state || state == *current_state ||
-          (*current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) &&
-        state != new_state) {
-      aio_update(m_snap_id, start_object_no, end_object_no, new_state,
-                 current_state, on_finish);
-      return true;
+    uint64_t object_no;
+    for (object_no = start_object_no; object_no < end_object_no; ++object_no) {
+      if (update_required(object_no, new_state)) {
+        break;
+      }
+    }
+    if (object_no == end_object_no) {
+      ldout(cct, 20) << "object map update not required" << dendl;
+      m_image_ctx.op_work_queue->queue(on_finish, 0);
+      return;
     }
   }
-  return false;
-}
 
-void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no,
-                           uint64_t end_object_no, uint8_t new_state,
-                           const boost::optional<uint8_t> &current_state,
-                           Context *on_finish) {
-  object_map::UpdateRequest *req = new object_map::UpdateRequest(
+  auto req = object_map::UpdateRequest<I>::create(
     m_image_ctx, &m_object_map, snap_id, start_object_no, end_object_no,
     new_state, current_state, on_finish);
   req->send();
 }
 
 } // namespace librbd
+
+template class librbd::ObjectMap<librbd::ImageCtx>;
+
diff --git a/src/librbd/ObjectMap.h b/src/librbd/ObjectMap.h
index 5d99180..a3d5ea7 100644
--- a/src/librbd/ObjectMap.h
+++ b/src/librbd/ObjectMap.h
@@ -1,5 +1,6 @@
 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
+
 #ifndef CEPH_LIBRBD_OBJECT_MAP_H
 #define CEPH_LIBRBD_OBJECT_MAP_H
 
@@ -7,6 +8,7 @@
 #include "include/fs_types.h"
 #include "include/rbd/object_map_types.h"
 #include "common/bit_vector.hpp"
+#include "librbd/Utils.h"
 #include <boost/optional.hpp>
 
 class Context;
@@ -17,11 +19,19 @@ namespace librados {
 
 namespace librbd {
 
+template <typename Op> class BlockGuard;
+struct BlockGuardCell;
 class ImageCtx;
 
+template <typename ImageCtxT = ImageCtx>
 class ObjectMap {
 public:
-  ObjectMap(ImageCtx &image_ctx, uint64_t snap_id);
+  static ObjectMap *create(ImageCtxT &image_ctx, uint64_t snap_id) {
+    return new ObjectMap(image_ctx, snap_id);
+  }
+
+  ObjectMap(ImageCtxT &image_ctx, uint64_t snap_id);
+  ~ObjectMap();
 
   static int remove(librados::IoCtx &io_ctx, const std::string &image_id);
   static std::string object_map_name(const std::string &image_id,
@@ -39,35 +49,95 @@ public:
   void close(Context *on_finish);
 
   bool object_may_exist(uint64_t object_no) const;
-  bool update_required(uint64_t object_no, uint8_t new_state);
 
   void aio_save(Context *on_finish);
   void aio_resize(uint64_t new_size, uint8_t default_object_state,
 		  Context *on_finish);
-  bool aio_update(uint64_t object_no, uint8_t new_state,
-		  const boost::optional<uint8_t> &current_state,
-		  Context *on_finish);
-  bool aio_update(uint64_t start_object_no, uint64_t end_object_no,
-		  uint8_t new_state,
-		  const boost::optional<uint8_t> &current_state,
-		  Context *on_finish);
 
-  void aio_update(uint64_t snap_id, uint64_t start_object_no,
+  template <typename T, void(T::*MF)(int) = &T::complete>
+  bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
+                  const boost::optional<uint8_t> &current_state,
+                  T *callback_object) {
+    return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
+                             new_state, current_state, callback_object);
+  }
+
+  template <typename T, void(T::*MF)(int) = &T::complete>
+  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
                   uint64_t end_object_no, uint8_t new_state,
                   const boost::optional<uint8_t> &current_state,
-                  Context *on_finish);
+                  T *callback_object) {
+    assert(start_object_no < end_object_no);
+    if (snap_id == CEPH_NOSNAP) {
+      uint64_t object_no;
+      for (object_no = start_object_no; object_no < end_object_no;
+           ++object_no) {
+        if (update_required(object_no, new_state)) {
+          break;
+        }
+      }
+
+      if (object_no == end_object_no) {
+        return false;
+      }
+
+      UpdateOperation update_operation(start_object_no, end_object_no,
+                                       new_state, current_state,
+                                       util::create_context_callback<T, MF>(
+                                         callback_object));
+      detained_aio_update(std::move(update_operation));
+    } else {
+      aio_update(snap_id, start_object_no, end_object_no, new_state,
+                 current_state,
+                 util::create_context_callback<T, MF>(callback_object));
+    }
+    return true;
+  }
 
   void rollback(uint64_t snap_id, Context *on_finish);
   void snapshot_add(uint64_t snap_id, Context *on_finish);
   void snapshot_remove(uint64_t snap_id, Context *on_finish);
 
 private:
-  ImageCtx &m_image_ctx;
+  struct UpdateOperation {
+    uint64_t start_object_no;
+    uint64_t end_object_no;
+    uint8_t new_state;
+    boost::optional<uint8_t> current_state;
+    Context *on_finish;
+
+    UpdateOperation(uint64_t start_object_no, uint64_t end_object_no,
+                    uint8_t new_state,
+                    const boost::optional<uint8_t> &current_state,
+                    Context *on_finish)
+      : start_object_no(start_object_no), end_object_no(end_object_no),
+        new_state(new_state), current_state(current_state),
+        on_finish(on_finish) {
+    }
+  };
+
+  typedef BlockGuard<UpdateOperation> UpdateGuard;
+
+  ImageCtxT &m_image_ctx;
   ceph::BitVector<2> m_object_map;
   uint64_t m_snap_id;
 
+  UpdateGuard *m_update_guard = nullptr;
+
+  void detained_aio_update(UpdateOperation &&update_operation);
+  void handle_detained_aio_update(BlockGuardCell *cell, int r,
+                                  Context *on_finish);
+
+  void aio_update(uint64_t snap_id, uint64_t start_object_no,
+                  uint64_t end_object_no, uint8_t new_state,
+                  const boost::optional<uint8_t> &current_state,
+                  Context *on_finish);
+  bool update_required(uint64_t object_no, uint8_t new_state);
+
 };
 
 } // namespace librbd
 
+extern template class librbd::ObjectMap<librbd::ImageCtx>;
+
 #endif // CEPH_LIBRBD_OBJECT_MAP_H
diff --git a/src/librbd/Operations.cc b/src/librbd/Operations.cc
index f3e92a7..030ed6d 100644
--- a/src/librbd/Operations.cc
+++ b/src/librbd/Operations.cc
@@ -544,7 +544,7 @@ int Operations<I>::resize(uint64_t size, ProgressContext& prog_ctx) {
   }
 
   if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP) &&
-      !ObjectMap::is_compatible(m_image_ctx.layout, size)) {
+      !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
     lderr(cct) << "New size not compatible with object map" << dendl;
     return -EINVAL;
   }
@@ -582,7 +582,7 @@ void Operations<I>::execute_resize(uint64_t size, ProgressContext &prog_ctx,
     return;
   } else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                        m_image_ctx.snap_lock) &&
-             !ObjectMap::is_compatible(m_image_ctx.layout, size)) {
+             !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
     m_image_ctx.snap_lock.put_read();
     on_finish->complete(-EINVAL);
     return;
@@ -951,6 +951,11 @@ int Operations<I>::snap_protect(const char *snap_name) {
     return -EROFS;
   }
 
+  if (!m_image_ctx.test_features(RBD_FEATURE_LAYERING)) {
+    lderr(cct) << "image must support layering" << dendl;
+    return -ENOSYS;
+  }
+
   int r = m_image_ctx.state->refresh_if_required();
   if (r < 0) {
     return r;
diff --git a/src/librbd/exclusive_lock/AcquireRequest.cc b/src/librbd/exclusive_lock/AcquireRequest.cc
index 94fee20..344a267 100644
--- a/src/librbd/exclusive_lock/AcquireRequest.cc
+++ b/src/librbd/exclusive_lock/AcquireRequest.cc
@@ -15,6 +15,9 @@
 #include "librbd/Journal.h"
 #include "librbd/ObjectMap.h"
 #include "librbd/Utils.h"
+#include "librbd/exclusive_lock/BreakRequest.h"
+#include "librbd/exclusive_lock/GetLockerRequest.h"
+#include "librbd/image/RefreshRequest.h"
 #include "librbd/journal/Policy.h"
 
 #define dout_subsys ceph_subsys_rbd
@@ -29,30 +32,6 @@ using util::create_context_callback;
 using util::create_rados_ack_callback;
 using util::create_rados_safe_callback;
 
-namespace {
-
-template <typename I>
-struct C_BlacklistClient : public Context {
-  I &image_ctx;
-  std::string locker_address;
-  Context *on_finish;
-
-  C_BlacklistClient(I &image_ctx, const std::string &locker_address,
-                    Context *on_finish)
-    : image_ctx(image_ctx), locker_address(locker_address),
-      on_finish(on_finish) {
-  }
-
-  virtual void finish(int r) override {
-    librados::Rados rados(image_ctx.md_ctx);
-    r = rados.blacklist_add(locker_address,
-                            image_ctx.blacklist_expire_seconds);
-    on_finish->complete(r);
-  }
-};
-
-} // anonymous namespace
-
 template <typename I>
 AcquireRequest<I>* AcquireRequest<I>::create(I &image_ctx,
                                              const std::string &cookie,
@@ -71,12 +50,35 @@ AcquireRequest<I>::AcquireRequest(I &image_ctx, const std::string &cookie,
 
 template <typename I>
 AcquireRequest<I>::~AcquireRequest() {
+  if (!m_prepare_lock_completed) {
+    m_image_ctx.state->handle_prepare_lock_complete();
+  }
   delete m_on_acquire;
 }
 
 template <typename I>
 void AcquireRequest<I>::send() {
+  send_prepare_lock();
+}
+
+template <typename I>
+void AcquireRequest<I>::send_prepare_lock() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << dendl;
+
+  // acquire the lock if the image is not busy performing other actions
+  Context *ctx = create_context_callback<
+    AcquireRequest<I>, &AcquireRequest<I>::handle_prepare_lock>(this);
+  m_image_ctx.state->prepare_lock(ctx);
+}
+
+template <typename I>
+Context *AcquireRequest<I>::handle_prepare_lock(int *ret_val) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
+
   send_flush_notifies();
+  return nullptr;
 }
 
 template <typename I>
@@ -96,6 +98,39 @@ Context *AcquireRequest<I>::handle_flush_notifies(int *ret_val) {
   ldout(cct, 10) << __func__ << dendl;
 
   assert(*ret_val == 0);
+  send_get_locker();
+  return nullptr;
+}
+
+template <typename I>
+void AcquireRequest<I>::send_get_locker() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << dendl;
+
+  Context *ctx = create_context_callback<
+    AcquireRequest<I>, &AcquireRequest<I>::handle_get_locker>(this);
+  auto req = GetLockerRequest<I>::create(m_image_ctx, &m_locker, ctx);
+  req->send();
+}
+
+template <typename I>
+Context *AcquireRequest<I>::handle_get_locker(int *ret_val) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
+
+  if (*ret_val == -ENOENT) {
+    ldout(cct, 20) << "no lockers detected" << dendl;
+    m_locker = {};
+    *ret_val = 0;
+  } else if (*ret_val == -EBUSY) {
+    ldout(cct, 5) << "incompatible lock detected" << dendl;
+    return m_on_finish;
+  } else if (*ret_val < 0) {
+    lderr(cct) << "failed to retrieve lockers: " << cpp_strerror(*ret_val)
+               << dendl;
+    return m_on_finish;
+  }
+
   send_lock();
   return nullptr;
 }
@@ -107,7 +142,7 @@ void AcquireRequest<I>::send_lock() {
 
   librados::ObjectWriteOperation op;
   rados::cls::lock::lock(&op, RBD_LOCK_NAME, LOCK_EXCLUSIVE, m_cookie,
-                         ExclusiveLock<I>::WATCHER_LOCK_TAG, "", utime_t(), 0);
+                         ExclusiveLock<>::WATCHER_LOCK_TAG, "", utime_t(), 0);
 
   using klass = AcquireRequest<I>;
   librados::AioCompletion *rados_completion =
@@ -125,12 +160,16 @@ Context *AcquireRequest<I>::handle_lock(int *ret_val) {
 
   if (*ret_val == 0) {
     return send_refresh();
+  } else if (*ret_val == -EBUSY && m_locker.cookie.empty()) {
+    ldout(cct, 5) << "already locked, refreshing locker" << dendl;
+    send_get_locker();
+    return nullptr;
   } else if (*ret_val != -EBUSY) {
     lderr(cct) << "failed to lock: " << cpp_strerror(*ret_val) << dendl;
     return m_on_finish;
   }
 
-  send_get_lockers();
+  send_break_lock();
   return nullptr;
 }
 
@@ -144,8 +183,14 @@ Context *AcquireRequest<I>::send_refresh() {
   ldout(cct, 10) << __func__ << dendl;
 
   using klass = AcquireRequest<I>;
-  Context *ctx = create_context_callback<klass, &klass::handle_refresh>(this);
-  m_image_ctx.state->acquire_lock_refresh(ctx);
+  Context *ctx = create_async_context_callback(
+    m_image_ctx, create_context_callback<klass, &klass::handle_refresh>(this));
+
+  // ImageState is blocked waiting for lock to complete -- safe to directly
+  // refresh
+  image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
+    m_image_ctx, true, false, ctx);
+  req->send();
   return nullptr;
 }
 
@@ -154,7 +199,11 @@ Context *AcquireRequest<I>::handle_refresh(int *ret_val) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
 
-  if (*ret_val < 0) {
+  if (*ret_val == -ERESTART) {
+    // next issued IO or op will (re)-refresh the image and shut down lock
+    ldout(cct, 5) << ": exclusive lock dynamically disabled" << dendl;
+    *ret_val = 0;
+  } else if (*ret_val < 0) {
     lderr(cct) << "failed to refresh image: " << cpp_strerror(*ret_val)
                << dendl;
     m_error_result = *ret_val;
@@ -359,183 +408,15 @@ Context *AcquireRequest<I>::handle_unlock(int *ret_val) {
 }
 
 template <typename I>
-void AcquireRequest<I>::send_get_lockers() {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << dendl;
-
-  librados::ObjectReadOperation op;
-  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
-
-  using klass = AcquireRequest<I>;
-  librados::AioCompletion *rados_completion =
-    create_rados_ack_callback<klass, &klass::handle_get_lockers>(this);
-  m_out_bl.clear();
-  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
-                                         rados_completion, &op, &m_out_bl);
-  assert(r == 0);
-  rados_completion->release();
-}
-
-template <typename I>
-Context *AcquireRequest<I>::handle_get_lockers(int *ret_val) {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
-
-  std::map<rados::cls::lock::locker_id_t,
-           rados::cls::lock::locker_info_t> lockers;
-  ClsLockType lock_type;
-  std::string lock_tag;
-  if (*ret_val == 0) {
-    bufferlist::iterator it = m_out_bl.begin();
-    *ret_val = rados::cls::lock::get_lock_info_finish(&it, &lockers,
-                                                      &lock_type, &lock_tag);
-  }
-
-  if (*ret_val < 0) {
-    lderr(cct) << "failed to retrieve lockers: " << cpp_strerror(*ret_val)
-               << dendl;
-    return m_on_finish;
-  }
-
-  if (lockers.empty()) {
-    ldout(cct, 20) << "no lockers detected" << dendl;
-    send_lock();
-    return nullptr;
-  }
-
-  if (lock_tag != ExclusiveLock<I>::WATCHER_LOCK_TAG) {
-    ldout(cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl;
-    *ret_val = -EBUSY;
-    return m_on_finish;
-  }
-
-  if (lock_type == LOCK_SHARED) {
-    ldout(cct, 5) << "shared lock type detected" << dendl;
-    *ret_val = -EBUSY;
-    return m_on_finish;
-  }
-
-  std::map<rados::cls::lock::locker_id_t,
-           rados::cls::lock::locker_info_t>::iterator iter = lockers.begin();
-  if (!ExclusiveLock<I>::decode_lock_cookie(iter->first.cookie,
-                                            &m_locker_handle)) {
-    ldout(cct, 5) << "locked by external mechanism: "
-                  << "cookie=" << iter->first.cookie << dendl;
-    *ret_val = -EBUSY;
-    return m_on_finish;
-  }
-
-  m_locker_entity = iter->first.locker;
-  m_locker_cookie = iter->first.cookie;
-  m_locker_address = stringify(iter->second.addr);
-  if (m_locker_cookie.empty() || m_locker_address.empty()) {
-    ldout(cct, 20) << "no valid lockers detected" << dendl;
-    send_lock();
-    return nullptr;
-  }
-
-  ldout(cct, 10) << "retrieved exclusive locker: "
-                 << m_locker_entity << "@" << m_locker_address << dendl;
-  send_get_watchers();
-  return nullptr;
-}
-
-template <typename I>
-void AcquireRequest<I>::send_get_watchers() {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << dendl;
-
-  librados::ObjectReadOperation op;
-  op.list_watchers(&m_watchers, &m_watchers_ret_val);
-
-  using klass = AcquireRequest<I>;
-  librados::AioCompletion *rados_completion =
-    create_rados_ack_callback<klass, &klass::handle_get_watchers>(this);
-  m_out_bl.clear();
-  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
-                                         rados_completion, &op, &m_out_bl);
-  assert(r == 0);
-  rados_completion->release();
-}
-
-template <typename I>
-Context *AcquireRequest<I>::handle_get_watchers(int *ret_val) {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
-
-  if (*ret_val == 0) {
-    *ret_val = m_watchers_ret_val;
-  }
-  if (*ret_val < 0) {
-    lderr(cct) << "failed to retrieve watchers: " << cpp_strerror(*ret_val)
-               << dendl;
-    return m_on_finish;
-  }
-
-  for (auto &watcher : m_watchers) {
-    if ((strncmp(m_locker_address.c_str(),
-                 watcher.addr, sizeof(watcher.addr)) == 0) &&
-        (m_locker_handle == watcher.cookie)) {
-      ldout(cct, 10) << "lock owner is still alive" << dendl;
-
-      *ret_val = -EAGAIN;
-      return m_on_finish;
-    }
-  }
-
-  send_blacklist();
-  return nullptr;
-}
-
-template <typename I>
-void AcquireRequest<I>::send_blacklist() {
-  if (!m_image_ctx.blacklist_on_break_lock) {
-    send_break_lock();
-    return;
-  }
-
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << dendl;
-
-  // TODO: need async version of RadosClient::blacklist_add
-  using klass = AcquireRequest<I>;
-  Context *ctx = create_context_callback<klass, &klass::handle_blacklist>(
-    this);
-  m_image_ctx.op_work_queue->queue(new C_BlacklistClient<I>(m_image_ctx,
-                                                            m_locker_address,
-                                                            ctx), 0);
-}
-
-template <typename I>
-Context *AcquireRequest<I>::handle_blacklist(int *ret_val) {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
-
-  if (*ret_val < 0) {
-    lderr(cct) << "failed to blacklist lock owner: " << cpp_strerror(*ret_val)
-               << dendl;
-    return m_on_finish;
-  }
-  send_break_lock();
-  return nullptr;
-}
-
-template <typename I>
 void AcquireRequest<I>::send_break_lock() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << dendl;
 
-  librados::ObjectWriteOperation op;
-  rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker_cookie,
-                               m_locker_entity);
-
-  using klass = AcquireRequest<I>;
-  librados::AioCompletion *rados_completion =
-    create_rados_safe_callback<klass, &klass::handle_break_lock>(this);
-  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
-                                         rados_completion, &op);
-  assert(r == 0);
-  rados_completion->release();
+  Context *ctx = create_context_callback<
+    AcquireRequest<I>, &AcquireRequest<I>::handle_break_lock>(this);
+  auto req = BreakRequest<I>::create(
+    m_image_ctx, m_locker, m_image_ctx.blacklist_on_break_lock, false, ctx);
+  req->send();
 }
 
 template <typename I>
@@ -543,25 +424,31 @@ Context *AcquireRequest<I>::handle_break_lock(int *ret_val) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
 
-  if (*ret_val == -ENOENT) {
-    *ret_val = 0;
+  if (*ret_val == -EAGAIN) {
+    ldout(cct, 5) << "lock owner is still alive" << dendl;
+    return m_on_finish;
   } else if (*ret_val < 0) {
-    lderr(cct) << "failed to break lock: " << cpp_strerror(*ret_val) << dendl;
+    lderr(cct) << "failed to break lock: " << cpp_strerror(*ret_val) << dendl;
     return m_on_finish;
   }
 
-  send_lock();
+  send_get_locker();
   return nullptr;
 }
 
 template <typename I>
 void AcquireRequest<I>::apply() {
-  RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
-  assert(m_image_ctx.object_map == nullptr);
-  m_image_ctx.object_map = m_object_map;
+  {
+    RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
+    assert(m_image_ctx.object_map == nullptr);
+    m_image_ctx.object_map = m_object_map;
+
+    assert(m_image_ctx.journal == nullptr);
+    m_image_ctx.journal = m_journal;
+  }
 
-  assert(m_image_ctx.journal == nullptr);
-  m_image_ctx.journal = m_journal;
+  m_prepare_lock_completed = true;
+  m_image_ctx.state->handle_prepare_lock_complete();
 }
 
 template <typename I>
diff --git a/src/librbd/exclusive_lock/AcquireRequest.h b/src/librbd/exclusive_lock/AcquireRequest.h
index 7b31d92..694b698 100644
--- a/src/librbd/exclusive_lock/AcquireRequest.h
+++ b/src/librbd/exclusive_lock/AcquireRequest.h
@@ -7,7 +7,7 @@
 #include "include/int_types.h"
 #include "include/buffer.h"
 #include "librbd/ImageCtx.h"
-#include "msg/msg_types.h"
+#include "librbd/exclusive_lock/Types.h"
 #include <string>
 
 class Context;
@@ -35,26 +35,29 @@ private:
    * <start>
    *    |
    *    v
+   * PREPARE_LOCK
+   *    |
+   *    v
    * FLUSH_NOTIFIES
    *    |
-   *    |     /-----------------------------------------------------------\
-   *    |     |                                                           |
-   *    |     |             (no lockers)                                  |
-   *    |     |   . . . . . . . . . . . . . . . . . . . . . .             |
-   *    |     |   .                                         .             |
-   *    |     v   v      (EBUSY)                            .             |
-   *    \--> LOCK_IMAGE * * * * * * * * > GET_LOCKERS . . . .             |
-   *              |                         |                             |
-   *              v                         v                             |
-   *         REFRESH (skip if not         GET_WATCHERS                    |
-   *              |   needed)               |                             |
-   *              v                         v                             |
-   *         OPEN_OBJECT_MAP (skip if     BLACKLIST (skip if blacklist    |
-   *              |           disabled)     |        disabled)            |
-   *              v                         v                             |
-   *         OPEN_JOURNAL (skip if        BREAK_LOCK                      |
-   *              |   *     disabled)       |                             |
-   *              |   *                     \-----------------------------/
+   *    v
+   * GET_LOCKERS <--------------------------------------\
+   *    |     ^                                         |
+   *    |     . (EBUSY && no cached locker)             |
+   *    |     .                                         |
+   *    |     .          (EBUSY && cached locker)       |
+   *    \--> LOCK_IMAGE * * * * * * * * > BREAK_LOCK ---/
+   *              |
+   *              v
+   *         REFRESH (skip if not
+   *              |   needed)
+   *              v
+   *         OPEN_OBJECT_MAP (skip if
+   *              |           disabled)
+   *              v
+   *         OPEN_JOURNAL (skip if
+   *              |   *     disabled)
+   *              |   *
    *              |   * * * * * * * *
    *              v                 *
    *          ALLOCATE_JOURNAL_TAG  *
@@ -83,24 +86,23 @@ private:
   Context *m_on_acquire;
   Context *m_on_finish;
 
-  bufferlist m_out_bl;
-
-  std::list<obj_watch_t> m_watchers;
-  int m_watchers_ret_val;
-
   decltype(m_image_ctx.object_map) m_object_map;
   decltype(m_image_ctx.journal) m_journal;
 
-  entity_name_t m_locker_entity;
-  std::string m_locker_cookie;
-  std::string m_locker_address;
-  uint64_t m_locker_handle;
+  Locker m_locker;
 
   int m_error_result;
+  bool m_prepare_lock_completed = false;
+
+  void send_prepare_lock();
+  Context *handle_prepare_lock(int *ret_val);
 
   void send_flush_notifies();
   Context *handle_flush_notifies(int *ret_val);
 
+  void send_get_locker();
+  Context *handle_get_locker(int *ret_val);
+
   void send_lock();
   Context *handle_lock(int *ret_val);
 
@@ -125,15 +127,6 @@ private:
   void send_unlock();
   Context *handle_unlock(int *ret_val);
 
-  void send_get_lockers();
-  Context *handle_get_lockers(int *ret_val);
-
-  void send_get_watchers();
-  Context *handle_get_watchers(int *ret_val);
-
-  void send_blacklist();
-  Context *handle_blacklist(int *ret_val);
-
   void send_break_lock();
   Context *handle_break_lock(int *ret_val);
 
diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.cc b/src/librbd/exclusive_lock/AutomaticPolicy.cc
new file mode 100644
index 0000000..a27cf49
--- /dev/null
+++ b/src/librbd/exclusive_lock/AutomaticPolicy.cc
@@ -0,0 +1,29 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/AutomaticPolicy.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/ExclusiveLock.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::ExclusiveLock::AutomaticPolicy "
+
+namespace librbd {
+namespace exclusive_lock {
+
+int AutomaticPolicy::lock_requested(bool force) {
+  assert(m_image_ctx->owner_lock.is_locked());
+  assert(m_image_ctx->exclusive_lock != nullptr);
+
+  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
+			      << dendl;
+
+  // release the lock upon request (ignore forced requests)
+  m_image_ctx->exclusive_lock->release_lock(nullptr);
+  return 0;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.h b/src/librbd/exclusive_lock/AutomaticPolicy.h
new file mode 100644
index 0000000..6d15542
--- /dev/null
+++ b/src/librbd/exclusive_lock/AutomaticPolicy.h
@@ -0,0 +1,34 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
+
+#include "librbd/exclusive_lock/Policy.h"
+
+namespace librbd {
+
+struct ImageCtx;
+
+namespace exclusive_lock {
+
+class AutomaticPolicy : public Policy {
+public:
+  AutomaticPolicy(ImageCtx *image_ctx) : m_image_ctx(image_ctx) {
+  }
+
+  virtual bool may_auto_request_lock() {
+    return true;
+  }
+
+  virtual int lock_requested(bool force);
+
+private:
+  ImageCtx *m_image_ctx;
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
diff --git a/src/librbd/exclusive_lock/BreakRequest.cc b/src/librbd/exclusive_lock/BreakRequest.cc
new file mode 100644
index 0000000..8b889e7
--- /dev/null
+++ b/src/librbd/exclusive_lock/BreakRequest.cc
@@ -0,0 +1,184 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/BreakRequest.h"
+#include "common/dout.h"
+#include "common/errno.h"
+#include "common/WorkQueue.h"
+#include "include/stringify.h"
+#include "cls/lock/cls_lock_client.h"
+#include "cls/lock/cls_lock_types.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/Utils.h"
+#include "librbd/exclusive_lock/Types.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::exclusive_lock::BreakRequest: " << this \
+                           << " " << __func__ << ": "
+
+namespace librbd {
+namespace exclusive_lock {
+
+using util::create_context_callback;
+using util::create_rados_ack_callback;
+using util::create_rados_safe_callback;
+
+namespace {
+
+template <typename I>
+struct C_BlacklistClient : public Context {
+  I &image_ctx;
+  std::string locker_address;
+  Context *on_finish;
+
+  C_BlacklistClient(I &image_ctx, const std::string &locker_address,
+                    Context *on_finish)
+    : image_ctx(image_ctx), locker_address(locker_address),
+      on_finish(on_finish) {
+  }
+
+  virtual void finish(int r) override {
+    librados::Rados rados(image_ctx.md_ctx);
+    r = rados.blacklist_add(locker_address,
+                            image_ctx.blacklist_expire_seconds);
+    on_finish->complete(r);
+  }
+};
+
+} // anonymous namespace
+
+template <typename I>
+void BreakRequest<I>::send() {
+  send_get_watchers();
+}
+
+template <typename I>
+void BreakRequest<I>::send_get_watchers() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::ObjectReadOperation op;
+  op.list_watchers(&m_watchers, &m_watchers_ret_val);
+
+  using klass = BreakRequest<I>;
+  librados::AioCompletion *rados_completion =
+    create_rados_ack_callback<klass, &klass::handle_get_watchers>(this);
+  m_out_bl.clear();
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                         rados_completion, &op, &m_out_bl);
+  assert(r == 0);
+  rados_completion->release();
+}
+
+template <typename I>
+void BreakRequest<I>::handle_get_watchers(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  if (r == 0) {
+    r = m_watchers_ret_val;
+  }
+  if (r < 0) {
+    lderr(cct) << "failed to retrieve watchers: " << cpp_strerror(r)
+               << dendl;
+    finish(r);
+    return;
+  }
+
+  for (auto &watcher : m_watchers) {
+    if ((strncmp(m_locker.address.c_str(),
+                 watcher.addr, sizeof(watcher.addr)) == 0) &&
+        (m_locker.handle == watcher.cookie)) {
+      ldout(cct, 10) << "lock owner is still alive" << dendl;
+
+      if (m_force_break_lock) {
+        break;
+      } else {
+        finish(-EAGAIN);
+        return;
+      }
+    }
+  }
+
+  send_blacklist();
+}
+
+template <typename I>
+void BreakRequest<I>::send_blacklist() {
+  if (!m_blacklist_locker) {
+    send_break_lock();
+    return;
+  }
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  // TODO: need async version of RadosClient::blacklist_add
+  using klass = BreakRequest<I>;
+  Context *ctx = create_context_callback<klass, &klass::handle_blacklist>(
+    this);
+  m_image_ctx.op_work_queue->queue(new C_BlacklistClient<I>(m_image_ctx,
+                                                            m_locker.address,
+                                                            ctx), 0);
+}
+
+template <typename I>
+void BreakRequest<I>::handle_blacklist(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  if (r < 0) {
+    lderr(cct) << "failed to blacklist lock owner: " << cpp_strerror(r)
+               << dendl;
+    finish(r);
+    return;
+  }
+  send_break_lock();
+}
+
+template <typename I>
+void BreakRequest<I>::send_break_lock() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::ObjectWriteOperation op;
+  rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker.cookie,
+                               m_locker.entity);
+
+  using klass = BreakRequest<I>;
+  librados::AioCompletion *rados_completion =
+    create_rados_safe_callback<klass, &klass::handle_break_lock>(this);
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                         rados_completion, &op);
+  assert(r == 0);
+  rados_completion->release();
+}
+
+template <typename I>
+void BreakRequest<I>::handle_break_lock(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  if (r < 0 && r != -ENOENT) {
+    lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
+    finish(r);
+    return;
+  }
+
+  finish(0);
+}
+
+template <typename I>
+void BreakRequest<I>::finish(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  m_on_finish->complete(r);
+  delete this;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+template class librbd::exclusive_lock::BreakRequest<librbd::ImageCtx>;
diff --git a/src/librbd/exclusive_lock/BreakRequest.h b/src/librbd/exclusive_lock/BreakRequest.h
new file mode 100644
index 0000000..05bfe38
--- /dev/null
+++ b/src/librbd/exclusive_lock/BreakRequest.h
@@ -0,0 +1,95 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_BREAK_REQUEST_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_BREAK_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/buffer.h"
+#include "msg/msg_types.h"
+#include "librbd/ImageCtx.h"
+#include <list>
+#include <string>
+#include <boost/optional.hpp>
+
+class Context;
+
+namespace librbd {
+
+template <typename> class Journal;
+
+namespace exclusive_lock {
+
+struct Locker;
+
+template <typename ImageCtxT = ImageCtx>
+class BreakRequest {
+public:
+  static BreakRequest* create(ImageCtxT &image_ctx, const Locker &locker,
+                              bool blacklist_locker, bool force_break_lock,
+                              Context *on_finish) {
+    return new BreakRequest(image_ctx, locker, blacklist_locker,
+                            force_break_lock, on_finish);
+  }
+
+  void send();
+
+private:
+  /**
+   * @verbatim
+   *
+   * <start>
+   *    |
+   *    v
+   * GET_WATCHERS
+   *    |
+   *    v
+   * BLACKLIST (skip if disabled)
+   *    |
+   *    v
+   * BREAK_LOCK
+   *    |
+   *    v
+   * <finish>
+   *
+   * @endverbatim
+   */
+
+  ImageCtxT &m_image_ctx;
+  const Locker &m_locker;
+  bool m_blacklist_locker;
+  bool m_force_break_lock;
+  Context *m_on_finish;
+
+  bufferlist m_out_bl;
+
+  std::list<obj_watch_t> m_watchers;
+  int m_watchers_ret_val;
+
+  BreakRequest(ImageCtxT &image_ctx, const Locker &locker,
+               bool blacklist_locker, bool force_break_lock,
+               Context *on_finish)
+    : m_image_ctx(image_ctx), m_locker(locker),
+      m_blacklist_locker(blacklist_locker),
+      m_force_break_lock(force_break_lock), m_on_finish(on_finish) {
+  }
+
+  void send_get_watchers();
+  void handle_get_watchers(int r);
+
+  void send_blacklist();
+  void handle_blacklist(int r);
+
+  void send_break_lock();
+  void handle_break_lock(int r);
+
+  void finish(int r);
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+extern template class librbd::exclusive_lock::BreakRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_BREAK_REQUEST_H
diff --git a/src/librbd/exclusive_lock/GetLockerRequest.cc b/src/librbd/exclusive_lock/GetLockerRequest.cc
new file mode 100644
index 0000000..9ab7edf
--- /dev/null
+++ b/src/librbd/exclusive_lock/GetLockerRequest.cc
@@ -0,0 +1,124 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/GetLockerRequest.h"
+#include "cls/lock/cls_lock_client.h"
+#include "cls/lock/cls_lock_types.h"
+#include "common/dout.h"
+#include "common/errno.h"
+#include "include/stringify.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Utils.h"
+#include "librbd/exclusive_lock/Types.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::exclusive_lock::GetLockerRequest: " \
+                           << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace exclusive_lock {
+
+using util::create_rados_ack_callback;
+
+template <typename I>
+void GetLockerRequest<I>::send() {
+  send_get_lockers();
+}
+
+template <typename I>
+void GetLockerRequest<I>::send_get_lockers() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::ObjectReadOperation op;
+  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
+
+  using klass = GetLockerRequest<I>;
+  librados::AioCompletion *rados_completion =
+    create_rados_ack_callback<klass, &klass::handle_get_lockers>(this);
+  m_out_bl.clear();
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                         rados_completion, &op, &m_out_bl);
+  assert(r == 0);
+  rados_completion->release();
+}
+
+template <typename I>
+void GetLockerRequest<I>::handle_get_lockers(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  std::map<rados::cls::lock::locker_id_t,
+           rados::cls::lock::locker_info_t> lockers;
+  ClsLockType lock_type = LOCK_NONE;
+  std::string lock_tag;
+  if (r == 0) {
+    bufferlist::iterator it = m_out_bl.begin();
+    r = rados::cls::lock::get_lock_info_finish(&it, &lockers,
+                                               &lock_type, &lock_tag);
+  }
+
+  if (r < 0) {
+    lderr(cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl;
+    finish(r);
+    return;
+  }
+
+  if (lockers.empty()) {
+    ldout(cct, 20) << "no lockers detected" << dendl;
+    finish(-ENOENT);
+    return;
+  }
+
+  if (lock_tag != ExclusiveLock<>::WATCHER_LOCK_TAG) {
+    ldout(cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl;
+    finish(-EBUSY);
+    return;
+  }
+
+  if (lock_type == LOCK_SHARED) {
+    ldout(cct, 5) << "shared lock type detected" << dendl;
+    finish(-EBUSY);
+    return;
+  }
+
+  std::map<rados::cls::lock::locker_id_t,
+           rados::cls::lock::locker_info_t>::iterator iter = lockers.begin();
+  if (!ExclusiveLock<>::decode_lock_cookie(iter->first.cookie,
+                                            &m_locker->handle)) {
+    ldout(cct, 5) << "locked by external mechanism: "
+                  << "cookie=" << iter->first.cookie << dendl;
+    finish(-EBUSY);
+    return;
+  }
+
+  m_locker->entity = iter->first.locker;
+  m_locker->cookie = iter->first.cookie;
+  m_locker->address = stringify(iter->second.addr);
+  if (m_locker->cookie.empty() || m_locker->address.empty()) {
+    ldout(cct, 20) << "no valid lockers detected" << dendl;
+    finish(-ENOENT);
+    return;
+  }
+
+  ldout(cct, 10) << "retrieved exclusive locker: "
+                 << m_locker->entity << "@" << m_locker->address << dendl;
+  finish(0);
+}
+
+template <typename I>
+void GetLockerRequest<I>::finish(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  m_on_finish->complete(r);
+  delete this;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+template class librbd::exclusive_lock::GetLockerRequest<librbd::ImageCtx>;
+
diff --git a/src/librbd/exclusive_lock/GetLockerRequest.h b/src/librbd/exclusive_lock/GetLockerRequest.h
new file mode 100644
index 0000000..ea39abd
--- /dev/null
+++ b/src/librbd/exclusive_lock/GetLockerRequest.h
@@ -0,0 +1,53 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_GET_LOCKER_REQUEST_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_GET_LOCKER_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/buffer.h"
+
+class Context;
+
+namespace librbd {
+
+struct ImageCtx;
+
+namespace exclusive_lock {
+
+struct Locker;
+
+template <typename ImageCtxT = ImageCtx>
+class GetLockerRequest {
+public:
+  static GetLockerRequest* create(ImageCtxT &image_ctx, Locker *locker,
+                                  Context *on_finish) {
+    return new GetLockerRequest(image_ctx, locker, on_finish);
+  }
+
+  void send();
+
+private:
+  ImageCtxT &m_image_ctx;
+  Locker *m_locker;
+  Context *m_on_finish;
+
+  bufferlist m_out_bl;
+
+  GetLockerRequest(ImageCtxT &image_ctx, Locker *locker, Context *on_finish)
+    : m_image_ctx(image_ctx), m_locker(locker), m_on_finish(on_finish) {
+  }
+
+  void send_get_lockers();
+  void handle_get_lockers(int r);
+
+  void finish(int r);
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+extern template class librbd::exclusive_lock::GetLockerRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_GET_LOCKER_REQUEST_H
diff --git a/src/librbd/exclusive_lock/Policy.h b/src/librbd/exclusive_lock/Policy.h
index 2ff8418..72b2945 100644
--- a/src/librbd/exclusive_lock/Policy.h
+++ b/src/librbd/exclusive_lock/Policy.h
@@ -11,7 +11,8 @@ struct Policy {
   virtual ~Policy() {
   }
 
-  virtual void lock_requested(bool force) = 0;
+  virtual bool may_auto_request_lock() = 0;
+  virtual int lock_requested(bool force) = 0;
 };
 
 } // namespace exclusive_lock
diff --git a/src/librbd/exclusive_lock/ReacquireRequest.cc b/src/librbd/exclusive_lock/ReacquireRequest.cc
new file mode 100644
index 0000000..e847f65
--- /dev/null
+++ b/src/librbd/exclusive_lock/ReacquireRequest.cc
@@ -0,0 +1,72 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/ReacquireRequest.h"
+#include "cls/lock/cls_lock_client.h"
+#include "cls/lock/cls_lock_types.h"
+#include "common/dout.h"
+#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Utils.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::exclusive_lock::ReacquireRequest: " \
+                           << this << ": " << __func__
+
+namespace librbd {
+namespace exclusive_lock {
+
+using librbd::util::create_rados_safe_callback;
+
+template <typename I>
+ReacquireRequest<I>::ReacquireRequest(I &image_ctx,
+                                      const std::string &old_cookie,
+                                      const std::string &new_cookie,
+                                      Context *on_finish)
+  : m_image_ctx(image_ctx), m_old_cookie(old_cookie), m_new_cookie(new_cookie),
+    m_on_finish(on_finish) {
+}
+
+template <typename I>
+void ReacquireRequest<I>::send() {
+  set_cookie();
+}
+
+template <typename I>
+void ReacquireRequest<I>::set_cookie() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::ObjectWriteOperation op;
+  rados::cls::lock::set_cookie(&op, RBD_LOCK_NAME, LOCK_EXCLUSIVE, m_old_cookie,
+                               ExclusiveLock<>::WATCHER_LOCK_TAG, m_new_cookie);
+
+  librados::AioCompletion *rados_completion = create_rados_safe_callback<
+    ReacquireRequest<I>, &ReacquireRequest<I>::handle_set_cookie>(this);
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                         rados_completion, &op);
+  assert(r == 0);
+  rados_completion->release();
+}
+
+template <typename I>
+void ReacquireRequest<I>::handle_set_cookie(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << ": r=" << r << dendl;
+
+  if (r == -EOPNOTSUPP) {
+    ldout(cct, 10) << ": OSD doesn't support updating lock" << dendl;
+  } else if (r < 0) {
+    lderr(cct) << ": failed to update lock: " << cpp_strerror(r) << dendl;
+  }
+
+  m_on_finish->complete(r);
+  delete this;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+template class librbd::exclusive_lock::ReacquireRequest<librbd::ImageCtx>;
diff --git a/src/librbd/exclusive_lock/ReacquireRequest.h b/src/librbd/exclusive_lock/ReacquireRequest.h
new file mode 100644
index 0000000..2718bef
--- /dev/null
+++ b/src/librbd/exclusive_lock/ReacquireRequest.h
@@ -0,0 +1,63 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_REACQUIRE_REQUEST_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_REACQUIRE_REQUEST_H
+
+#include "include/int_types.h"
+#include <string>
+
+class Context;
+
+namespace librbd {
+
+class ImageCtx;
+
+namespace exclusive_lock {
+
+template <typename ImageCtxT = ImageCtx>
+class ReacquireRequest {
+public:
+
+  static ReacquireRequest *create(ImageCtxT &image_ctx,
+                                  const std::string &old_cookie,
+                                  const std::string &new_cookie,
+                                  Context *on_finish) {
+    return new ReacquireRequest(image_ctx, old_cookie, new_cookie, on_finish);
+  }
+
+  ReacquireRequest(ImageCtxT &image_ctx, const std::string &old_cookie,
+                   const std::string &new_cookie, Context *on_finish);
+
+  void send();
+
+private:
+  /**
+   * @verbatim
+   *
+   * <start>
+   *    |
+   *    v
+   * SET_COOKIE
+   *    |
+   *    v
+   * <finish>
+   *
+   * @endverbatim
+   */
+  ImageCtxT &m_image_ctx;
+  std::string m_old_cookie;
+  std::string m_new_cookie;
+  Context *m_on_finish;
+
+  void set_cookie();
+  void handle_set_cookie(int r);
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+extern template class librbd::exclusive_lock::ReacquireRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_REACQUIRE_REQUEST_H
diff --git a/src/librbd/exclusive_lock/ReleaseRequest.cc b/src/librbd/exclusive_lock/ReleaseRequest.cc
index bed9517..c37cbc8 100644
--- a/src/librbd/exclusive_lock/ReleaseRequest.cc
+++ b/src/librbd/exclusive_lock/ReleaseRequest.cc
@@ -8,6 +8,7 @@
 #include "common/errno.h"
 #include "librbd/AioImageRequestWQ.h"
 #include "librbd/ExclusiveLock.h"
+#include "librbd/ImageState.h"
 #include "librbd/ImageWatcher.h"
 #include "librbd/Journal.h"
 #include "librbd/ObjectMap.h"
@@ -28,26 +29,57 @@ template <typename I>
 ReleaseRequest<I>* ReleaseRequest<I>::create(I &image_ctx,
                                              const std::string &cookie,
                                              Context *on_releasing,
-                                             Context *on_finish) {
-  return new ReleaseRequest(image_ctx, cookie, on_releasing, on_finish);
+                                             Context *on_finish,
+                                             bool shutting_down) {
+  return new ReleaseRequest(image_ctx, cookie, on_releasing, on_finish,
+                            shutting_down);
 }
 
 template <typename I>
 ReleaseRequest<I>::ReleaseRequest(I &image_ctx, const std::string &cookie,
-                                  Context *on_releasing, Context *on_finish)
+                                  Context *on_releasing, Context *on_finish,
+                                  bool shutting_down)
   : m_image_ctx(image_ctx), m_cookie(cookie), m_on_releasing(on_releasing),
     m_on_finish(create_async_context_callback(image_ctx, on_finish)),
-    m_object_map(nullptr), m_journal(nullptr) {
+    m_shutting_down(shutting_down), m_object_map(nullptr), m_journal(nullptr) {
 }
 
 template <typename I>
 ReleaseRequest<I>::~ReleaseRequest() {
+  if (!m_shutting_down) {
+    m_image_ctx.state->handle_prepare_lock_complete();
+  }
   delete m_on_releasing;
 }
 
 template <typename I>
 void ReleaseRequest<I>::send() {
+  send_prepare_lock();
+}
+
+template <typename I>
+void ReleaseRequest<I>::send_prepare_lock() {
+  if (m_shutting_down) {
+    send_cancel_op_requests();
+    return;
+  }
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << dendl;
+
+  // release the lock if the image is not busy performing other actions
+  Context *ctx = create_context_callback<
+    ReleaseRequest<I>, &ReleaseRequest<I>::handle_prepare_lock>(this);
+  m_image_ctx.state->prepare_lock(ctx);
+}
+
+template <typename I>
+Context *ReleaseRequest<I>::handle_prepare_lock(int *ret_val) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
+
   send_cancel_op_requests();
+  return nullptr;
 }
 
 template <typename I>
@@ -95,7 +127,54 @@ Context *ReleaseRequest<I>::handle_block_writes(int *ret_val) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
 
-  if (*ret_val < 0) {
+  if (*ret_val == -EBLACKLISTED) {
+    // allow clean shut down if blacklisted
+    lderr(cct) << "failed to block writes because client is blacklisted"
+               << dendl;
+  } else if (*ret_val < 0) {
+    lderr(cct) << "failed to block writes: " << cpp_strerror(*ret_val) << dendl;
+    m_image_ctx.aio_work_queue->unblock_writes();
+    return m_on_finish;
+  }
+
+  send_invalidate_cache(false);
+  return nullptr;
+}
+
+template <typename I>
+void ReleaseRequest<I>::send_invalidate_cache(bool purge_on_error) {
+  if (m_image_ctx.object_cacher == nullptr) {
+    send_flush_notifies();
+    return;
+  }
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << ": purge_on_error=" << purge_on_error << dendl;
+
+  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+  Context *ctx = create_async_context_callback(
+    m_image_ctx, create_context_callback<
+      ReleaseRequest<I>,
+      &ReleaseRequest<I>::handle_invalidate_cache>(this));
+  m_image_ctx.invalidate_cache(purge_on_error, ctx);
+}
+
+template <typename I>
+Context *ReleaseRequest<I>::handle_invalidate_cache(int *ret_val) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
+
+  if (*ret_val == -EBLACKLISTED) {
+    lderr(cct) << "failed to invalidate cache because client is blacklisted"
+               << dendl;
+    if (!m_image_ctx.is_cache_empty()) {
+      // force purge the cache after being blacklisted
+      send_invalidate_cache(true);
+      return nullptr;
+    }
+  } else if (*ret_val < 0 && *ret_val != -EBUSY) {
+    lderr(cct) << "failed to invalidate cache: " << cpp_strerror(*ret_val)
+               << dendl;
     m_image_ctx.aio_work_queue->unblock_writes();
     return m_on_finish;
   }
diff --git a/src/librbd/exclusive_lock/ReleaseRequest.h b/src/librbd/exclusive_lock/ReleaseRequest.h
index a68530b..17d5b93 100644
--- a/src/librbd/exclusive_lock/ReleaseRequest.h
+++ b/src/librbd/exclusive_lock/ReleaseRequest.h
@@ -20,7 +20,8 @@ template <typename ImageCtxT = ImageCtx>
 class ReleaseRequest {
 public:
   static ReleaseRequest* create(ImageCtxT &image_ctx, const std::string &cookie,
-                                Context *on_releasing, Context *on_finish);
+                                Context *on_releasing, Context *on_finish,
+                                bool shutting_down);
 
   ~ReleaseRequest();
   void send();
@@ -32,12 +33,18 @@ private:
    * <start>
    *    |
    *    v
+   * PREPARE_LOCK
+   *    |
+   *    v
    * CANCEL_OP_REQUESTS
    *    |
    *    v
    * BLOCK_WRITES
    *    |
    *    v
+   * INVALIDATE_CACHE
+   *    |
+   *    v
    * FLUSH_NOTIFIES . . . . . . . . . . . . . .
    *    |                                     .
    *    v                                     .
@@ -56,22 +63,30 @@ private:
    */
 
   ReleaseRequest(ImageCtxT &image_ctx, const std::string &cookie,
-                 Context *on_releasing, Context *on_finish);
+                 Context *on_releasing, Context *on_finish,
+                 bool shutting_down);
 
   ImageCtxT &m_image_ctx;
   std::string m_cookie;
   Context *m_on_releasing;
   Context *m_on_finish;
+  bool m_shutting_down;
 
   decltype(m_image_ctx.object_map) m_object_map;
   decltype(m_image_ctx.journal) m_journal;
 
+  void send_prepare_lock();
+  Context *handle_prepare_lock(int *ret_val);
+
   void send_cancel_op_requests();
   Context *handle_cancel_op_requests(int *ret_val);
 
   void send_block_writes();
   Context *handle_block_writes(int *ret_val);
 
+  void send_invalidate_cache(bool purge_on_error);
+  Context *handle_invalidate_cache(int *ret_val);
+
   void send_flush_notifies();
   Context *handle_flush_notifies(int *ret_val);
 
diff --git a/src/librbd/exclusive_lock/StandardPolicy.cc b/src/librbd/exclusive_lock/StandardPolicy.cc
index 22f0434..adeaf3f 100644
--- a/src/librbd/exclusive_lock/StandardPolicy.cc
+++ b/src/librbd/exclusive_lock/StandardPolicy.cc
@@ -5,15 +5,21 @@
 #include "librbd/ImageCtx.h"
 #include "librbd/ExclusiveLock.h"
 
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::ExclusiveLock::StandardPolicy "
+
 namespace librbd {
 namespace exclusive_lock {
 
-void StandardPolicy::lock_requested(bool force) {
+int StandardPolicy::lock_requested(bool force) {
   assert(m_image_ctx->owner_lock.is_locked());
   assert(m_image_ctx->exclusive_lock != nullptr);
 
-  // release the lock upon request (ignore forced requests)
-  m_image_ctx->exclusive_lock->release_lock(nullptr);
+  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
+			      << dendl;
+
+  return -EROFS;
 }
 
 } // namespace exclusive_lock
diff --git a/src/librbd/exclusive_lock/StandardPolicy.h b/src/librbd/exclusive_lock/StandardPolicy.h
index ddc78cc..78a346d 100644
--- a/src/librbd/exclusive_lock/StandardPolicy.h
+++ b/src/librbd/exclusive_lock/StandardPolicy.h
@@ -12,12 +12,16 @@ struct ImageCtx;
 
 namespace exclusive_lock {
 
-class StandardPolicy : public Policy{
+class StandardPolicy : public Policy {
 public:
   StandardPolicy(ImageCtx *image_ctx) : m_image_ctx(image_ctx) {
   }
 
-  virtual void lock_requested(bool force);
+  virtual bool may_auto_request_lock() {
+    return false;
+  }
+
+  virtual int lock_requested(bool force);
 
 private:
   ImageCtx *m_image_ctx;
diff --git a/src/librbd/exclusive_lock/Types.h b/src/librbd/exclusive_lock/Types.h
new file mode 100644
index 0000000..96c07ea
--- /dev/null
+++ b/src/librbd/exclusive_lock/Types.h
@@ -0,0 +1,23 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_TYPES_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_TYPES_H
+
+#include "msg/msg_types.h"
+#include <string>
+
+namespace librbd {
+namespace exclusive_lock {
+
+struct Locker {
+  entity_name_t entity;
+  std::string cookie;
+  std::string address;
+  uint64_t handle;
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_TYPES_H
diff --git a/src/librbd/image/OpenRequest.cc b/src/librbd/image/OpenRequest.cc
index 36d740d..9fbd8bc 100644
--- a/src/librbd/image/OpenRequest.cc
+++ b/src/librbd/image/OpenRequest.cc
@@ -30,8 +30,10 @@ using util::create_context_callback;
 using util::create_rados_ack_callback;
 
 template <typename I>
-OpenRequest<I>::OpenRequest(I *image_ctx, Context *on_finish)
-  : m_image_ctx(image_ctx), m_on_finish(on_finish), m_error_result(0),
+OpenRequest<I>::OpenRequest(I *image_ctx, bool skip_open_parent,
+                            Context *on_finish)
+  : m_image_ctx(image_ctx), m_skip_open_parent_image(skip_open_parent),
+    m_on_finish(on_finish), m_error_result(0),
     m_last_metadata_key(ImageCtx::METADATA_CONF_PREFIX) {
 }
 
@@ -373,7 +375,7 @@ void OpenRequest<I>::send_refresh() {
 
   using klass = OpenRequest<I>;
   RefreshRequest<I> *ctx = RefreshRequest<I>::create(
-    *m_image_ctx, false,
+    *m_image_ctx, false, m_skip_open_parent_image,
     create_context_callback<klass, &klass::handle_refresh>(this));
   ctx->send();
 }
diff --git a/src/librbd/image/OpenRequest.h b/src/librbd/image/OpenRequest.h
index 627285b..04c255d 100644
--- a/src/librbd/image/OpenRequest.h
+++ b/src/librbd/image/OpenRequest.h
@@ -19,8 +19,9 @@ namespace image {
 template <typename ImageCtxT = ImageCtx>
 class OpenRequest {
 public:
-  static OpenRequest *create(ImageCtxT *image_ctx, Context *on_finish) {
-    return new OpenRequest(image_ctx, on_finish);
+  static OpenRequest *create(ImageCtxT *image_ctx, bool skip_open_parent,
+                             Context *on_finish) {
+    return new OpenRequest(image_ctx, skip_open_parent, on_finish);
   }
 
   void send();
@@ -65,9 +66,10 @@ private:
    * @endverbatim
    */
 
-  OpenRequest(ImageCtxT *image_ctx, Context *on_finish);
+  OpenRequest(ImageCtxT *image_ctx, bool skip_open_parent, Context *on_finish);
 
   ImageCtxT *m_image_ctx;
+  bool m_skip_open_parent_image;
   Context *m_on_finish;
 
   bufferlist m_out_bl;
diff --git a/src/librbd/image/RefreshParentRequest.cc b/src/librbd/image/RefreshParentRequest.cc
index a44124f..28058ab 100644
--- a/src/librbd/image/RefreshParentRequest.cc
+++ b/src/librbd/image/RefreshParentRequest.cc
@@ -120,7 +120,7 @@ void RefreshParentRequest<I>::send_open_parent() {
   Context *ctx = create_async_context_callback(
     m_child_image_ctx, create_context_callback<
       klass, &klass::handle_open_parent, false>(this));
-  OpenRequest<I> *req = OpenRequest<I>::create(m_parent_image_ctx, ctx);
+  OpenRequest<I> *req = OpenRequest<I>::create(m_parent_image_ctx, false, ctx);
   req->send();
 }
 
diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc
index 04b05fa..70c43b9 100644
--- a/src/librbd/image/RefreshRequest.cc
+++ b/src/librbd/image/RefreshRequest.cc
@@ -28,8 +28,9 @@ using util::create_context_callback;
 
 template <typename I>
 RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
-                                  Context *on_finish)
+                                  bool skip_open_parent, Context *on_finish)
   : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
+    m_skip_open_parent_image(skip_open_parent),
     m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
     m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
     m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
@@ -397,8 +398,8 @@ void RefreshRequest<I>::send_v2_refresh_parent() {
 
     parent_info parent_md;
     int r = get_parent_info(m_image_ctx.snap_id, &parent_md);
-    if (r < 0 ||
-        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md)) {
+    if (!m_skip_open_parent_image && (r < 0 ||
+        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md))) {
       CephContext *cct = m_image_ctx.cct;
       ldout(cct, 10) << this << " " << __func__ << dendl;
 
diff --git a/src/librbd/image/RefreshRequest.h b/src/librbd/image/RefreshRequest.h
index 79b5d9e..43275d7 100644
--- a/src/librbd/image/RefreshRequest.h
+++ b/src/librbd/image/RefreshRequest.h
@@ -27,11 +27,13 @@ template<typename ImageCtxT = ImageCtx>
 class RefreshRequest {
 public:
   static RefreshRequest *create(ImageCtxT &image_ctx, bool acquiring_lock,
-                                Context *on_finish) {
-    return new RefreshRequest(image_ctx, acquiring_lock, on_finish);
+                                bool skip_open_parent, Context *on_finish) {
+    return new RefreshRequest(image_ctx, acquiring_lock, skip_open_parent,
+                              on_finish);
   }
 
-  RefreshRequest(ImageCtxT &image_ctx, bool acquiring_lock, Context *on_finish);
+  RefreshRequest(ImageCtxT &image_ctx, bool acquiring_lock,
+                 bool skip_open_parent, Context *on_finish);
   ~RefreshRequest();
 
   void send();
@@ -98,6 +100,7 @@ private:
 
   ImageCtxT &m_image_ctx;
   bool m_acquiring_lock;
+  bool m_skip_open_parent_image;
   Context *m_on_finish;
 
   int m_error_result;
diff --git a/src/librbd/image/SetSnapRequest.cc b/src/librbd/image/SetSnapRequest.cc
index e00ebeb..def86f9 100644
--- a/src/librbd/image/SetSnapRequest.cc
+++ b/src/librbd/image/SetSnapRequest.cc
@@ -56,7 +56,8 @@ void SetSnapRequest<I>::send_init_exclusive_lock() {
     }
   }
 
-  if (!m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
+  if (m_image_ctx.read_only ||
+      !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
     int r = 0;
     if (send_refresh_parent(&r) != nullptr) {
       send_complete();
@@ -261,7 +262,7 @@ Context *SetSnapRequest<I>::send_open_object_map(int *result) {
   using klass = SetSnapRequest<I>;
   Context *ctx = create_context_callback<
     klass, &klass::handle_open_object_map>(this);
-  m_object_map = new ObjectMap(m_image_ctx, m_snap_id);
+  m_object_map = ObjectMap<I>::create(m_image_ctx, m_snap_id);
   m_object_map->open(ctx);
   return nullptr;
 }
diff --git a/src/librbd/image/SetSnapRequest.h b/src/librbd/image/SetSnapRequest.h
index 1e2df49..b64fa68 100644
--- a/src/librbd/image/SetSnapRequest.h
+++ b/src/librbd/image/SetSnapRequest.h
@@ -12,7 +12,7 @@ namespace librbd {
 
 template <typename> class ExclusiveLock;
 class ImageCtx;
-class ObjectMap;
+template <typename> class ObjectMap;
 
 namespace image {
 
@@ -84,7 +84,7 @@ private:
 
   uint64_t m_snap_id;
   ExclusiveLock<ImageCtxT> *m_exclusive_lock;
-  ObjectMap *m_object_map;
+  ObjectMap<ImageCtxT> *m_object_map;
   RefreshParentRequest<ImageCtxT> *m_refresh_parent;
 
   bool m_writes_blocked;
diff --git a/src/librbd/image_watcher/RewatchRequest.cc b/src/librbd/image_watcher/RewatchRequest.cc
new file mode 100644
index 0000000..cbf22c0
--- /dev/null
+++ b/src/librbd/image_watcher/RewatchRequest.cc
@@ -0,0 +1,126 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/image_watcher/RewatchRequest.h"
+#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Utils.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::image_watcher::RewatchRequest: " \
+                           << this << ": " << __func__
+
+namespace librbd {
+namespace image_watcher {
+
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_safe_callback;
+
+template <typename I>
+RewatchRequest<I>::RewatchRequest(I &image_ctx, RWLock &watch_lock,
+                                  librados::WatchCtx2 *watch_ctx,
+                                  uint64_t *watch_handle, Context *on_finish)
+  : m_image_ctx(image_ctx), m_watch_lock(watch_lock), m_watch_ctx(watch_ctx),
+    m_watch_handle(watch_handle), m_on_finish(on_finish) {
+}
+
+template <typename I>
+void RewatchRequest<I>::send() {
+  unwatch();
+}
+
+template <typename I>
+void RewatchRequest<I>::unwatch() {
+  assert(m_watch_lock.is_wlocked());
+  assert(*m_watch_handle != 0);
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::AioCompletion *aio_comp = create_rados_safe_callback<
+    RewatchRequest<I>, &RewatchRequest<I>::handle_unwatch>(this);
+  int r = m_image_ctx.md_ctx.aio_unwatch(*m_watch_handle, aio_comp);
+  assert(r == 0);
+  aio_comp->release();
+
+  *m_watch_handle = 0;
+}
+
+template <typename I>
+void RewatchRequest<I>::handle_unwatch(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  if (r == -EBLACKLISTED) {
+    lderr(cct) << "client blacklisted" << dendl;
+    finish(r);
+    return;
+  } else if (r < 0) {
+    lderr(cct) << "failed to unwatch: " << cpp_strerror(r) << dendl;
+  }
+  rewatch();
+}
+
+template <typename I>
+void RewatchRequest<I>::rewatch() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << dendl;
+
+  librados::AioCompletion *aio_comp = create_rados_safe_callback<
+    RewatchRequest<I>, &RewatchRequest<I>::handle_rewatch>(this);
+  int r = m_image_ctx.md_ctx.aio_watch(m_image_ctx.header_oid, aio_comp,
+                                       &m_rewatch_handle, m_watch_ctx);
+  assert(r == 0);
+  aio_comp->release();
+}
+
+template <typename I>
+void RewatchRequest<I>::handle_rewatch(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  if (r == -EBLACKLISTED) {
+    lderr(cct) << "client blacklisted" << dendl;
+    finish(r);
+    return;
+  } else if (r == -ENOENT) {
+    ldout(cct, 5) << "image header deleted" << dendl;
+    finish(r);
+    return;
+  } else if (r < 0) {
+    lderr(cct) << "failed to watch image header: " << cpp_strerror(r)
+               << dendl;
+    rewatch();
+    return;
+  }
+
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    *m_watch_handle = m_rewatch_handle;
+  }
+
+  {
+    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    if (m_image_ctx.exclusive_lock != nullptr) {
+      // update the lock cookie with the new watch handle
+      m_image_ctx.exclusive_lock->reacquire_lock();
+    }
+  }
+  finish(0);
+}
+
+template <typename I>
+void RewatchRequest<I>::finish(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << "r=" << r << dendl;
+
+  m_on_finish->complete(r);
+  delete this;
+}
+
+} // namespace image_watcher
+} // namespace librbd
+
+template class librbd::image_watcher::RewatchRequest<librbd::ImageCtx>;
diff --git a/src/librbd/image_watcher/RewatchRequest.h b/src/librbd/image_watcher/RewatchRequest.h
new file mode 100644
index 0000000..dc3a2cd
--- /dev/null
+++ b/src/librbd/image_watcher/RewatchRequest.h
@@ -0,0 +1,78 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_IMAGE_WATCHER_REWATCH_REQUEST_H
+#define CEPH_LIBRBD_IMAGE_WATCHER_REWATCH_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+
+struct Context;
+struct RWLock;
+
+namespace librbd {
+
+class ImageCtx;
+
+namespace image_watcher {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class RewatchRequest {
+public:
+
+  static RewatchRequest *create(ImageCtxT &image_ctx, RWLock &watch_lock,
+                                librados::WatchCtx2 *watch_ctx,
+                                uint64_t *watch_handle, Context *on_finish) {
+    return new RewatchRequest(image_ctx, watch_lock, watch_ctx, watch_handle,
+                              on_finish);
+  }
+
+  RewatchRequest(ImageCtxT &image_ctx, RWLock &watch_lock,
+                 librados::WatchCtx2 *watch_ctx, uint64_t *watch_handle,
+                 Context *on_finish);
+
+  void send();
+
+private:
+  /**
+   * @verbatim
+   *
+   * <start>
+   *    |
+   *    v
+   * UNWATCH
+   *    |
+   *    |  . . . .
+   *    |  .     . (recoverable error)
+   *    v  v     .
+   * REWATCH . . .
+   *    |
+   *    v
+   * <finish>
+   *
+   * @endverbatim
+   */
+
+  ImageCtxT &m_image_ctx;
+  RWLock &m_watch_lock;
+  librados::WatchCtx2 *m_watch_ctx;
+  uint64_t *m_watch_handle;
+  Context *m_on_finish;
+
+  uint64_t m_rewatch_handle = 0;
+
+  void unwatch();
+  void handle_unwatch(int r);
+
+  void rewatch();
+  void handle_rewatch(int r);
+
+  void finish(int r);
+};
+
+} // namespace image_watcher
+} // namespace librbd
+
+extern template class librbd::image_watcher::RewatchRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_IMAGE_WATCHER_REWATCH_REQUEST_H
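The new RewatchRequest is constructed through its static create() helper and deletes itself once finished. Its actual call site (presumably ImageWatcher) is not part of this hunk, so the following is only a hedged, in-tree usage sketch, not compilable on its own; it assumes the caller already has the image context, watch lock, watch context and handle at hand.

    // Usage sketch only. unwatch() asserts the watch lock is write-locked and
    // the watch handle is non-zero, so the caller takes the lock first.
    void rewatch_header(librbd::ImageCtx &image_ctx, RWLock &watch_lock,
                        librados::WatchCtx2 *watch_ctx, uint64_t *watch_handle,
                        Context *on_finish) {
      RWLock::WLocker watch_locker(watch_lock);
      auto *req = librbd::image_watcher::RewatchRequest<>::create(
        image_ctx, watch_lock, watch_ctx, watch_handle, on_finish);
      req->send();   // the request deletes itself in finish()
    }
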
diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
index 073be35..d5875d8 100644
--- a/src/librbd/internal.cc
+++ b/src/librbd/internal.cc
@@ -38,6 +38,11 @@
 #include "librbd/Operations.h"
 #include "librbd/parent_types.h"
 #include "librbd/Utils.h"
+#include "librbd/exclusive_lock/AutomaticPolicy.h"
+#include "librbd/exclusive_lock/BreakRequest.h"
+#include "librbd/exclusive_lock/GetLockerRequest.h"
+#include "librbd/exclusive_lock/StandardPolicy.h"
+#include "librbd/exclusive_lock/Types.h"
 #include "librbd/operation/TrimRequest.h"
 #include "include/util.h"
 
@@ -76,7 +81,7 @@ int remove_object_map(ImageCtx *ictx) {
   int r;
   for (std::map<snap_t, SnapInfo>::iterator it = ictx->snap_info.begin();
        it != ictx->snap_info.end(); ++it) {
-    std::string oid(ObjectMap::object_map_name(ictx->id, it->first));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, it->first));
     r = ictx->md_ctx.remove(oid);
     if (r < 0 && r != -ENOENT) {
       lderr(cct) << "failed to remove object map " << oid << ": "
@@ -85,7 +90,7 @@ int remove_object_map(ImageCtx *ictx) {
     }
   }
 
-  r = ictx->md_ctx.remove(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP));
+  r = ictx->md_ctx.remove(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP));
   if (r < 0 && r != -ENOENT) {
     lderr(cct) << "failed to remove object map: " << cpp_strerror(r) << dendl;
     return r;
@@ -107,7 +112,7 @@ int create_object_map(ImageCtx *ictx) {
     snap_ids.push_back(it->first);
   }
 
-  if (!ObjectMap::is_compatible(ictx->layout, max_size)) {
+  if (!ObjectMap<>::is_compatible(ictx->layout, max_size)) {
     lderr(cct) << "image size not compatible with object map" << dendl;
     return -EINVAL;
   }
@@ -115,7 +120,7 @@ int create_object_map(ImageCtx *ictx) {
   for (std::vector<uint64_t>::iterator it = snap_ids.begin();
     it != snap_ids.end(); ++it) {
     librados::ObjectWriteOperation op;
-    std::string oid(ObjectMap::object_map_name(ictx->id, *it));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, *it));
     uint64_t snap_size = ictx->get_image_size(*it);
     cls_client::object_map_resize(&op, Striper::get_num_objects(ictx->layout, snap_size),
                                   OBJECT_NONEXISTENT);
@@ -339,6 +344,13 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
                << dendl;
   }
 
+  if (!is_primary) {
+    r = Journal<>::promote(ictx);
+    if (r < 0) {
+      lderr(cct) << "failed to promote image: " << cpp_strerror(r) << dendl;
+    }
+  }
+
   header_oid = ::journal::Journaler::header_oid(ictx->id);
 
   while(true) {
@@ -1132,7 +1144,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         layout.stripe_count = stripe_count;
       }
 
-      if (!ObjectMap::is_compatible(layout, size)) {
+      if (!ObjectMap<>::is_compatible(layout, size)) {
         lderr(cct) << "image size not compatible with object map" << dendl;
         goto err_remove_header;
       }
@@ -1140,7 +1152,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
       librados::ObjectWriteOperation op;
       cls_client::object_map_resize(&op, Striper::get_num_objects(layout, size),
                                     OBJECT_NONEXISTENT);
-      r = io_ctx.operate(ObjectMap::object_map_name(id, CEPH_NOSNAP), &op);
+      r = io_ctx.operate(ObjectMap<>::object_map_name(id, CEPH_NOSNAP), &op);
       if (r < 0) {
         lderr(cct) << "error creating initial object map: "
                    << cpp_strerror(r) << dendl;
@@ -1197,7 +1209,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 
   err_remove_object_map:
     if ((features & RBD_FEATURE_OBJECT_MAP) != 0) {
-      remove_r = ObjectMap::remove(io_ctx, id);
+      remove_r = ObjectMap<>::remove(io_ctx, id);
       if (remove_r < 0) {
         lderr(cct) << "error cleaning up object map after creation failed: "
                    << cpp_strerror(remove_r) << dendl;
@@ -1410,7 +1422,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 
     // make sure parent snapshot exists
     ImageCtx *p_imctx = new ImageCtx(p_name, "", p_snap_name, p_ioctx, true);
-    int r = p_imctx->state->open();
+    int r = p_imctx->state->open(false);
     if (r < 0) {
       lderr(cct) << "error opening parent image: "
 		 << cpp_strerror(-r) << dendl;
@@ -1546,7 +1558,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
     }
 
     c_imctx = new ImageCtx(c_name, "", NULL, c_ioctx, false);
-    r = c_imctx->state->open();
+    r = c_imctx->state->open(false);
     if (r < 0) {
       lderr(cct) << "Error opening new image: " << cpp_strerror(r) << dendl;
       delete c_imctx;
@@ -1619,7 +1631,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 		   << dstname << dendl;
 
     ImageCtx *ictx = new ImageCtx(srcname, "", "", io_ctx, false);
-    int r = ictx->state->open();
+    int r = ictx->state->open(false);
     if (r < 0) {
       lderr(ictx->cct) << "error opening source image: " << cpp_strerror(r)
 		       << dendl;
@@ -1807,7 +1819,8 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 
         if ((features & RBD_FEATURE_OBJECT_MAP) != 0) {
           if ((new_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
-            lderr(cct) << "cannot enable object map" << dendl;
+            lderr(cct) << "cannot enable object-map. exclusive-lock must be "
+                           "enabled before enabling object-map." << dendl;
             return -EINVAL;
           }
           enable_flags |= RBD_FLAG_OBJECT_MAP_INVALID;
@@ -1815,7 +1828,8 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         }
         if ((features & RBD_FEATURE_FAST_DIFF) != 0) {
           if ((new_features & RBD_FEATURE_OBJECT_MAP) == 0) {
-            lderr(cct) << "cannot enable fast diff" << dendl;
+            lderr(cct) << "cannot enable fast-diff. object-map must be "
+                           "enabled before enabling fast-diff." << dendl;
             return -EINVAL;
           }
           enable_flags |= RBD_FLAG_FAST_DIFF_INVALID;
@@ -1823,7 +1837,8 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         }
         if ((features & RBD_FEATURE_JOURNALING) != 0) {
           if ((new_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
-            lderr(cct) << "cannot enable journaling" << dendl;
+            lderr(cct) << "cannot enable journaling. exclusive-lock must be "
+                           "enabled before enabling journaling." << dendl;
             return -EINVAL;
           }
           features_mask |= RBD_FEATURE_EXCLUSIVE_LOCK;
@@ -1850,7 +1865,9 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         if ((features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0) {
           if ((new_features & RBD_FEATURE_OBJECT_MAP) != 0 ||
               (new_features & RBD_FEATURE_JOURNALING) != 0) {
-            lderr(cct) << "cannot disable exclusive lock" << dendl;
+            lderr(cct) << "cannot disable exclusive-lock. object-map "
+                          "or journaling must be disabled before "
+                          "disabling exclusive-lock." << dendl;
             return -EINVAL;
           }
           features_mask |= (RBD_FEATURE_OBJECT_MAP |
@@ -1858,7 +1875,8 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         }
         if ((features & RBD_FEATURE_OBJECT_MAP) != 0) {
           if ((new_features & RBD_FEATURE_FAST_DIFF) != 0) {
-            lderr(cct) << "cannot disable object map" << dendl;
+            lderr(cct) << "cannot disable object-map. fast-diff must be "
+                          "disabled before disabling object-map." << dendl;
             return -EINVAL;
           }
 
@@ -1953,7 +1971,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
       if (enable_mirroring) {
         ImageCtx *img_ctx = new ImageCtx("", ictx->id, nullptr,
             ictx->md_ctx, false);
-        r = img_ctx->state->open();
+        r = img_ctx->state->open(false);
         if (r < 0) {
           lderr(cct) << "error opening image: " << cpp_strerror(r) << dendl;
           delete img_ctx;
@@ -2087,6 +2105,155 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
     return 0;
   }
 
+  int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode)
+  {
+    CephContext *cct = ictx->cct;
+    ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
+                   << "lock_mode=" << lock_mode << dendl;
+
+    if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
+      return -EOPNOTSUPP;
+    }
+
+    C_SaferCond lock_ctx;
+    {
+      RWLock::WLocker l(ictx->owner_lock);
+
+      if (ictx->exclusive_lock == nullptr) {
+	lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
+	return -EINVAL;
+      }
+
+      if (ictx->get_exclusive_lock_policy()->may_auto_request_lock()) {
+	ictx->set_exclusive_lock_policy(
+	  new exclusive_lock::StandardPolicy(ictx));
+      }
+
+      if (ictx->exclusive_lock->is_lock_owner()) {
+	return 0;
+      }
+
+      ictx->exclusive_lock->request_lock(&lock_ctx);
+    }
+
+    int r = lock_ctx.wait();
+    if (r < 0) {
+      lderr(cct) << "failed to request exclusive lock: " << cpp_strerror(r)
+		 << dendl;
+      return r;
+    }
+
+    RWLock::RLocker l(ictx->owner_lock);
+
+    if (ictx->exclusive_lock == nullptr ||
+	!ictx->exclusive_lock->is_lock_owner()) {
+      lderr(cct) << "failed to acquire exclusive lock" << dendl;
+      return -EROFS;
+    }
+
+    return 0;
+  }
+
+  int lock_release(ImageCtx *ictx)
+  {
+    CephContext *cct = ictx->cct;
+    ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
+
+    C_SaferCond lock_ctx;
+    {
+      RWLock::WLocker l(ictx->owner_lock);
+
+      if (ictx->exclusive_lock == nullptr ||
+	  !ictx->exclusive_lock->is_lock_owner()) {
+	lderr(cct) << "not exclusive lock owner" << dendl;
+	return -EINVAL;
+      }
+
+      ictx->exclusive_lock->release_lock(&lock_ctx);
+    }
+
+    int r = lock_ctx.wait();
+    if (r < 0) {
+      lderr(cct) << "failed to release exclusive lock: " << cpp_strerror(r)
+		 << dendl;
+      return r;
+    }
+    return 0;
+  }
+
+  int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
+                      std::list<std::string> *lock_owners)
+  {
+    CephContext *cct = ictx->cct;
+    ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
+
+    exclusive_lock::Locker locker;
+    C_SaferCond get_owner_ctx;
+    auto get_owner_req = exclusive_lock::GetLockerRequest<>::create(
+      *ictx, &locker, &get_owner_ctx);
+    get_owner_req->send();
+
+    int r = get_owner_ctx.wait();
+    if (r == -ENOENT) {
+      return r;
+    } else if (r < 0) {
+      lderr(cct) << "failed to determine current lock owner: "
+                 << cpp_strerror(r) << dendl;
+      return r;
+    }
+
+    *lock_mode = RBD_LOCK_MODE_EXCLUSIVE;
+    lock_owners->clear();
+    lock_owners->emplace_back(locker.address);
+    return 0;
+  }
+
+  int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
+                 const std::string &lock_owner)
+  {
+    CephContext *cct = ictx->cct;
+    ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
+                   << "lock_mode=" << lock_mode << ", "
+                   << "lock_owner=" << lock_owner << dendl;
+
+    if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
+      return -EOPNOTSUPP;
+    }
+
+    exclusive_lock::Locker locker;
+    C_SaferCond get_owner_ctx;
+    auto get_owner_req = exclusive_lock::GetLockerRequest<>::create(
+      *ictx, &locker, &get_owner_ctx);
+    get_owner_req->send();
+
+    int r = get_owner_ctx.wait();
+    if (r == -ENOENT) {
+      return r;
+    } else if (r < 0) {
+      lderr(cct) << "failed to determine current lock owner: "
+                 << cpp_strerror(r) << dendl;
+      return r;
+    }
+
+    if (locker.address != lock_owner) {
+      return -EBUSY;
+    }
+
+    C_SaferCond break_ctx;
+    auto break_req = exclusive_lock::BreakRequest<>::create(
+      *ictx, locker, ictx->blacklist_on_break_lock, true, &break_ctx);
+    break_req->send();
+
+    r = break_ctx.wait();
+    if (r == -ENOENT) {
+      return r;
+    } else if (r < 0) {
+      lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
+      return r;
+    }
+    return 0;
+  }
+
   int remove(IoCtx& io_ctx, const std::string &image_name,
              const std::string &image_id, ProgressContext& prog_ctx,
              bool force)
@@ -2101,10 +2268,13 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
     bool unknown_format = true;
     ImageCtx *ictx = new ImageCtx(
       (id.empty() ? name : std::string()), id, nullptr, io_ctx, false);
-    int r = ictx->state->open();
+    int r = ictx->state->open(true);
     if (r < 0) {
       ldout(cct, 2) << "error opening image: " << cpp_strerror(-r) << dendl;
       delete ictx;
+      if (r != -ENOENT) {
+	return r;
+      }
     } else {
       string header_oid = ictx->header_oid;
       old_format = ictx->old_format;
@@ -2123,7 +2293,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
             ictx->exclusive_lock->shut_down(&ctx);
             r = ctx.wait();
             if (r < 0) {
-              lderr(cct) << "error shutting down exclusive lock"
+              lderr(cct) << "error shutting down exclusive lock: "
                          << cpp_strerror(r) << dendl;
               ictx->state->close();
               return r;
@@ -2240,7 +2410,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         }
 
         ldout(cct, 10) << "removing object map..." << dendl;
-        r = ObjectMap::remove(io_ctx, id);
+        r = ObjectMap<>::remove(io_ctx, id);
         if (r < 0 && r != -ENOENT) {
           lderr(cct) << "error removing image object map" << dendl;
           return r;
@@ -2368,7 +2538,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 
     ImageCtx *dest = new librbd::ImageCtx(destname, "", NULL,
 					  dest_md_ctx, false);
-    r = dest->state->open();
+    r = dest->state->open(false);
     if (r < 0) {
       delete dest;
       lderr(cct) << "failed to read newly created header" << dendl;
@@ -2817,7 +2987,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
 
     RWLock::RLocker owner_locker(ictx->owner_lock);
     RWLock::WLocker md_locker(ictx->md_lock);
-    r = ictx->invalidate_cache();
+    r = ictx->invalidate_cache(false);
     ictx->perfcounter->inc(l_librbd_invalidate_cache);
     return r;
   }
@@ -2859,8 +3029,9 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
     size_t conf_prefix_len = start.size();
 
     if(key.size() > conf_prefix_len && !key.compare(0,conf_prefix_len,start)) {
+      // validate config setting
       string subkey = key.substr(conf_prefix_len, key.size()-conf_prefix_len);
-      int r = cct->_conf->set_val(subkey.c_str(), value);
+      int r = md_config_t().set_val(subkey.c_str(), value);
       if (r < 0)
         return r;
     }
@@ -3432,7 +3603,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
         if ((features & RBD_FEATURE_JOURNALING) != 0) {
           ImageCtx *img_ctx = new ImageCtx("", img_pair.second, nullptr,
                                            io_ctx, false);
-          r = img_ctx->state->open();
+          r = img_ctx->state->open(false);
           if (r < 0) {
             lderr(cct) << "error opening image "<< img_pair.first << ": "
                        << cpp_strerror(r) << dendl;
@@ -3479,7 +3650,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force,
           }
         } else {
           ImageCtx *img_ctx = new ImageCtx("", img_id, nullptr, io_ctx, false);
-          r = img_ctx->state->open();
+          r = img_ctx->state->open(false);
           if (r < 0) {
             lderr(cct) << "error opening image id "<< img_id << ": "
                        << cpp_strerror(r) << dendl;
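The lock_acquire(), lock_release(), lock_get_owners() and lock_break() helpers added to internal.cc above all follow the same synchronous-over-asynchronous pattern: build a request or issue a lock call, hand it a C_SaferCond, then block on wait(). The toy below shows the same idea stripped of ceph types, with std::promise standing in for C_SaferCond; it is a stand-in illustration, not librbd code, and the "request" completes synchronously.

    #include <future>
    #include <iostream>

    struct ToyCond {                         // stand-in for C_SaferCond
      std::promise<int> p;
      void complete(int r) { p.set_value(r); }
      int wait() { return p.get_future().get(); }
    };

    struct ToyAsyncRequest {
      ToyCond *on_finish;
      void send() { on_finish->complete(0); }  // pretend the async op succeeded
    };

    int toy_lock_acquire() {
      ToyCond cond;
      ToyAsyncRequest req{&cond};
      req.send();             // kick off the state machine
      return cond.wait();     // block the synchronous caller until it finishes
    }

    int main() { std::cout << "r=" << toy_lock_acquire() << std::endl; }
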
diff --git a/src/librbd/internal.h b/src/librbd/internal.h
index 8522ea0..c08ad9f 100644
--- a/src/librbd/internal.h
+++ b/src/librbd/internal.h
@@ -135,6 +135,12 @@ namespace librbd {
   int get_flags(ImageCtx *ictx, uint64_t *flags);
   int set_image_notification(ImageCtx *ictx, int fd, int type);
   int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner);
+  int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode);
+  int lock_release(ImageCtx *ictx);
+  int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
+                      std::list<std::string> *lock_owners);
+  int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
+                 const std::string &lock_owner);
 
   int remove(librados::IoCtx& io_ctx, const std::string &image_name,
              const std::string &image_id, ProgressContext& prog_ctx,
diff --git a/src/librbd/librbd.cc b/src/librbd/librbd.cc
index 2d6d80c..04bdb06 100644
--- a/src/librbd/librbd.cc
+++ b/src/librbd/librbd.cc
@@ -27,7 +27,6 @@
 #include "librbd/ImageState.h"
 #include "librbd/internal.h"
 #include "librbd/Operations.h"
-
 #include <algorithm>
 #include <string>
 #include <vector>
@@ -104,7 +103,7 @@ struct C_OpenAfterCloseComplete : public Context {
   virtual void finish(int r) {
     ldout(ictx->cct, 20) << "C_OpenAfterCloseComplete::finish: r=" << r
 			 << dendl;
-    ictx->state->open(new C_OpenComplete(ictx, comp, ictxp, true));
+    ictx->state->open(false, new C_OpenComplete(ictx, comp, ictxp, true));
   }
 };
 
@@ -214,7 +213,7 @@ namespace librbd {
       image.ctx = NULL;
     }
 
-    int r = ictx->state->open();
+    int r = ictx->state->open(false);
     if (r < 0) {
       delete ictx;
       tracepoint(librbd, open_image_exit, r);
@@ -237,7 +236,7 @@ namespace librbd {
       reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
 	new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
     } else {
-      ictx->state->open(new C_OpenComplete(ictx, get_aio_completion(c),
+      ictx->state->open(false, new C_OpenComplete(ictx, get_aio_completion(c),
 					   &image.ctx));
     }
     tracepoint(librbd, aio_open_image_exit, 0);
@@ -256,7 +255,7 @@ namespace librbd {
       image.ctx = NULL;
     }
 
-    int r = ictx->state->open();
+    int r = ictx->state->open(false);
     if (r < 0) {
       delete ictx;
       tracepoint(librbd, open_image_exit, r);
@@ -279,7 +278,7 @@ namespace librbd {
       reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
 	new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
     } else {
-      ictx->state->open(new C_OpenComplete(ictx, get_aio_completion(c),
+      ictx->state->open(false, new C_OpenComplete(ictx, get_aio_completion(c),
 					   &image.ctx));
     }
     tracepoint(librbd, aio_open_image_exit, 0);
@@ -678,6 +677,28 @@ namespace librbd {
     return r;
   }
 
+  int Image::get_id(std::string *id)
+  {
+    ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
+    if (ictx->old_format) {
+      return -EINVAL;
+    }
+    *id = ictx->id;
+    return 0;
+  }
+
+  std::string Image::get_block_name_prefix()
+  {
+    ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
+    return ictx->object_prefix;
+  }
+
+  int64_t Image::get_data_pool_id()
+  {
+    ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
+    return ictx->data_ctx.get_id();
+  }
+
   int Image::parent_info(string *parent_pool_name, string *parent_name,
 			 string *parent_snap_name)
   {
@@ -716,6 +737,44 @@ namespace librbd {
     return r;
   }
 
+  int Image::lock_acquire(rbd_lock_mode_t lock_mode)
+  {
+    ImageCtx *ictx = (ImageCtx *)ctx;
+    tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
+    int r = librbd::lock_acquire(ictx, lock_mode);
+    tracepoint(librbd, lock_acquire_exit, ictx, r);
+    return r;
+  }
+
+  int Image::lock_release()
+  {
+    ImageCtx *ictx = (ImageCtx *)ctx;
+    tracepoint(librbd, lock_release_enter, ictx);
+    int r = librbd::lock_release(ictx);
+    tracepoint(librbd, lock_release_exit, ictx, r);
+    return r;
+  }
+
+  int Image::lock_get_owners(rbd_lock_mode_t *lock_mode,
+                             std::list<std::string> *lock_owners)
+  {
+    ImageCtx *ictx = (ImageCtx *)ctx;
+    tracepoint(librbd, lock_get_owners_enter, ictx);
+    int r = librbd::lock_get_owners(ictx, lock_mode, lock_owners);
+    tracepoint(librbd, lock_get_owners_exit, ictx, r);
+    return r;
+  }
+
+  int Image::lock_break(rbd_lock_mode_t lock_mode,
+                        const std::string &lock_owner)
+  {
+    ImageCtx *ictx = (ImageCtx *)ctx;
+    tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner.c_str());
+    int r = librbd::lock_break(ictx, lock_mode, lock_owner);
+    tracepoint(librbd, lock_break_exit, ictx, r);
+    return r;
+  }
+
   int Image::rebuild_object_map(ProgressContext &prog_ctx)
   {
     ImageCtx *ictx = reinterpret_cast<ImageCtx*>(ctx);
@@ -1845,7 +1904,7 @@ extern "C" int rbd_open(rados_ioctx_t p, const char *name, rbd_image_t *image,
 						false);
   tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
 
-  int r = ictx->state->open();
+  int r = ictx->state->open(false);
   if (r < 0) {
     delete ictx;
   } else {
@@ -1866,7 +1925,7 @@ extern "C" int rbd_aio_open(rados_ioctx_t p, const char *name,
 						false);
   librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
   tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
-  ictx->state->open(new C_OpenComplete(ictx, get_aio_completion(comp), image));
+  ictx->state->open(false, new C_OpenComplete(ictx, get_aio_completion(comp), image));
   tracepoint(librbd, aio_open_image_exit, 0);
   return 0;
 }
@@ -1881,7 +1940,7 @@ extern "C" int rbd_open_read_only(rados_ioctx_t p, const char *name,
 						true);
   tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
 
-  int r = ictx->state->open();
+  int r = ictx->state->open(false);
   if (r < 0) {
     delete ictx;
   } else {
@@ -1902,7 +1961,8 @@ extern "C" int rbd_aio_open_read_only(rados_ioctx_t p, const char *name,
 						true);
   librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
   tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
-  ictx->state->open(new C_OpenComplete(ictx, get_aio_completion(comp), image));
+  ictx->state->open(false, new C_OpenComplete(ictx, get_aio_completion(comp),
+                                              image));
   tracepoint(librbd, aio_open_image_exit, 0);
   return 0;
 }
@@ -2024,6 +2084,40 @@ extern "C" int rbd_get_overlap(rbd_image_t image, uint64_t *overlap)
   return r;
 }
 
+extern "C" int rbd_get_id(rbd_image_t image, char *id, size_t id_len)
+{
+  librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
+  if (ictx->old_format) {
+    return -EINVAL;
+  }
+  if (ictx->id.size() >= id_len) {
+    return -ERANGE;
+  }
+
+  strncpy(id, ictx->id.c_str(), id_len - 1);
+  id[id_len - 1] = '\0';
+  return 0;
+}
+
+extern "C" int rbd_get_block_name_prefix(rbd_image_t image, char *prefix,
+                                         size_t prefix_len)
+{
+  librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
+  if (ictx->object_prefix.size() >= prefix_len) {
+    return -ERANGE;
+  }
+
+  strncpy(prefix, ictx->object_prefix.c_str(), prefix_len - 1);
+  prefix[prefix_len - 1] = '\0';
+  return 0;
+}
+
+extern "C" int64_t rbd_get_data_pool_id(rbd_image_t image)
+{
+  librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
+  return ictx->data_ctx.get_id();
+}
+
 extern "C" int rbd_get_parent_info(rbd_image_t image,
   char *parent_pool_name, size_t ppool_namelen, char *parent_name,
   size_t pnamelen, char *parent_snap_name, size_t psnap_namelen)
@@ -2096,6 +2190,66 @@ extern "C" int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner)
   return r;
 }
 
+extern "C" int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode)
+{
+  librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
+  tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
+  int r = librbd::lock_acquire(ictx, lock_mode);
+  tracepoint(librbd, lock_acquire_exit, ictx, r);
+  return r;
+}
+
+extern "C" int rbd_lock_release(rbd_image_t image)
+{
+  librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
+  tracepoint(librbd, lock_release_enter, ictx);
+  int r = librbd::lock_release(ictx);
+  tracepoint(librbd, lock_release_exit, ictx, r);
+  return r;
+}
+
+extern "C" int rbd_lock_get_owners(rbd_image_t image,
+                                   rbd_lock_mode_t *lock_mode,
+                                   char **lock_owners,
+                                   size_t *max_lock_owners)
+{
+  librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
+  tracepoint(librbd, lock_get_owners_enter, ictx);
+  std::list<std::string> lock_owner_list;
+  int r = librbd::lock_get_owners(ictx, lock_mode, &lock_owner_list);
+  if (r >= 0) {
+    if (*max_lock_owners >= lock_owner_list.size()) {
+      *max_lock_owners = 0;
+      for (auto &lock_owner : lock_owner_list) {
+        lock_owners[(*max_lock_owners)++] = strdup(lock_owner.c_str());
+      }
+    } else {
+      *max_lock_owners = lock_owner_list.size();
+      r = -ERANGE;
+    }
+  }
+  tracepoint(librbd, lock_get_owners_exit, ictx, r);
+  return r;
+}
+
+extern "C" void rbd_lock_get_owners_cleanup(char **lock_owners,
+                                            size_t lock_owner_count)
+{
+  for (size_t i = 0; i < lock_owner_count; ++i) {
+    free(lock_owners[i]);
+  }
+}
+
+extern "C" int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
+                              const char *lock_owner)
+{
+  librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
+  tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner);
+  int r = librbd::lock_break(ictx, lock_mode, lock_owner);
+  tracepoint(librbd, lock_break_exit, ictx, r);
+  return r;
+}
+
 extern "C" int rbd_rebuild_object_map(rbd_image_t image,
                                       librbd_progress_fn_t cb, void *cbdata)
 {
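The librbd.cc hunks above expose the new image metadata getters and the managed exclusive-lock calls through both the C++ and C APIs. A hedged usage sketch of the C++ side follows, using only the signatures introduced in this patch; the pool and image names are placeholders and most error handling is elided.

    #include <rados/librados.hpp>
    #include <rbd/librbd.hpp>
    #include <cerrno>
    #include <iostream>
    #include <list>
    #include <string>

    int main() {
      librados::Rados cluster;
      if (cluster.init(NULL) < 0 || cluster.conf_read_file(NULL) < 0 ||
          cluster.connect() < 0)
        return 1;

      librados::IoCtx io_ctx;
      if (cluster.ioctx_create("rbd", io_ctx) < 0)          // placeholder pool
        return 1;

      librbd::RBD rbd;
      librbd::Image image;
      if (rbd.open(io_ctx, image, "test-image") < 0)        // placeholder image
        return 1;

      std::string id;
      if (image.get_id(&id) == 0) {
        std::cout << "id=" << id
                  << " prefix=" << image.get_block_name_prefix()
                  << " data_pool=" << image.get_data_pool_id() << std::endl;
      }

      // take exclusive-lock ownership up front rather than lazily on first write
      int r = image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE);
      if (r < 0)
        std::cout << "lock_acquire failed: " << r << std::endl;  // e.g. -EROFS

      rbd_lock_mode_t mode;
      std::list<std::string> owners;
      if (image.lock_get_owners(&mode, &owners) == 0 && !owners.empty())
        std::cout << "lock owner: " << owners.front() << std::endl;

      if (r == 0)
        image.lock_release();
      return 0;
    }
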
diff --git a/src/librbd/object_map/LockRequest.cc b/src/librbd/object_map/LockRequest.cc
index 3af5073..f5687f7 100644
--- a/src/librbd/object_map/LockRequest.cc
+++ b/src/librbd/object_map/LockRequest.cc
@@ -32,7 +32,7 @@ void LockRequest<I>::send() {
 template <typename I>
 void LockRequest<I>::send_lock() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   librados::ObjectWriteOperation op;
@@ -54,6 +54,10 @@ Context *LockRequest<I>::handle_lock(int *ret_val) {
 
   if (*ret_val == 0) {
     return m_on_finish;
+  } else if (*ret_val == -EEXIST) {
+    // already locked by myself
+    *ret_val = 0;
+    return m_on_finish;
   } else if (m_broke_lock || *ret_val != -EBUSY) {
     lderr(cct) << "failed to lock object map: " << cpp_strerror(*ret_val)
                << dendl;
@@ -68,7 +72,7 @@ Context *LockRequest<I>::handle_lock(int *ret_val) {
 template <typename I>
 void LockRequest<I>::send_get_lock_info() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   librados::ObjectReadOperation op;
@@ -113,7 +117,7 @@ Context *LockRequest<I>::handle_get_lock_info(int *ret_val) {
 template <typename I>
 void LockRequest<I>::send_break_locks() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << ", "
                  << "num_lockers=" << m_lockers.size() << dendl;
 
diff --git a/src/librbd/object_map/RefreshRequest.cc b/src/librbd/object_map/RefreshRequest.cc
index 9421c12..c7b4834 100644
--- a/src/librbd/object_map/RefreshRequest.cc
+++ b/src/librbd/object_map/RefreshRequest.cc
@@ -72,7 +72,7 @@ void RefreshRequest<I>::send_lock() {
     return;
   }
 
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   using klass = RefreshRequest<I>;
@@ -96,7 +96,7 @@ Context *RefreshRequest<I>::handle_lock(int *ret_val) {
 template <typename I>
 void RefreshRequest<I>::send_load() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   librados::ObjectReadOperation op;
@@ -122,7 +122,7 @@ Context *RefreshRequest<I>::handle_load(int *ret_val) {
                                                   &m_on_disk_object_map);
   }
 
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   if (*ret_val == -EINVAL) {
      // object map is corrupt on-disk -- clear it and properly size it
      // so future IO can keep the object map in sync
@@ -220,7 +220,7 @@ Context *RefreshRequest<I>::handle_resize_invalidate(int *ret_val) {
 template <typename I>
 void RefreshRequest<I>::send_resize() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   librados::ObjectWriteOperation op;
diff --git a/src/librbd/object_map/RefreshRequest.h b/src/librbd/object_map/RefreshRequest.h
index 4c2b059..24a7998 100644
--- a/src/librbd/object_map/RefreshRequest.h
+++ b/src/librbd/object_map/RefreshRequest.h
@@ -19,6 +19,12 @@ namespace object_map {
 template <typename ImageCtxT = ImageCtx>
 class RefreshRequest {
 public:
+  static RefreshRequest *create(ImageCtxT &image_ctx,
+                                ceph::BitVector<2> *object_map,
+                                uint64_t snap_id, Context *on_finish) {
+    return new RefreshRequest(image_ctx, object_map, snap_id, on_finish);
+  }
+
   RefreshRequest(ImageCtxT &image_ctx, ceph::BitVector<2> *object_map,
                  uint64_t snap_id, Context *on_finish);
 
diff --git a/src/librbd/object_map/Request.cc b/src/librbd/object_map/Request.cc
index 1725cbf..e54aed0 100644
--- a/src/librbd/object_map/Request.cc
+++ b/src/librbd/object_map/Request.cc
@@ -28,10 +28,7 @@ bool Request::should_complete(int r) {
       return invalidate();
     }
 
-    {
-      RWLock::WLocker l2(m_image_ctx.object_map_lock);
-      finish_request();
-    }
+    finish_request();
     return true;
 
   case STATE_INVALIDATE:
diff --git a/src/librbd/object_map/ResizeRequest.cc b/src/librbd/object_map/ResizeRequest.cc
index e574a18..3182c8a 100644
--- a/src/librbd/object_map/ResizeRequest.cc
+++ b/src/librbd/object_map/ResizeRequest.cc
@@ -30,7 +30,7 @@ void ResizeRequest::send() {
   RWLock::WLocker l(m_image_ctx.object_map_lock);
   m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
 
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 5) << this << " resizing on-disk object map: "
                 << "ictx=" << &m_image_ctx << ", "
                 << "oid=" << oid << ", num_objs=" << m_num_objs << dendl;
@@ -48,10 +48,12 @@ void ResizeRequest::send() {
 }
 
 void ResizeRequest::finish_request() {
-  CephContext *cct = m_image_ctx.cct;
+  RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
 
+  CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " resizing in-memory object map: "
 		<< m_num_objs << dendl;
+
   resize(m_object_map, m_num_objs, m_default_object_state);
 }
 
diff --git a/src/librbd/object_map/SnapshotCreateRequest.cc b/src/librbd/object_map/SnapshotCreateRequest.cc
index 6408973..5d77d26 100644
--- a/src/librbd/object_map/SnapshotCreateRequest.cc
+++ b/src/librbd/object_map/SnapshotCreateRequest.cc
@@ -79,7 +79,7 @@ void SnapshotCreateRequest::send_read_map() {
   assert(m_image_ctx.get_snap_info(m_snap_id) != NULL);
 
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
   m_state = STATE_READ_MAP;
 
@@ -96,7 +96,7 @@ void SnapshotCreateRequest::send_read_map() {
 
 void SnapshotCreateRequest::send_write_map() {
   CephContext *cct = m_image_ctx.cct;
-  std::string snap_oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
                 << dendl;
   m_state = STATE_WRITE_MAP;
@@ -117,7 +117,7 @@ bool SnapshotCreateRequest::send_add_snapshot() {
   }
 
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
   m_state = STATE_ADD_SNAPSHOT;
 
diff --git a/src/librbd/object_map/SnapshotRemoveRequest.cc b/src/librbd/object_map/SnapshotRemoveRequest.cc
index 94c0952..821dd5b 100644
--- a/src/librbd/object_map/SnapshotRemoveRequest.cc
+++ b/src/librbd/object_map/SnapshotRemoveRequest.cc
@@ -112,7 +112,7 @@ bool SnapshotRemoveRequest::should_complete(int r) {
 
 void SnapshotRemoveRequest::send_load_map() {
   CephContext *cct = m_image_ctx.cct;
-  std::string snap_oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
                 << dendl;
   m_state = STATE_LOAD_MAP;
@@ -129,7 +129,7 @@ void SnapshotRemoveRequest::send_load_map() {
 
 void SnapshotRemoveRequest::send_remove_snapshot() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_next_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_next_snap_id));
   ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
   m_state = STATE_REMOVE_SNAPSHOT;
 
@@ -161,7 +161,7 @@ void SnapshotRemoveRequest::send_invalidate_next_map() {
 
 void SnapshotRemoveRequest::send_remove_map() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
   m_state = STATE_REMOVE_MAP;
 
diff --git a/src/librbd/object_map/SnapshotRollbackRequest.cc b/src/librbd/object_map/SnapshotRollbackRequest.cc
index 10eb591..0da4fd4 100644
--- a/src/librbd/object_map/SnapshotRollbackRequest.cc
+++ b/src/librbd/object_map/SnapshotRollbackRequest.cc
@@ -76,7 +76,7 @@ bool SnapshotRollbackRequest::should_complete(int r) {
 }
 
 void SnapshotRollbackRequest::send_read_map() {
-  std::string snap_oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
@@ -97,7 +97,8 @@ void SnapshotRollbackRequest::send_write_map() {
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
 
   CephContext *cct = m_image_ctx.cct;
-  std::string snap_oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id,
+                                                    CEPH_NOSNAP));
   ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
                 << dendl;
   m_state = STATE_WRITE_MAP;
diff --git a/src/librbd/object_map/UnlockRequest.cc b/src/librbd/object_map/UnlockRequest.cc
index c7ae980..3458bbe 100644
--- a/src/librbd/object_map/UnlockRequest.cc
+++ b/src/librbd/object_map/UnlockRequest.cc
@@ -31,7 +31,7 @@ void UnlockRequest<I>::send() {
 template <typename I>
 void UnlockRequest<I>::send_unlock() {
   CephContext *cct = m_image_ctx.cct;
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
   ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
 
   librados::ObjectWriteOperation op;
diff --git a/src/librbd/object_map/UnlockRequest.h b/src/librbd/object_map/UnlockRequest.h
index b52a3d0..ae1d9e9 100644
--- a/src/librbd/object_map/UnlockRequest.h
+++ b/src/librbd/object_map/UnlockRequest.h
@@ -15,6 +15,10 @@ namespace object_map {
 template <typename ImageCtxT = ImageCtx>
 class UnlockRequest {
 public:
+  static UnlockRequest *create(ImageCtxT &image_ctx, Context *on_finish) {
+    return new UnlockRequest(image_ctx, on_finish);
+  }
+
   UnlockRequest(ImageCtxT &image_ctx, Context *on_finish);
 
   void send();
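RefreshRequest and UnlockRequest gain create() factories matching the other request classes, so callers (such as the now-templated ObjectMap seen throughout this patch) can avoid naked new. A hedged in-tree sketch, assuming image_ctx, object_map, snap_id and the completion contexts come from the surrounding code:

    // Usage sketch only; not compilable outside the tree.
    auto *refresh = librbd::object_map::RefreshRequest<>::create(
      image_ctx, &object_map, snap_id, on_refresh_finished);
    refresh->send();

    auto *unlock = librbd::object_map::UnlockRequest<>::create(
      image_ctx, on_unlock_finished);
    unlock->send();
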
diff --git a/src/librbd/object_map/UpdateRequest.cc b/src/librbd/object_map/UpdateRequest.cc
index 51dbc48..e88085a 100644
--- a/src/librbd/object_map/UpdateRequest.cc
+++ b/src/librbd/object_map/UpdateRequest.cc
@@ -17,14 +17,15 @@
 namespace librbd {
 namespace object_map {
 
-void UpdateRequest::send() {
+template <typename I>
+void UpdateRequest<I>::send() {
   assert(m_image_ctx.snap_lock.is_locked());
   assert(m_image_ctx.object_map_lock.is_locked());
   CephContext *cct = m_image_ctx.cct;
 
   // safe to update in-memory state first without handling rollback since any
   // failures will invalidate the object map
-  std::string oid(ObjectMap::object_map_name(m_image_ctx.id, m_snap_id));
+  std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
   ldout(cct, 20) << this << " updating object map"
                  << ": ictx=" << &m_image_ctx << ", oid=" << oid << ", ["
 		 << m_start_object_no << "," << m_end_object_no << ") = "
@@ -33,20 +34,6 @@ void UpdateRequest::send() {
 		 << "->" << static_cast<uint32_t>(m_new_state)
 		 << dendl;
 
-  // rebuilding the object map might update on-disk only
-  if (m_snap_id == m_image_ctx.snap_id) {
-    assert(m_image_ctx.object_map_lock.is_wlocked());
-    for (uint64_t object_no = m_start_object_no;
-         object_no < MIN(m_end_object_no, m_object_map.size());
-         ++object_no) {
-      uint8_t state = m_object_map[object_no];
-      if (!m_current_state || state == *m_current_state ||
-          (*m_current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) {
-        m_object_map[object_no] = m_new_state;
-      }
-    }
-  }
-
   librados::ObjectWriteOperation op;
   if (m_snap_id == CEPH_NOSNAP) {
     rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, LOCK_EXCLUSIVE, "", "");
@@ -60,10 +47,28 @@ void UpdateRequest::send() {
   rados_completion->release();
 }
 
-void UpdateRequest::finish_request() {
+template <typename I>
+void UpdateRequest<I>::finish_request() {
+  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+  RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
   ldout(m_image_ctx.cct, 20) << this << " on-disk object map updated"
                              << dendl;
+
+  // rebuilding the object map might update on-disk only
+  if (m_snap_id == m_image_ctx.snap_id) {
+    for (uint64_t object_no = m_start_object_no;
+         object_no < MIN(m_end_object_no, m_object_map.size());
+         ++object_no) {
+      uint8_t state = m_object_map[object_no];
+      if (!m_current_state || state == *m_current_state ||
+          (*m_current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) {
+        m_object_map[object_no] = m_new_state;
+      }
+    }
+  }
 }
 
 } // namespace object_map
 } // namespace librbd
+
+template class librbd::object_map::UpdateRequest<librbd::ImageCtx>;
diff --git a/src/librbd/object_map/UpdateRequest.h b/src/librbd/object_map/UpdateRequest.h
index d1ce407..5f3ae5c 100644
--- a/src/librbd/object_map/UpdateRequest.h
+++ b/src/librbd/object_map/UpdateRequest.h
@@ -17,13 +17,25 @@ class ImageCtx;
 
 namespace object_map {
 
+template <typename ImageCtxT = librbd::ImageCtx>
 class UpdateRequest : public Request {
 public:
+  static UpdateRequest *create(ImageCtx &image_ctx,
+                               ceph::BitVector<2> *object_map,
+                               uint64_t snap_id, uint64_t start_object_no,
+                               uint64_t end_object_no, uint8_t new_state,
+                               const boost::optional<uint8_t> &current_state,
+                               Context *on_finish) {
+    return new UpdateRequest(image_ctx, object_map, snap_id, start_object_no,
+                             end_object_no, new_state, current_state,
+                             on_finish);
+  }
+
   UpdateRequest(ImageCtx &image_ctx, ceph::BitVector<2> *object_map,
                 uint64_t snap_id, uint64_t start_object_no,
                 uint64_t end_object_no, uint8_t new_state,
                 const boost::optional<uint8_t> &current_state,
-      	  Context *on_finish)
+      	        Context *on_finish)
     : Request(image_ctx, snap_id, on_finish), m_object_map(*object_map),
       m_start_object_no(start_object_no), m_end_object_no(end_object_no),
       m_new_state(new_state), m_current_state(current_state)
@@ -46,4 +58,6 @@ private:
 } // namespace object_map
 } // namespace librbd
 
+extern template class librbd::object_map::UpdateRequest<librbd::ImageCtx>;
+
 #endif // CEPH_LIBRBD_OBJECT_MAP_UPDATE_REQUEST_H
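UpdateRequest is now a template driven through the create() factory above, and its in-memory map update moved from send() into finish_request(), which takes the locks itself on completion. A hedged in-tree sketch of driving it directly, honoring the asserts in send(); the surrounding variables are assumed to exist in the caller:

    // Usage sketch only; send() asserts snap_lock and object_map_lock are held.
    {
      RWLock::RLocker snap_locker(image_ctx.snap_lock);
      RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
      auto *req = librbd::object_map::UpdateRequest<>::create(
        image_ctx, &object_map, CEPH_NOSNAP,
        start_object_no, end_object_no,
        OBJECT_NONEXISTENT, OBJECT_PENDING,   // new state / expected current state
        on_finish);
      req->send();
    }
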
diff --git a/src/librbd/operation/ResizeRequest.cc b/src/librbd/operation/ResizeRequest.cc
index c046e79..f890537 100644
--- a/src/librbd/operation/ResizeRequest.cc
+++ b/src/librbd/operation/ResizeRequest.cc
@@ -213,7 +213,7 @@ void ResizeRequest<I>::send_invalidate_cache() {
   // need to invalidate since we're deleting objects, and
   // ObjectCacher doesn't track non-existent objects
   RWLock::RLocker owner_locker(image_ctx.owner_lock);
-  image_ctx.invalidate_cache(create_async_context_callback(
+  image_ctx.invalidate_cache(false, create_async_context_callback(
     image_ctx, create_context_callback<
       ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this)));
 }
diff --git a/src/librbd/operation/SnapshotRollbackRequest.cc b/src/librbd/operation/SnapshotRollbackRequest.cc
index 3335b36..99dcb08 100644
--- a/src/librbd/operation/SnapshotRollbackRequest.cc
+++ b/src/librbd/operation/SnapshotRollbackRequest.cc
@@ -286,7 +286,7 @@ Context *SnapshotRollbackRequest<I>::send_invalidate_cache() {
   Context *ctx = create_context_callback<
     SnapshotRollbackRequest<I>,
     &SnapshotRollbackRequest<I>::handle_invalidate_cache>(this);
-  image_ctx.invalidate_cache(ctx);
+  image_ctx.invalidate_cache(true, ctx);
   return nullptr;
 }
 
diff --git a/src/librbd/operation/TrimRequest.cc b/src/librbd/operation/TrimRequest.cc
index 3992fb7..e99e0e9 100644
--- a/src/librbd/operation/TrimRequest.cc
+++ b/src/librbd/operation/TrimRequest.cc
@@ -254,12 +254,9 @@ void TrimRequest<I>::send_pre_remove() {
     return;
   }
 
-  bool remove_objects = false;
   {
     RWLock::RLocker snap_locker(image_ctx.snap_lock);
-    if (image_ctx.object_map == nullptr) {
-      remove_objects = true;
-    } else {
+    if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_pre_remove: "
 				<< " delete_start=" << m_delete_start
 				<< " num_objects=" << m_num_objects << dendl;
@@ -268,22 +265,17 @@ void TrimRequest<I>::send_pre_remove() {
       assert(image_ctx.exclusive_lock->is_lock_owner());
 
       // flag the objects as pending deletion
-      Context *ctx = this->create_callback_context();
       RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
-      if (!image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
-					    OBJECT_PENDING, OBJECT_EXISTS,
-                                            ctx)) {
-        delete ctx;
-        remove_objects = true;
+      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+            CEPH_NOSNAP, m_delete_start, m_num_objects, OBJECT_PENDING,
+            OBJECT_EXISTS, this)) {
+        return;
       }
     }
   }
 
-  // avoid possible recursive lock attempts
-  if (remove_objects) {
-    // no object map update required
-    send_remove_objects();
-  }
+  // no object map update required
+  send_remove_objects();
 }
 
 template <typename I>
@@ -291,12 +283,9 @@ void TrimRequest<I>::send_post_remove() {
   I &image_ctx = this->m_image_ctx;
   assert(image_ctx.owner_lock.is_locked());
 
-  bool clean_boundary = false;
   {
     RWLock::RLocker snap_locker(image_ctx.snap_lock);
-    if (image_ctx.object_map == nullptr) {
-      clean_boundary = true;
-    } else {
+    if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_post_remove: "
           		        << " delete_start=" << m_delete_start
           		        << " num_objects=" << m_num_objects << dendl;
@@ -305,22 +294,17 @@ void TrimRequest<I>::send_post_remove() {
       assert(image_ctx.exclusive_lock->is_lock_owner());
 
       // flag the pending objects as removed
-      Context *ctx = this->create_callback_context();
       RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
-      if (!image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
-					    OBJECT_NONEXISTENT,
-					    OBJECT_PENDING, ctx)) {
-        delete ctx;
-	clean_boundary = true;
+      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+            CEPH_NOSNAP, m_delete_start, m_num_objects, OBJECT_NONEXISTENT,
+            OBJECT_PENDING, this)) {
+        return;
       }
     }
   }
 
-  // avoid possible recursive lock attempts
-  if (clean_boundary) {
-    // no object map update required
-    send_clean_boundary();
-  }
+  // no object map update required
+  send_clean_boundary();
 }
 
 template <typename I>
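
The TrimRequest hunks above replace the old "set a bool, maybe delete the callback" dance with a simpler shape: if the templated aio_update() call queues an asynchronous object-map update, return and let its completion continue the state machine; otherwise fall straight through. A reduced sketch of that control flow, with invented stand-in types:

    // FakeObjectMap and send_pre_remove are illustrative stand-ins only.
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <optional>
    #include <utility>

    struct FakeObjectMap {
      // Returns true if an async update was queued (callback fires later).
      bool aio_update(uint64_t start, uint64_t len, std::function<void()> on_finish) {
        if (len == 0)
          return false;                      // nothing to flag: no update queued
        pending = std::move(on_finish);
        return true;
      }
      std::optional<std::function<void()>> pending;
    };

    void send_remove_objects() { std::cout << "removing objects\n"; }

    void send_pre_remove(FakeObjectMap *object_map, uint64_t start, uint64_t len) {
      if (object_map != nullptr) {
        if (object_map->aio_update(start, len, [] { send_remove_objects(); })) {
          return;                            // completion continues the state machine
        }
      }
      // no object map update required
      send_remove_objects();
    }

    int main() {
      FakeObjectMap om;
      send_pre_remove(&om, 0, 4);            // queues the async update
      if (om.pending) (*om.pending)();       // simulate its completion
      send_pre_remove(nullptr, 0, 4);        // no object map: synchronous path
    }
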
diff --git a/src/mds/Beacon.cc b/src/mds/Beacon.cc
index 06020af..b2565fd 100644
--- a/src/mds/Beacon.cc
+++ b/src/mds/Beacon.cc
@@ -384,8 +384,10 @@ void Beacon::notify_health(MDSRank const *mds)
   {
     set<Session*> sessions;
     mds->sessionmap.get_client_session_set(sessions);
+
     utime_t cutoff = ceph_clock_now(g_ceph_context);
     cutoff -= g_conf->mds_recall_state_timeout;
+    utime_t last_recall = mds->mdcache->last_recall_state;
 
     std::list<MDSHealthMetric> late_recall_metrics;
     std::list<MDSHealthMetric> large_completed_requests_metrics;
@@ -395,7 +397,10 @@ void Beacon::notify_health(MDSRank const *mds)
         dout(20) << "Session servicing RECALL " << session->info.inst
           << ": " << session->recalled_at << " " << session->recall_release_count
           << "/" << session->recall_count << dendl;
-        if (session->recalled_at < cutoff) {
+	if (last_recall < cutoff || session->last_recall_sent < last_recall) {
+	  dout(20) << "  no longer recall" << dendl;
+	  session->clear_recalled_at();
+	} else if (session->recalled_at < cutoff) {
           dout(20) << "  exceeded timeout " << session->recalled_at << " vs. " << cutoff << dendl;
           std::ostringstream oss;
 	  oss << "Client " << session->get_human_name() << " failing to respond to cache pressure";
diff --git a/src/mds/CDentry.cc b/src/mds/CDentry.cc
index 83470dc..d387e25 100644
--- a/src/mds/CDentry.cc
+++ b/src/mds/CDentry.cc
@@ -204,10 +204,10 @@ void CDentry::mark_new()
   state_set(STATE_NEW);
 }
 
-void CDentry::make_path_string(string& s) const
+void CDentry::make_path_string(string& s, bool projected) const
 {
   if (dir) {
-    dir->inode->make_path_string(s);
+    dir->inode->make_path_string(s, projected);
   } else {
     s = "???";
   }
@@ -215,36 +215,14 @@ void CDentry::make_path_string(string& s) const
   s.append(name.data(), name.length());
 }
 
-void CDentry::make_path(filepath& fp) const
+void CDentry::make_path(filepath& fp, bool projected) const
 {
   assert(dir);
-  if (dir->inode->is_base())
-    fp = filepath(dir->inode->ino());               // base case
-  else if (dir->inode->get_parent_dn())
-    dir->inode->get_parent_dn()->make_path(fp);  // recurse
-  else
-    fp = filepath(dir->inode->ino());               // relative but not base?  hrm!
+  dir->inode->make_path(fp, projected);
   fp.push_dentry(name);
 }
 
 /*
-void CDentry::make_path(string& s, inodeno_t tobase)
-{
-  assert(dir);
-  
-  if (dir->inode->is_root()) {
-    s += "/";  // make it an absolute path (no matter what) if we hit the root.
-  } 
-  else if (dir->inode->get_parent_dn() &&
-	   dir->inode->ino() != tobase) {
-    dir->inode->get_parent_dn()->make_path(s, tobase);
-    s += "/";
-  }
-  s += name;
-}
-*/
-
-/*
  * we only add ourselves to remote_parents when the linkage is
  * active (no longer projected).  if the passed dnl is projected,
  * don't link in, and do that work later in pop_projected_linkage().
diff --git a/src/mds/CDentry.h b/src/mds/CDentry.h
index 0198d5b..b5bf18b 100644
--- a/src/mds/CDentry.h
+++ b/src/mds/CDentry.h
@@ -275,8 +275,8 @@ public:
   const CDentry& operator= (const CDentry& right);
 
   // misc
-  void make_path_string(std::string& s) const;
-  void make_path(filepath& fp) const;
+  void make_path_string(std::string& s, bool projected=false) const;
+  void make_path(filepath& fp, bool projected=false) const;
 
   // -- version --
   version_t get_version() const { return version; }
diff --git a/src/mds/CDir.cc b/src/mds/CDir.cc
index 8b241e3..475fcc7 100644
--- a/src/mds/CDir.cc
+++ b/src/mds/CDir.cc
@@ -3155,7 +3155,7 @@ bool CDir::scrub_local()
 std::string CDir::get_path() const
 {
   std::string path;
-  get_inode()->make_path_string_projected(path);
+  get_inode()->make_path_string(path, true);
   return path;
 }
 
diff --git a/src/mds/CDir.h b/src/mds/CDir.h
index 8533ba8..6f73eb2 100644
--- a/src/mds/CDir.h
+++ b/src/mds/CDir.h
@@ -428,6 +428,7 @@ protected:
 
   map_t::iterator begin() { return items.begin(); }
   map_t::iterator end() { return items.end(); }
+  map_t::iterator lower_bound(dentry_key_t key) { return items.lower_bound(key); }
 
   unsigned get_num_head_items() const { return num_head_items; }
   unsigned get_num_head_null() const { return num_head_null; }
diff --git a/src/mds/CInode.cc b/src/mds/CInode.cc
index c4001a2..5535491 100644
--- a/src/mds/CInode.cc
+++ b/src/mds/CInode.cc
@@ -100,7 +100,7 @@ int num_cinode_locks = sizeof(cinode_lock_info) / sizeof(cinode_lock_info[0]);
 ostream& operator<<(ostream& out, const CInode& in)
 {
   string path;
-  in.make_path_string_projected(path);
+  in.make_path_string(path, true);
 
   out << "[inode " << in.inode.ino;
   out << " [" 
@@ -811,61 +811,47 @@ bool CInode::is_projected_ancestor_of(CInode *other)
 }
 
 /*
- * If use_parent is NULL (it should be one of inode's projected parents),
- * we use it to make path string. Otherwise, we use inode's parent dentry
- * to make path string
+ * Because a non-directory inode may have multiple links, the use_parent
+ * argument allows selecting which parent to use for path construction. This
+ * argument is only meaningful for the final component (i.e. the first of the
+ * nested calls) because directories cannot have multiple hard links. If
+ * use_parent is NULL and projected is true, the primary parent's projected
+ * inode is used all the way up the path chain. Otherwise the primary parent's
+ * stable inode is used.
  */
-void CInode::make_path_string(string& s, CDentry *use_parent) const
+void CInode::make_path_string(string& s, bool projected, const CDentry *use_parent) const
 {
-  if (!use_parent)
-    use_parent = parent;
+  if (!use_parent) {
+    use_parent = projected ? get_projected_parent_dn() : parent;
+  }
 
   if (use_parent) {
-    use_parent->make_path_string(s);
-  } 
-  else if (is_root()) {
-    s = "";  // root
-  } 
-  else if (is_mdsdir()) {
+    use_parent->make_path_string(s, projected);
+  } else if (is_root()) {
+    s = "";
+  } else if (is_mdsdir()) {
     char t[40];
     uint64_t eino(ino());
     eino -= MDS_INO_MDSDIR_OFFSET;
     snprintf(t, sizeof(t), "~mds%" PRId64, eino);
     s = t;
-  }
-  else {
+  } else {
     char n[40];
     uint64_t eino(ino());
     snprintf(n, sizeof(n), "#%" PRIx64, eino);
     s += n;
   }
 }
-void CInode::make_path_string_projected(string& s) const
-{
-  make_path_string(s);
-  
-  if (!projected_parent.empty()) {
-    string q;
-    q.swap(s);
-    s = "{" + q;
-    for (list<CDentry*>::const_iterator p = projected_parent.begin();
-	 p != projected_parent.end();
-	 ++p) {
-      string q;
-      make_path_string(q, *p);
-      s += " ";
-      s += q;
-    }
-    s += "}";
-  }
-}
 
-void CInode::make_path(filepath& fp) const
+void CInode::make_path(filepath& fp, bool projected) const
 {
-  if (parent) 
-    parent->make_path(fp);
-  else
+  const CDentry *use_parent = projected ? get_projected_parent_dn() : parent;
+  if (use_parent) {
+    assert(!is_base());
+    use_parent->make_path(fp, projected);
+  } else {
     fp = filepath(ino());
+  }
 }
 
 void CInode::name_stray_dentry(string& dname)
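
The make_path()/make_path_string() rework above collapses the old *_projected variants into a single projected flag: path construction walks up either the stable parent or the projected parent at each step. A simplified, self-contained model of that recursion (NodeSketch is an invented stand-in, not CInode/CDentry):

    #include <iostream>
    #include <string>

    struct NodeSketch {
      std::string name;
      const NodeSketch *parent;             // stable linkage
      const NodeSketch *projected_parent;   // pending (not yet journaled) linkage

      void make_path_string(std::string &s, bool projected = false) const {
        const NodeSketch *use_parent =
            (projected && projected_parent) ? projected_parent : parent;
        if (use_parent) {
          use_parent->make_path_string(s, projected);
          s += "/";
          s += name;
        } else {
          s = "";                           // root contributes an empty prefix
        }
      }
    };

    int main() {
      NodeSketch root{"", nullptr, nullptr};
      NodeSketch dirA{"a", &root, nullptr}, dirB{"b", &root, nullptr};
      NodeSketch file{"f", &dirA, &dirB};   // mid-rename from a/f to b/f
      std::string stable, proj;
      file.make_path_string(stable);        // "/a/f"
      file.make_path_string(proj, true);    // "/b/f"
      std::cout << stable << " vs " << proj << "\n";
    }

In the real code the final component can additionally be pinned to a specific parent dentry via use_parent, since a non-directory inode may have several hard links.
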
diff --git a/src/mds/CInode.h b/src/mds/CInode.h
index eb70325..193d435 100644
--- a/src/mds/CInode.h
+++ b/src/mds/CInode.h
@@ -739,9 +739,9 @@ public:
 
   // -- misc -- 
   bool is_projected_ancestor_of(CInode *other);
-  void make_path_string(std::string& s, CDentry *use_parent=NULL) const;
-  void make_path_string_projected(std::string& s) const;
-  void make_path(filepath& s) const;
+
+  void make_path_string(std::string& s, bool projected=false, const CDentry *use_parent=NULL) const;
+  void make_path(filepath& s, bool projected=false) const;
   void name_stray_dentry(std::string& dname);
   
   // -- dirtyness --
diff --git a/src/mds/FSMap.cc b/src/mds/FSMap.cc
index eb3dd74..a4db701 100644
--- a/src/mds/FSMap.cc
+++ b/src/mds/FSMap.cc
@@ -421,6 +421,9 @@ void FSMap::decode(bufferlist::iterator& p)
       migrate_fs->mds_map.epoch = epoch;
       filesystems[migrate_fs->fscid] = migrate_fs;
 
+      // List of GIDs that had invalid states
+      std::set<mds_gid_t> drop_gids;
+
       // Construct mds_roles, standby_daemons, and remove
       // standbys from the MDSMap in the Filesystem.
       for (auto &p : migrate_fs->mds_map.mds_info) {
@@ -430,16 +433,27 @@ void FSMap::decode(bufferlist::iterator& p)
           p.second.rank = p.second.standby_for_rank;
         }
         if (p.second.rank == MDS_RANK_NONE) {
-          standby_daemons[p.first] = p.second;
-          standby_epochs[p.first] = epoch;
-          mds_roles[p.first] = FS_CLUSTER_ID_NONE;
+          if (p.second.state != MDSMap::STATE_STANDBY) {
+            // Old MDSMaps can have down:dne here, which
+            // is invalid in an FSMap (#17837)
+            drop_gids.insert(p.first);
+          } else {
+            insert(p.second); // into standby_daemons
+          }
         } else {
           mds_roles[p.first] = migrate_fs->fscid;
         }
       }
       for (const auto &p : standby_daemons) {
+        // Erase from this Filesystem's MDSMap, because it has
+        // been copied into FSMap::standby_daemons above
         migrate_fs->mds_map.mds_info.erase(p.first);
       }
+      for (const auto &gid : drop_gids) {
+        // Throw away all info for this MDS because it was identified
+        // as having invalid state above.
+        migrate_fs->mds_map.mds_info.erase(gid);
+      }
 
       legacy_client_fscid = migrate_fs->fscid;
     } else {
@@ -499,7 +513,7 @@ int FSMap::parse_filesystem(
   std::string ns_err;
   fs_cluster_id_t fscid = strict_strtol(ns_str.c_str(), 10, &ns_err);
   if (!ns_err.empty() || filesystems.count(fscid) == 0) {
-    for (auto fs : filesystems) {
+    for (auto &fs : filesystems) {
       if (fs.second->mds_map.fs_name == ns_str) {
         *result = std::const_pointer_cast<const Filesystem>(fs.second);
         return 0;
@@ -732,7 +746,7 @@ void FSMap::erase(mds_gid_t who, epoch_t blacklist_epoch)
     standby_daemons.erase(who);
     standby_epochs.erase(who);
   } else {
-    auto fs = filesystems.at(mds_roles.at(who));
+    auto &fs = filesystems.at(mds_roles.at(who));
     const auto &info = fs->mds_map.mds_info.at(who);
     if (info.state != MDSMap::STATE_STANDBY_REPLAY) {
       if (info.state == MDSMap::STATE_CREATING) {
@@ -824,66 +838,41 @@ int FSMap::parse_role(
     mds_role_t *role,
     std::ostream &ss) const
 {
-  auto colon_pos = role_str.find(":");
-
-  if (colon_pos != std::string::npos && colon_pos != role_str.size()) {
-    auto fs_part = role_str.substr(0, colon_pos);
-    auto rank_part = role_str.substr(colon_pos + 1);
-
-    std::string err;
-    fs_cluster_id_t fs_id = FS_CLUSTER_ID_NONE;
-    long fs_id_i = strict_strtol(fs_part.c_str(), 10, &err);
-    if (fs_id_i < 0 || !err.empty()) {
-      // Try resolving as name
-      auto fs = get_filesystem(fs_part);
-      if (fs == nullptr) {
-        ss << "Unknown filesystem name '" << fs_part << "'";
-        return -EINVAL;
-      } else {
-        fs_id = fs->fscid;
-      }
-    } else {
-      fs_id = fs_id_i;
-    }
-
-    mds_rank_t rank;
-    long rank_i = strict_strtol(rank_part.c_str(), 10, &err);
-    if (rank_i < 0 || !err.empty()) {
-      ss << "Invalid rank '" << rank_part << "'";
-      return -EINVAL;
-    } else {
-      rank = rank_i;
-    }
-
-    *role = {fs_id, rank};
-  } else {
-    std::string err;
-    long who_i = strict_strtol(role_str.c_str(), 10, &err);
-    if (who_i < 0 || !err.empty()) {
-      ss << "Invalid rank '" << role_str << "'";
-      return -EINVAL;
-    }
-
+  size_t colon_pos = role_str.find(":");
+  size_t rank_pos;
+  std::shared_ptr<const Filesystem> fs;
+  if (colon_pos == std::string::npos) {
     if (legacy_client_fscid == FS_CLUSTER_ID_NONE) {
       ss << "No filesystem selected";
       return -ENOENT;
-    } else {
-      *role = mds_role_t(legacy_client_fscid, who_i);
     }
+    fs = get_filesystem(legacy_client_fscid);
+    rank_pos = 0;
+  } else {
+    if (parse_filesystem(role_str.substr(0, colon_pos), &fs) < 0) {
+      ss << "Invalid filesystem";
+      return -ENOENT;
+    }
+    rank_pos = colon_pos+1;
   }
 
-  // Now check that the role actually exists
-  if (get_filesystem(role->fscid) == nullptr) {
-    ss << "Filesystem with ID '" << role->fscid << "' not found";
-    return -ENOENT;
+  mds_rank_t rank;
+  std::string err;
+  std::string rank_str = role_str.substr(rank_pos);
+  long rank_i = strict_strtol(rank_str.c_str(), 10, &err);
+  if (rank_i < 0 || !err.empty()) {
+    ss << "Invalid rank '" << rank_str << "'";
+    return -EINVAL;
+  } else {
+    rank = rank_i;
   }
 
-  auto fs = get_filesystem(role->fscid);
-  if (fs->mds_map.in.count(role->rank) == 0) {
-    ss << "Rank '" << role->rank << "' not found";
+  if (fs->mds_map.in.count(rank) == 0) {
+    ss << "Rank '" << rank << "' not found";
     return -ENOENT;
   }
 
+  *role = {fs->fscid, rank};
+
   return 0;
 }
-
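
The rewritten FSMap::parse_role() above accepts either "<fs>:<rank>" or a bare "<rank>" resolved against the legacy default filesystem, then validates the rank before building the role. A standalone approximation with a stub filesystem lookup (the real code additionally checks that the rank exists in the MDSMap):

    #include <cerrno>
    #include <cstdlib>
    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<int> lookup_fs(const std::string &name) {
      if (name == "cephfs") return 1;            // pretend fscid 1 exists
      return std::nullopt;
    }

    int parse_role(const std::string &role_str, int default_fscid,
                   int *out_fscid, int *out_rank) {
      const auto colon_pos = role_str.find(':');
      std::string::size_type rank_pos = 0;
      int fscid;
      if (colon_pos == std::string::npos) {
        if (default_fscid < 0) return -ENOENT;   // no filesystem selected
        fscid = default_fscid;
      } else {
        auto fs = lookup_fs(role_str.substr(0, colon_pos));
        if (!fs) return -ENOENT;                 // invalid filesystem
        fscid = *fs;
        rank_pos = colon_pos + 1;
      }

      char *end = nullptr;
      const std::string rank_str = role_str.substr(rank_pos);
      const long rank = std::strtol(rank_str.c_str(), &end, 10);
      if (rank < 0 || end == rank_str.c_str() || *end != '\0')
        return -EINVAL;                          // invalid rank

      *out_fscid = fscid;
      *out_rank = static_cast<int>(rank);
      return 0;
    }

    int main() {
      int fscid, rank;
      std::cout << parse_role("cephfs:0", -1, &fscid, &rank) << "\n";    // 0
      std::cout << parse_role("2", 1, &fscid, &rank) << "\n";            // 0
      std::cout << parse_role("nosuchfs:0", -1, &fscid, &rank) << "\n";  // -ENOENT
    }
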
diff --git a/src/mds/FSMap.h b/src/mds/FSMap.h
index d14e365..cbeb7b3 100644
--- a/src/mds/FSMap.h
+++ b/src/mds/FSMap.h
@@ -375,20 +375,21 @@ public:
     });
   }
 
-  const std::map<fs_cluster_id_t, std::shared_ptr<Filesystem> > &get_filesystems() const
-  {
-    return filesystems;
-  }
-  bool any_filesystems() const {return !filesystems.empty(); }
-  bool filesystem_exists(fs_cluster_id_t fscid) const
-    {return filesystems.count(fscid) > 0;}
-
   epoch_t get_epoch() const { return epoch; }
   void inc_epoch() { epoch++; }
 
-  std::shared_ptr<const Filesystem> get_filesystem(fs_cluster_id_t fscid) const
+  size_t filesystem_count() const {return filesystems.size();}
+  bool filesystem_exists(fs_cluster_id_t fscid) const {return filesystems.count(fscid) > 0;}
+  std::shared_ptr<const Filesystem> get_filesystem(fs_cluster_id_t fscid) const {return std::const_pointer_cast<const Filesystem>(filesystems.at(fscid));}
+  std::shared_ptr<const Filesystem> get_filesystem(void) const {return std::const_pointer_cast<const Filesystem>(filesystems.begin()->second);}
+  std::shared_ptr<const Filesystem> get_filesystem(const std::string &name) const
   {
-    return std::const_pointer_cast<const Filesystem>(filesystems.at(fscid));
+    for (auto &i : filesystems) {
+      if (i.second->mds_map.fs_name == name) {
+        return std::const_pointer_cast<const Filesystem>(i.second);
+      }
+    }
+    return nullptr;
   }
 
   int parse_filesystem(
@@ -424,17 +425,6 @@ public:
   void get_health(list<pair<health_status_t,std::string> >& summary,
 		  list<pair<health_status_t,std::string> > *detail) const;
 
-  std::shared_ptr<const Filesystem> get_filesystem(const std::string &name) const
-  {
-    for (auto &i : filesystems) {
-      if (i.second->mds_map.fs_name == name) {
-        return i.second;
-      }
-    }
-
-    return nullptr;
-  }
-
   /**
    * Assert that the FSMap, Filesystem, MDSMap, mds_info_t relations are
    * all self-consistent.
diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index 10fff98..410ad14 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -1578,6 +1578,7 @@ void Locker::_finish_xlock(SimpleLock *lock, client_t xlocker, bool *pneed_issue
   if (lock->get_num_rdlocks() == 0 &&
       lock->get_num_wrlocks() == 0 &&
       lock->get_num_client_lease() == 0 &&
+      lock->get_state() != LOCK_XLOCKSNAP &&
       lock->get_type() != CEPH_LOCK_DN) {
     CInode *in = static_cast<CInode*>(lock->get_parent());
     client_t loner = in->get_target_loner();
@@ -1594,7 +1595,7 @@ void Locker::_finish_xlock(SimpleLock *lock, client_t xlocker, bool *pneed_issue
     }
   }
   // the xlocker may have CEPH_CAP_GSHARED, need to revoke it if next state is LOCK_LOCK
-  eval_gather(lock, true, pneed_issue);
+  eval_gather(lock, lock->get_state() != LOCK_XLOCKSNAP, pneed_issue);
 }
 
 void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue)
@@ -2467,6 +2468,18 @@ void Locker::handle_client_caps(MClientCaps *m)
 	  << " op " << ceph_cap_op_name(m->get_op()) << dendl;
 
   if (!mds->is_clientreplay() && !mds->is_active() && !mds->is_stopping()) {
+    if (!session) {
+      dout(5) << " no session, dropping " << *m << dendl;
+      m->put();
+      return;
+    }
+    if (session->is_closed() ||
+	session->is_closing() ||
+	session->is_killing()) {
+      dout(7) << " session closed|closing|killing, dropping " << *m << dendl;
+      m->put();
+      return;
+    }
     if (mds->is_reconnect() &&
 	m->get_dirty() && m->get_client_tid() > 0 &&
 	!session->have_completed_flush(m->get_client_tid())) {
@@ -2476,7 +2489,7 @@ void Locker::handle_client_caps(MClientCaps *m)
     return;
   }
 
-  if (m->get_client_tid() > 0 &&
+  if (m->get_client_tid() > 0 && session &&
       session->have_completed_flush(m->get_client_tid())) {
     dout(7) << "handle_client_caps already flushed tid " << m->get_client_tid()
 	    << " for client." << client << dendl;
@@ -2503,7 +2516,7 @@ void Locker::handle_client_caps(MClientCaps *m)
   }
 
   // "oldest flush tid" > 0 means client uses unique TID for each flush
-  if (m->get_oldest_flush_tid() > 0) {
+  if (m->get_oldest_flush_tid() > 0 && session) {
     if (session->trim_completed_flushes(m->get_oldest_flush_tid())) {
       mds->mdlog->get_current_segment()->touched_sessions.insert(session->info.inst.name);
 
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index 3226969..4985076 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -6133,7 +6133,11 @@ void MDCache::identify_files_to_recover()
     }
 
     if (recover) {
-      in->auth_pin(&in->filelock);
+      if (in->filelock.is_stable()) {
+	in->auth_pin(&in->filelock);
+      } else {
+	assert(in->filelock.get_state() == LOCK_XLOCKSNAP);
+      }
       in->filelock.set_state(LOCK_PRE_SCAN);
       rejoin_recover_q.push_back(in);
     } else {
@@ -6145,6 +6149,8 @@ void MDCache::identify_files_to_recover()
 void MDCache::start_files_to_recover()
 {
   for (CInode *in : rejoin_check_q) {
+    if (in->filelock.get_state() == LOCK_XLOCKSNAP)
+      mds->locker->issue_caps(in);
     mds->locker->check_inode_max_size(in);
   }
   rejoin_check_q.clear();
@@ -6168,6 +6174,17 @@ void MDCache::do_file_recover()
 // ----------------------------
 // truncate
 
+class C_MDC_RetryTruncate : public MDCacheContext {
+  CInode *in;
+  LogSegment *ls;
+public:
+  C_MDC_RetryTruncate(MDCache *c, CInode *i, LogSegment *l) :
+    MDCacheContext(c), in(i), ls(l) {}
+  void finish(int r) {
+    mdcache->_truncate_inode(in, ls);
+  }
+};
+
 void MDCache::truncate_inode(CInode *in, LogSegment *ls)
 {
   inode_t *pi = in->get_projected_inode();
@@ -6178,6 +6195,15 @@ void MDCache::truncate_inode(CInode *in, LogSegment *ls)
 
   ls->truncating_inodes.insert(in);
   in->get(CInode::PIN_TRUNCATING);
+  in->auth_pin(this);
+
+  if (!in->client_need_snapflush.empty() &&
+      (in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) {
+    assert(in->filelock.is_xlocked());
+    in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls));
+    mds->locker->issue_caps(in);
+    return;
+  }
 
   _truncate_inode(in, ls);
 }
@@ -6205,7 +6231,6 @@ void MDCache::_truncate_inode(CInode *in, LogSegment *ls)
   assert(pi->truncate_from < (1ULL << 63));
   assert(pi->truncate_size < pi->truncate_from);
 
-  in->auth_pin(this);
 
   SnapRealm *realm = in->find_snaprealm();
   SnapContext nullsnap;
@@ -6314,8 +6339,21 @@ void MDCache::start_recovered_truncates()
     LogSegment *ls = p->second;
     for (set<CInode*>::iterator q = ls->truncating_inodes.begin();
 	 q != ls->truncating_inodes.end();
-	 ++q)
-      _truncate_inode(*q, ls);
+	 ++q) {
+      CInode *in = *q;
+      in->auth_pin(this);
+
+      if (!in->client_need_snapflush.empty() &&
+	  (in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) {
+	assert(in->filelock.is_stable());
+	in->filelock.set_state(LOCK_XLOCKDONE);
+	in->auth_pin(&in->filelock);
+	in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls));
+	// start_files_to_recover will revoke caps
+	continue;
+      }
+      _truncate_inode(in, ls);
+    }
   }
 }
 
@@ -7316,10 +7354,11 @@ void MDCache::check_memory_usage()
 
   if (num_inodes_with_caps > g_conf->mds_cache_size) {
     float ratio = (float)g_conf->mds_cache_size * .9 / (float)num_inodes_with_caps;
-    if (ratio < 1.0)
+    if (ratio < 1.0) {
+      last_recall_state = ceph_clock_now(g_ceph_context);
       mds->server->recall_client_state(ratio);
+    }
   }
-
 }
 
 
@@ -8206,11 +8245,19 @@ class C_IO_MDC_OpenInoBacktraceFetched : public MDCacheIOContext {
 
 struct C_MDC_OpenInoTraverseDir : public MDCacheContext {
   inodeno_t ino;
+  MMDSOpenIno *msg;
+  bool parent;
   public:
-  C_MDC_OpenInoTraverseDir(MDCache *c, inodeno_t i) : MDCacheContext(c), ino(i) {}
+  C_MDC_OpenInoTraverseDir(MDCache *c, inodeno_t i, MMDSOpenIno *m,  bool p) :
+    MDCacheContext(c), ino(i), msg(m), parent(p) {}
   void finish(int r) {
+    if (r < 0 && !parent)
+      r = -EAGAIN;
+    if (msg) {
+      mdcache->handle_open_ino(msg, r);
+      return;
+    }
     assert(mdcache->opening_inodes.count(ino));
-    assert(r >= 0);
     mdcache->_open_ino_traverse_dir(ino, mdcache->opening_inodes[ino], r);
   }
 };
@@ -8240,7 +8287,14 @@ void MDCache::_open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err
 
   inode_backtrace_t backtrace;
   if (err == 0) {
-    ::decode(backtrace, bl);
+    try {
+      ::decode(backtrace, bl);
+    } catch (const buffer::error &decode_exc) {
+      derr << "corrupt backtrace on ino x0" << std::hex << ino
+           << std::dec << ": " << decode_exc << dendl;
+      open_ino_finish(ino, info, -EIO);
+      return;
+    }
     if (backtrace.pool != info.pool && backtrace.pool != -1) {
       dout(10) << " old object in pool " << info.pool
 	       << ", retrying pool " << backtrace.pool << dendl;
@@ -8263,6 +8317,7 @@ void MDCache::_open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err
 		      new C_OnFinisher(fin, mds->finisher));
       return;
     }
+    err = 0; // backtrace.ancestors.empty() is checked below
   }
 
   if (err == 0) {
@@ -8273,11 +8328,15 @@ void MDCache::_open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err
       if (info.ancestors[0] == backtrace.ancestors[0]) {
 	dout(10) << " got same parents " << info.ancestors[0] << " 2 times" << dendl;
 	err = -EINVAL;
+      } else {
+	info.last_err = 0;
       }
     }
   }
   if (err) {
-    dout(10) << " failed to open ino " << ino << dendl;
+    dout(0) << " failed to open ino " << ino << " err " << err << "/" << info.last_err << dendl;
+    if (info.last_err)
+      err = info.last_err;
     open_ino_finish(ino, info, err);
     return;
   }
@@ -8315,14 +8374,6 @@ void MDCache::_open_ino_parent_opened(inodeno_t ino, int ret)
   }
 }
 
-MDSInternalContextBase* MDCache::_open_ino_get_waiter(inodeno_t ino, MMDSOpenIno *m)
-{
-  if (m)
-    return new C_MDS_RetryMessage(mds, m);
-  else
-    return new C_MDC_OpenInoTraverseDir(this, ino);
-}
-
 void MDCache::_open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int ret)
 {
   dout(10) << __func__ << ": ino " << ino << " ret " << ret << dendl;
@@ -8349,11 +8400,11 @@ void MDCache::_open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int r
   do_open_ino(ino, info, ret);
 }
 
-void MDCache::_open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir)
+void MDCache::_open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir, bool parent)
 {
   if (dir->state_test(CDir::STATE_REJOINUNDEF))
     assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
-  dir->fetch(_open_ino_get_waiter(ino, m));
+  dir->fetch(new C_MDC_OpenInoTraverseDir(this, ino, m, parent));
 }
 
 int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
@@ -8367,7 +8418,7 @@ int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
 
     if (!diri) {
       if (discover && MDS_INO_IS_MDSDIR(ancestors[i].dirino)) {
-	open_foreign_mdsdir(ancestors[i].dirino, _open_ino_get_waiter(ino, m));
+	open_foreign_mdsdir(ancestors[i].dirino, new C_MDC_OpenInoTraverseDir(this, ino, m, i == 0));
 	return 1;
       }
       continue;
@@ -8378,7 +8429,7 @@ int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
       while (dir->state_test(CDir::STATE_REJOINUNDEF) &&
 	     dir->get_inode()->state_test(CInode::STATE_REJOINUNDEF))
 	dir = dir->get_inode()->get_parent_dir();
-      _open_ino_fetch_dir(ino, m, dir);
+      _open_ino_fetch_dir(ino, m, dir, i == 0);
       return 1;
     }
 
@@ -8396,12 +8447,12 @@ int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
       if (diri->is_auth()) {
 	if (diri->is_frozen()) {
 	  dout(10) << " " << *diri << " is frozen, waiting " << dendl;
-	  diri->add_waiter(CDir::WAIT_UNFREEZE, _open_ino_get_waiter(ino, m));
+	  diri->add_waiter(CDir::WAIT_UNFREEZE, new C_MDC_OpenInoTraverseDir(this, ino, m, i == 0));
 	  return 1;
 	}
 	dir = diri->get_or_open_dirfrag(this, fg);
       } else if (discover) {
-	open_remote_dirfrag(diri, fg, _open_ino_get_waiter(ino, m));
+	open_remote_dirfrag(diri, fg, new C_MDC_OpenInoTraverseDir(this, ino, m, i == 0));
 	return 1;
       }
     }
@@ -8413,14 +8464,14 @@ int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
 	if (dnl && dnl->is_primary() &&
 	    dnl->get_inode()->state_test(CInode::STATE_REJOINUNDEF)) {
 	  dout(10) << " fetching undef " << *dnl->get_inode() << dendl;
-	  _open_ino_fetch_dir(ino, m, dir);
+	  _open_ino_fetch_dir(ino, m, dir, i == 0);
 	  return 1;
 	}
 
 	if (!dnl && !dir->is_complete() &&
 	    (!dir->has_bloom() || dir->is_in_bloom(name))) {
 	  dout(10) << " fetching incomplete " << *dir << dendl;
-	  _open_ino_fetch_dir(ino, m, dir);
+	  _open_ino_fetch_dir(ino, m, dir, i == 0);
 	  return 1;
 	}
 
@@ -8430,13 +8481,13 @@ int MDCache::open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
       } else if (discover) {
 	if (!dnl) {
 	  filepath path(name, 0);
-	  discover_path(dir, CEPH_NOSNAP, path, _open_ino_get_waiter(ino, m),
+	  discover_path(dir, CEPH_NOSNAP, path, new C_MDC_OpenInoTraverseDir(this, ino, m, i == 0),
 			(i == 0 && want_xlocked));
 	  return 1;
 	}
 	if (dnl->is_null() && !dn->lock.can_read(-1)) {
 	  dout(10) << " null " << *dn << " is not readable, waiting" << dendl;
-	  dn->lock.add_waiter(SimpleLock::WAIT_RD, _open_ino_get_waiter(ino, m));
+	  dn->lock.add_waiter(SimpleLock::WAIT_RD, new C_MDC_OpenInoTraverseDir(this, ino, m, i == 0));
 	  return 1;
 	}
 	dout(10) << " no ino " << next_ino << " in " << *dir << dendl;
@@ -8463,7 +8514,7 @@ void MDCache::open_ino_finish(inodeno_t ino, open_ino_info_t& info, int ret)
 
 void MDCache::do_open_ino(inodeno_t ino, open_ino_info_t& info, int err)
 {
-  if (err < 0) {
+  if (err < 0 && err != -EAGAIN) {
     info.checked.clear();
     info.checked.insert(mds->get_nodeid());
     info.checking = MDS_RANK_NONE;
@@ -8473,6 +8524,8 @@ void MDCache::do_open_ino(inodeno_t ino, open_ino_info_t& info, int err)
       info.discover = false;
       info.ancestors.clear();
     }
+    if (err != -ENOENT && err != -ENOTDIR)
+      info.last_err = err;
   }
 
   if (info.check_peers) {
@@ -8534,9 +8587,9 @@ void MDCache::do_open_ino_peer(inodeno_t ino, open_ino_info_t& info)
   }
 }
 
-void MDCache::handle_open_ino(MMDSOpenIno *m)
+void MDCache::handle_open_ino(MMDSOpenIno *m, int err)
 {
-  dout(10) << "handle_open_ino " << *m << dendl;
+  dout(10) << "handle_open_ino " << *m << " err " << err << dendl;
 
   inodeno_t ino = m->ino;
   MMDSOpenInoReply *reply;
@@ -8558,6 +8611,8 @@ void MDCache::handle_open_ino(MMDSOpenIno *m)
     } else {
       reply->hint = in->authority().first;
     }
+  } else if (err < 0) {
+    reply = new MMDSOpenInoReply(m->get_tid(), ino, MDS_RANK_NONE, err);
   } else {
     mds_rank_t hint = MDS_RANK_NONE;
     int ret = open_ino_traverse_dir(ino, m, m->ancestors, false, false, &hint);
diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h
index 9152c90..62a60e6 100644
--- a/src/mds/MDCache.h
+++ b/src/mds/MDCache.h
@@ -693,6 +693,8 @@ public:
   void trim_client_leases();
   void check_memory_usage();
 
+  utime_t last_recall_state;
+
   // shutdown
   void shutdown_start();
   void shutdown_check();
@@ -913,10 +915,12 @@ protected:
     bool want_xlocked;
     version_t tid;
     int64_t pool;
+    int last_err;
     list<MDSInternalContextBase*> waiters;
     open_ino_info_t() : checking(MDS_RANK_NONE), auth_hint(MDS_RANK_NONE),
       check_peers(true), fetch_backtrace(true), discover(false),
-      want_replica(false), want_xlocked(false), tid(0), pool(-1) {}
+      want_replica(false), want_xlocked(false), tid(0), pool(-1),
+      last_err(0) {}
   };
   ceph_tid_t open_ino_last_tid;
   map<inodeno_t,open_ino_info_t> opening_inodes;
@@ -924,15 +928,14 @@ protected:
   void _open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err);
   void _open_ino_parent_opened(inodeno_t ino, int ret);
   void _open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int err);
-  void _open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir);
-  MDSInternalContextBase* _open_ino_get_waiter(inodeno_t ino, MMDSOpenIno *m);
+  void _open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir, bool parent);
   int open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
 			    vector<inode_backpointer_t>& ancestors,
 			    bool discover, bool want_xlocked, mds_rank_t *hint);
   void open_ino_finish(inodeno_t ino, open_ino_info_t& info, int err);
   void do_open_ino(inodeno_t ino, open_ino_info_t& info, int err);
   void do_open_ino_peer(inodeno_t ino, open_ino_info_t& info);
-  void handle_open_ino(MMDSOpenIno *m);
+  void handle_open_ino(MMDSOpenIno *m, int err=0);
   void handle_open_ino_reply(MMDSOpenInoReply *m);
   friend class C_IO_MDC_OpenInoBacktraceFetched;
   friend struct C_MDC_OpenInoTraverseDir;
diff --git a/src/mds/MDLog.cc b/src/mds/MDLog.cc
index 795ffb3..8d0cbb1 100644
--- a/src/mds/MDLog.cc
+++ b/src/mds/MDLog.cc
@@ -910,6 +910,10 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
     int write_result = jp.save(mds->objecter);
     // Nothing graceful we can do for this
     assert(write_result >= 0);
+  } else if (read_result == -EBLACKLISTED) {
+    derr << "Blacklisted during JournalPointer read!  Respawning..." << dendl;
+    mds->respawn();
+    assert(0); // Should be unreachable because respawn calls execv
   } else if (read_result != 0) {
     mds->clog->error() << "failed to read JournalPointer: " << read_result
                        << " (" << cpp_strerror(read_result) << ")";
@@ -936,7 +940,11 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
     C_SaferCond recover_wait;
     back.recover(&recover_wait);
     int recovery_result = recover_wait.wait();
-    if (recovery_result != 0) {
+    if (recovery_result == -EBLACKLISTED) {
+      derr << "Blacklisted during journal recovery!  Respawning..." << dendl;
+      mds->respawn();
+      assert(0); // Should be unreachable because respawn calls execv
+    } else if (recovery_result != 0) {
       // Journaler.recover succeeds if no journal objects are present: an error
       // means something worse like a corrupt header, which we can't handle here.
       mds->clog->error() << "Error recovering journal " << jp.front << ": "
@@ -979,7 +987,11 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
   int recovery_result = recover_wait.wait();
   dout(4) << "Journal " << jp.front << " recovered." << dendl;
 
-  if (recovery_result != 0) {
+  if (recovery_result == -EBLACKLISTED) {
+    derr << "Blacklisted during journal recovery!  Respawning..." << dendl;
+    mds->respawn();
+    assert(0); // Should be unreachable because respawn calls execv
+  } else if (recovery_result != 0) {
     mds->clog->error() << "Error recovering journal " << jp.front << ": "
       << cpp_strerror(recovery_result);
     mds->damaged_unlocked();
@@ -1404,9 +1416,20 @@ void MDLog::standby_trim_segments()
   bool removed_segment = false;
   while (have_any_segments()) {
     LogSegment *seg = get_oldest_segment();
-    if (seg->end > expire_pos)
+    dout(10) << " segment seq=" << seg->seq << " " << seg->offset <<
+      "~" << seg->end - seg->offset << dendl;
+
+    if (seg->end > expire_pos) {
+      dout(10) << " won't remove, not expired!" << dendl;
+      break;
+    }
+
+    if (segments.size() == 1) {
+      dout(10) << " won't remove, last segment!" << dendl;
       break;
-    dout(10) << " removing segment " << seg->seq << "/" << seg->offset << dendl;
+    }
+
+    dout(10) << " removing segment" << dendl;
     mds->mdcache->standby_trim_segment(seg);
     remove_oldest_segment();
     removed_segment = true;
@@ -1415,8 +1438,9 @@ void MDLog::standby_trim_segments()
   if (removed_segment) {
     dout(20) << " calling mdcache->trim!" << dendl;
     mds->mdcache->trim(-1);
-  } else
+  } else {
     dout(20) << " removed no segments!" << dendl;
+  }
 }
 
 void MDLog::dump_replay_status(Formatter *f) const
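
The standby_trim_segments() change above adds two guards: a segment is only removed once it is fully expired, and the final segment is always retained so a standby-replay daemon keeps a position to resume from. A compact sketch of that trimming rule, with a plain deque standing in for the MDLog segment map:

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <iostream>

    struct SegmentSketch { uint64_t seq; uint64_t end; };

    // Removes expired segments but never the last one; returns how many went.
    std::size_t standby_trim(std::deque<SegmentSketch> &segments, uint64_t expire_pos) {
      std::size_t removed = 0;
      while (!segments.empty()) {
        const SegmentSketch &seg = segments.front();
        if (seg.end > expire_pos)
          break;                        // not expired yet
        if (segments.size() == 1)
          break;                        // keep the final segment
        segments.pop_front();
        ++removed;
      }
      return removed;
    }

    int main() {
      std::deque<SegmentSketch> segs{{1, 100}, {2, 200}, {3, 300}};
      std::cout << standby_trim(segs, 250) << "\n";    // 2: seq 1 and 2 trimmed
      std::cout << standby_trim(segs, 1000) << "\n";   // 0: last segment retained
    }
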
diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc
index 0482477..33d07a5 100644
--- a/src/mds/MDSDaemon.cc
+++ b/src/mds/MDSDaemon.cc
@@ -801,11 +801,9 @@ int MDSDaemon::_handle_command(
     int64_t session_id = 0;
     bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
     assert(got);
-    const bool killed = mds_rank->kill_session(session_id);
-    if (!killed) {
+    bool killed = mds_rank->kill_session(session_id, false, ss);
+    if (!killed)
       r = -ENOENT;
-      ss << "session '" << session_id << "' not found";
-    }
   } else if (prefix == "heap") {
     if (!ceph_using_tcmalloc()) {
       r = -EOPNOTSUPP;
@@ -1114,12 +1112,12 @@ void MDSDaemon::respawn()
   }
   new_argv[orig_argc] = NULL;
 
-  /* Determine the path to our executable, try to read
-   * linux-specific /proc/ path first */
-  char exe_path[PATH_MAX];
-  ssize_t exe_path_bytes = readlink("/proc/self/exe", exe_path,
-				    sizeof(exe_path) - 1);
-  if (exe_path_bytes < 0) {
+  /* Determine the path to our executable: try the Linux-specific
+   * /proc/self/exe first. Exec'ing through that link lets us restart the
+   * same executable even if it has since been unlinked.
+   */
+  char exe_path[PATH_MAX] = "";
+  if (readlink("/proc/self/exe", exe_path, PATH_MAX-1) == -1) {
     /* Print CWD for the user's interest */
     char buf[PATH_MAX];
     char *cwd = getcwd(buf, sizeof(buf));
@@ -1127,9 +1125,10 @@ void MDSDaemon::respawn()
     dout(1) << " cwd " << cwd << dendl;
 
     /* Fall back to a best-effort: just running in our CWD */
-    strncpy(exe_path, orig_argv[0], sizeof(exe_path) - 1);
+    strncpy(exe_path, orig_argv[0], PATH_MAX-1);
   } else {
-    exe_path[exe_path_bytes] = '\0';
+    dout(1) << "respawning with exe " << exe_path << dendl;
+    strcpy(exe_path, "/proc/self/exe");
   }
 
   dout(1) << " exe_path " << exe_path << dendl;
diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc
index 19a584a..728af77 100644
--- a/src/mds/MDSRank.cc
+++ b/src/mds/MDSRank.cc
@@ -1240,7 +1240,7 @@ void MDSRank::clientreplay_start()
 bool MDSRank::queue_one_replay()
 {
   if (replay_queue.empty()) {
-    clientreplay_done();
+    mdlog->wait_for_safe(new C_MDS_VoidFn(this, &MDSRank::clientreplay_done));
     return false;
   }
   queue_waiter(replay_queue.front());
@@ -1556,7 +1556,7 @@ void MDSRankDispatcher::handle_mds_map(
     oldmap->get_down_mds_set(&olddown);
     mdsmap->get_down_mds_set(&down);
     for (set<mds_rank_t>::iterator p = down.begin(); p != down.end(); ++p) {
-      if (olddown.count(*p) == 0) {
+      if (oldmap->have_inst(*p) && olddown.count(*p) == 0) {
         messenger->mark_down(oldmap->get_inst(*p).addr);
         handle_mds_failure(*p);
       }
@@ -1710,18 +1710,11 @@ bool MDSRankDispatcher::handle_asok_command(
     assert(got_arg == true);
 
     mds_lock.Lock();
-    Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT,
-                                                            strtol(client_id.c_str(), 0, 10)));
-    if (session) {
-      C_SaferCond on_safe;
-      server->kill_session(session, &on_safe);
-
-      mds_lock.Unlock();
-      on_safe.wait();
-    } else {
-      dout(15) << "session " << session << " not in sessionmap!" << dendl;
-      mds_lock.Unlock();
-    }
+    std::stringstream ss;
+    bool killed = kill_session(strtol(client_id.c_str(), 0, 10), true, ss);
+    if (!killed)
+      dout(15) << ss.str() << dendl;
+    mds_lock.Unlock();
   } else if (command == "scrub_path") {
     string path;
     vector<string> scrubop_vec;
@@ -1788,13 +1781,13 @@ protected:
 public:
   C_MDS_Send_Command_Reply(MDSRank *_mds, MCommand *_m) :
     MDSInternalContext(_mds), m(_m) { m->get(); }
-  void send (int r) {
+  void send (int r, const std::string& out_str) {
     bufferlist bl;
-    MDSDaemon::send_command_reply(m, mds, r, bl, "");
+    MDSDaemon::send_command_reply(m, mds, r, bl, out_str);
     m->put();
   }
   void finish (int r) {
-    send(r);
+    send(r, "");
   }
 };
 
@@ -1805,9 +1798,15 @@ public:
  */
 void MDSRankDispatcher::evict_sessions(const SessionFilter &filter, MCommand *m)
 {
-  std::list<Session*> victims;
   C_MDS_Send_Command_Reply *reply = new C_MDS_Send_Command_Reply(this, m);
 
+  if (is_any_replay()) {
+    reply->send(-EAGAIN, "MDS is replaying log");
+    delete reply;
+    return;
+  }
+
+  std::list<Session*> victims;
   const auto sessions = sessionmap.get_sessions();
   for (const auto p : sessions)  {
     if (!p.first.is_client()) {
@@ -1824,7 +1823,7 @@ void MDSRankDispatcher::evict_sessions(const SessionFilter &filter, MCommand *m)
   dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
 
   if (victims.empty()) {
-    reply->send(0);
+    reply->send(0, "");
     delete reply;
     return;
   }
@@ -2420,16 +2419,29 @@ void MDSRankDispatcher::handle_osd_map()
   objecter->maybe_request_map();
 }
 
-bool MDSRankDispatcher::kill_session(int64_t session_id)
+bool MDSRankDispatcher::kill_session(int64_t session_id, bool wait, std::stringstream& err_ss)
 {
+  if (is_any_replay()) {
+    err_ss << "MDS is replaying log";
+    return false;
+  }
+
   Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
+  if (!session) {
+    err_ss << "session " << session_id << " not in sessionmap!";
+    return false;
+  }
+  if (wait) {
+    C_SaferCond on_safe;
+    server->kill_session(session, &on_safe);
 
-  if (session) {
-    server->kill_session(session, NULL);
-    return true;
+    mds_lock.Unlock();
+    on_safe.wait();
+    mds_lock.Lock();
   } else {
-    return false;
+    server->kill_session(session, NULL);
   }
+  return true;
 }
 
 void MDSRank::bcast_mds_map()
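
kill_session() above now reports lookup failures through a stringstream, refuses to run during replay, and takes a wait flag so callers (such as the asok handler) can block until the eviction is safe. A stripped-down sketch of that shape, with a trivial condition object standing in for C_SaferCond and a std::set standing in for the session map:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <set>
    #include <sstream>
    #include <thread>

    struct SaferCondSketch {
      std::mutex m;
      std::condition_variable cv;
      bool done = false;
      void complete() { std::lock_guard<std::mutex> l(m); done = true; cv.notify_all(); }
      void wait() { std::unique_lock<std::mutex> l(m); cv.wait(l, [&] { return done; }); }
    };

    bool kill_session(std::set<long> &sessions, long id, bool wait, std::ostream &err) {
      if (!sessions.count(id)) {
        err << "session " << id << " not in sessionmap!";
        return false;
      }
      SaferCondSketch on_safe;
      std::thread evictor([&] { sessions.erase(id); on_safe.complete(); });
      if (wait)
        on_safe.wait();          // the real code drops mds_lock around this wait
      evictor.join();
      return true;
    }

    int main() {
      std::set<long> sessions{42};
      std::ostringstream err;
      std::cout << kill_session(sessions, 42, true, err) << "\n";   // 1
      std::cout << kill_session(sessions, 7, false, err) << "\n";   // 0
      std::cout << err.str() << "\n";
    }
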
@@ -2456,12 +2468,11 @@ bool MDSRankDispatcher::handle_command_legacy(std::vector<std::string> args)
       mdcache->dump_cache();
   }
   else if (args[0] == "session" && args[1] == "kill") {
-    Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT,
-							    strtol(args[2].c_str(), 0, 10)));
-    if (session)
-      server->kill_session(session, NULL);
-    else
-      dout(15) << "session " << session << " not in sessionmap!" << dendl;
+    std::stringstream ss;
+    bool killed = kill_session(strtol(args[2].c_str(), 0, 10), false, ss);
+    if (!killed)
+      dout(15) << ss.str() << dendl;
+
   } else if (args[0] == "issue_caps") {
     long inum = strtol(args[1].c_str(), 0, 10);
     CInode *in = mdcache->get_inode(inodeno_t(inum));
diff --git a/src/mds/MDSRank.h b/src/mds/MDSRank.h
index 17e259b..c8022e6 100644
--- a/src/mds/MDSRank.h
+++ b/src/mds/MDSRank.h
@@ -488,7 +488,7 @@ public:
                            Formatter *f, std::ostream& ss);
   void handle_mds_map(MMDSMap *m, MDSMap *oldmap);
   void handle_osd_map();
-  bool kill_session(int64_t session_id);
+  bool kill_session(int64_t session_id, bool wait, std::stringstream& ss);
   void update_log_config();
   bool handle_command_legacy(std::vector<std::string> args);
 
diff --git a/src/mds/ScatterLock.h b/src/mds/ScatterLock.h
index 78bd474..2105578 100644
--- a/src/mds/ScatterLock.h
+++ b/src/mds/ScatterLock.h
@@ -96,6 +96,14 @@ public:
       get_state() == LOCK_MIX;
   }
 
+  void set_xlock_snap_sync(MDSInternalContextBase *c)
+  {
+    assert(get_type() == CEPH_LOCK_IFILE);
+    assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
+    state = LOCK_XLOCKSNAP;
+    add_waiter(WAIT_STABLE, c);
+  }
+
   xlist<ScatterLock*>::item *get_updated_item() { return &more()->item_updated; }
 
   utime_t get_update_stamp() {
diff --git a/src/mds/Server.cc b/src/mds/Server.cc
index 61320c3..6160bce 100644
--- a/src/mds/Server.cc
+++ b/src/mds/Server.cc
@@ -3180,9 +3180,8 @@ void Server::handle_client_openc(MDRequestRef& mdr)
     // file would have inherited anyway from its parent.
     CDir *parent = dn->get_dir();
     CInode *parent_in = parent->get_inode();
-    int64_t parent_pool = parent_in->inode.layout.pool_id;
-
-    if (layout.pool_id != parent_pool) {
+    if (layout.pool_id != parent_in->inode.layout.pool_id
+        || layout.pool_ns != parent_in->inode.layout.pool_ns) {
       access |= MAY_SET_POOL;
     }
 
@@ -3395,7 +3394,9 @@ void Server::handle_client_readdir(MDRequestRef& mdr)
   bufferlist dnbl;
   __u32 numfiles = 0;
   bool end = (dir->begin() == dir->end());
-  for (CDir::map_t::iterator it = dir->begin();
+  // skip all dns < dentry_key_t(snapid, offset_str, offset_hash)
+  dentry_key_t skip_key(snapid, offset_str.c_str(), offset_hash);
+  for (CDir::map_t::iterator it = offset_str.empty() ? dir->begin() : dir->lower_bound(skip_key);
        !end && numfiles < max;
        end = (it == dir->end())) {
     CDentry *dn = it->second;
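
The readdir hunk above resumes iteration at lower_bound(skip_key) instead of scanning from begin() and discarding entries below the client's offset, which matters for large directories listed in many chunks. The same idea on a plain std::map:

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, int> items{
          {"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}};

      const std::string offset_str = "c";   // client already received everything < "c"
      auto it = offset_str.empty() ? items.begin() : items.lower_bound(offset_str);

      for (; it != items.end(); ++it)
        std::cout << it->first << " ";      // prints: c d e
      std::cout << "\n";
    }
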
@@ -3788,13 +3789,20 @@ void Server::handle_client_setattr(MDRequestRef& mdr)
 
   pi = cur->project_inode();
 
-  if (mask & CEPH_SETATTR_MODE)
-    pi->mode = (pi->mode & ~07777) | (req->head.args.setattr.mode & 07777);
   if (mask & CEPH_SETATTR_UID)
     pi->uid = req->head.args.setattr.uid;
   if (mask & CEPH_SETATTR_GID)
     pi->gid = req->head.args.setattr.gid;
 
+  if (mask & CEPH_SETATTR_MODE)
+    pi->mode = (pi->mode & ~07777) | (req->head.args.setattr.mode & 07777);
+  else if ((mask & (CEPH_SETATTR_UID|CEPH_SETATTR_GID)) &&
+	    S_ISREG(pi->mode)) {
+    pi->mode &= ~S_ISUID;
+    if ((pi->mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP))
+      pi->mode &= ~S_ISGID;
+  }
+
   if (mask & CEPH_SETATTR_MTIME)
     pi->mtime = req->head.args.setattr.mtime;
   if (mask & CEPH_SETATTR_ATIME)
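
The setattr reordering above reproduces the usual POSIX chown side effect: when ownership changes on a regular file and the client did not also set an explicit mode, the setuid bit is cleared, and setgid is cleared only when group-execute is set (a bare setgid bit without g+x conventionally marks mandatory locking and is left alone). A worked example of just that mode fix-up:

    #include <cstdio>
    #include <sys/stat.h>

    unsigned chown_mode_fixup(unsigned mode) {
      if (!S_ISREG(mode))
        return mode;
      mode &= ~S_ISUID;
      if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
        mode &= ~S_ISGID;
      return mode;
    }

    int main() {
      std::printf("%o\n", chown_mode_fixup(S_IFREG | 06755)); // 100755: both bits dropped
      std::printf("%o\n", chown_mode_fixup(S_IFREG | 02644)); // 102644: setgid kept (no g+x)
      return 0;
    }
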
@@ -4239,7 +4247,8 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
     if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
       return;
 
-    if (cur->inode.layout.pool_id != layout.pool_id) {
+    if (cur->inode.layout.pool_id != layout.pool_id
+        || cur->inode.layout.pool_ns != layout.pool_ns) {
       if (!check_access(mdr, cur, MAY_SET_POOL)) {
         return;
       }
@@ -4290,7 +4299,8 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
     if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
       return;
 
-    if (cur->inode.layout.pool_id != layout.pool_id) {
+    if (cur->inode.layout.pool_id != layout.pool_id
+        || cur->inode.layout.pool_ns != layout.pool_ns) {
       if (!check_access(mdr, cur, MAY_SET_POOL)) {
         return;
       }
@@ -6000,7 +6010,7 @@ bool Server::_dir_is_nonempty(MDRequestRef& mdr, CInode *in)
 {
   dout(10) << "dir_is_nonempty " << *in << dendl;
   assert(in->is_auth());
-  assert(in->filelock.can_read(-1));
+  assert(in->filelock.can_read(mdr->get_client()));
 
   frag_info_t dirstat;
   version_t dirstat_version = in->get_projected_inode()->dirstat.version;
diff --git a/src/mds/SessionMap.cc b/src/mds/SessionMap.cc
index d91713f..6d09694 100644
--- a/src/mds/SessionMap.cc
+++ b/src/mds/SessionMap.cc
@@ -771,11 +771,8 @@ void Session::notify_cap_release(size_t n_caps)
 {
   if (!recalled_at.is_zero()) {
     recall_release_count += n_caps;
-    if (recall_release_count >= recall_count) {
-      recalled_at = utime_t();
-      recall_count = 0;
-      recall_release_count = 0;
-    }
+    if (recall_release_count >= recall_count)
+      clear_recalled_at();
   }
 }
 
@@ -790,13 +787,22 @@ void Session::notify_recall_sent(int const new_limit)
   if (recalled_at.is_zero()) {
     // Entering recall phase, set up counters so we can later
     // judge whether the client has respected the recall request
-    recalled_at = ceph_clock_now(g_ceph_context);
+    recalled_at = last_recall_sent = ceph_clock_now(g_ceph_context);
     assert (new_limit < caps.size());  // Behaviour of Server::recall_client_state
     recall_count = caps.size() - new_limit;
     recall_release_count = 0;
+  } else {
+    last_recall_sent = ceph_clock_now(g_ceph_context);
   }
 }
 
+void Session::clear_recalled_at()
+{
+  recalled_at = last_recall_sent = utime_t();
+  recall_count = 0;
+  recall_release_count = 0;
+}
+
 void Session::set_client_metadata(map<string, string> const &meta)
 {
   info.client_metadata = meta;
@@ -854,7 +860,7 @@ int Session::check_access(CInode *in, unsigned mask,
     path = in->get_projected_inode()->stray_prior_path;
     dout(20) << __func__ << " stray_prior_path " << path << dendl;
   } else {
-    in->make_path_string(path, in->get_projected_parent_dn());
+    in->make_path_string(path, true);
     dout(20) << __func__ << " path " << path << dendl;
   }
   if (path.length())
diff --git a/src/mds/SessionMap.h b/src/mds/SessionMap.h
index d03e16c..6ddb603 100644
--- a/src/mds/SessionMap.h
+++ b/src/mds/SessionMap.h
@@ -123,6 +123,7 @@ public:
 
   // Ephemeral state for tracking progress of capability recalls
   utime_t recalled_at;  // When was I asked to SESSION_RECALL?
+  utime_t last_recall_sent;
   uint32_t recall_count;  // How many caps was I asked to SESSION_RECALL?
   uint32_t recall_release_count;  // How many caps have I actually revoked?
 
@@ -142,6 +143,7 @@ public:
 
   void notify_cap_release(size_t n_caps);
   void notify_recall_sent(int const new_limit);
+  void clear_recalled_at();
 
   inodeno_t next_ino() {
     if (info.prealloc_inos.empty())
@@ -309,7 +311,7 @@ public:
 
   Session() : 
     state(STATE_CLOSED), state_seq(0), importing_count(0),
-    recalled_at(), recall_count(0), recall_release_count(0),
+    recall_count(0), recall_release_count(0),
     auth_caps(g_ceph_context),
     connection(NULL), item_session_list(this),
     requests(0),  // member_offset passed to front() manually
diff --git a/src/mds/SimpleLock.h b/src/mds/SimpleLock.h
index 6d1d7fa..10a3bd0 100644
--- a/src/mds/SimpleLock.h
+++ b/src/mds/SimpleLock.h
@@ -101,6 +101,7 @@ public:
     case LOCK_PREXLOCK: return "prexlock";
     case LOCK_XLOCK: return "xlock";
     case LOCK_XLOCKDONE: return "xlockdone";
+    case LOCK_XLOCKSNAP: return "xlocksnap";
     case LOCK_LOCK_XLOCK: return "lock->xlock";
 
     case LOCK_SYNC_LOCK: return "sync->lock";
@@ -493,7 +494,8 @@ public:
     more()->xlock_by.reset();
   }
   void put_xlock() {
-    assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE || is_locallock() ||
+    assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
+	   state == LOCK_XLOCKSNAP || is_locallock() ||
 	   state == LOCK_LOCK /* if we are a master of a slave */);
     --more()->num_xlock;
     parent->put(MDSCacheObject::PIN_LOCK);
diff --git a/src/mds/locks.c b/src/mds/locks.c
index aa61236..2fc0a5f 100644
--- a/src/mds/locks.c
+++ b/src/mds/locks.c
@@ -99,6 +99,7 @@ const struct sm_state_t filelock[LOCK_MAX] = {
     [LOCK_PREXLOCK]  = { LOCK_LOCK, false, LOCK_LOCK, 0,    XCL, 0,   0,   0,   0,   ANY, CEPH_CAP_GCACHE|CEPH_CAP_GBUFFER,0,0,0 },
     [LOCK_XLOCK]     = { LOCK_LOCK, false, LOCK_LOCK, 0,    XCL, 0,   0,   0,   0,   0,   CEPH_CAP_GCACHE|CEPH_CAP_GBUFFER,0,0,0 },
     [LOCK_XLOCKDONE] = { LOCK_LOCK, false, LOCK_LOCK, XCL,  XCL, XCL, 0,   0,   XCL, 0,   CEPH_CAP_GCACHE|CEPH_CAP_GBUFFER,0,CEPH_CAP_GSHARED,0 },
+    [LOCK_XLOCKSNAP] = { LOCK_LOCK, false, LOCK_LOCK, 0,    XCL, 0,   0,   0,   0,   0,   CEPH_CAP_GCACHE,0,0,0 },
     [LOCK_LOCK_XLOCK]= { LOCK_PREXLOCK,false,LOCK_LOCK,0,   XCL, 0,   0,   0,   0,   XCL, CEPH_CAP_GCACHE|CEPH_CAP_GBUFFER,0,0,0 },
 
     [LOCK_MIX]       = { 0,         false, LOCK_MIX,  0,    0,   REQ, ANY, 0,   0,   0,   CEPH_CAP_GRD|CEPH_CAP_GWR|CEPH_CAP_GLAZYIO,0,0,CEPH_CAP_GRD },
diff --git a/src/mds/locks.h b/src/mds/locks.h
index d1585ce..9f4ea56 100644
--- a/src/mds/locks.h
+++ b/src/mds/locks.h
@@ -52,6 +52,7 @@ enum {
   LOCK_PREXLOCK,    // A    . . .. . . / . .   (lock)
   LOCK_XLOCK,       // A    . . .. . . / . .   (lock)
   LOCK_XLOCKDONE,   // A    r p rd l x / . .   (lock)  <-- by same client only!!
+  LOCK_XLOCKSNAP,   // also revoke Fb
   LOCK_LOCK_XLOCK,
 
   LOCK_SYNC_LOCK,    // AR   R . .. . . / . .   R .. . . / . .
diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc
index b91e360..945cefd 100644
--- a/src/mon/MDSMonitor.cc
+++ b/src/mon/MDSMonitor.cc
@@ -1555,7 +1555,7 @@ int MDSMonitor::management_command(
       }
     }
 
-    if (pending_fsmap.any_filesystems()
+    if (pending_fsmap.filesystem_count() > 0
         && !pending_fsmap.get_enable_multiple()) {
       ss << "Creation of multiple filesystems is disabled.  To enable "
             "this experimental feature, use 'ceph fs flag set enable_multiple "
@@ -2786,14 +2786,20 @@ bool MDSMonitor::maybe_promote_standby(std::shared_ptr<Filesystem> fs)
 	do_propose = true;
       }
     }
-  }
+  } else {
+    // There were no failures to replace, so try using any available standbys
+    // as standby-replay daemons.
 
-  // There were no failures to replace, so try using any available standbys
-  // as standby-replay daemons.
-  if (failed.empty()) {
+    // Take a copy of the standby GIDs so that we can iterate over
+    // them while possibly modifying standby_daemons during the loop
+    // (if we promote anyone they are removed from standby_daemons)
+    std::vector<mds_gid_t> standby_gids;
     for (const auto &j : pending_fsmap.standby_daemons) {
-      const auto &gid = j.first;
-      const auto &info = j.second;
+      standby_gids.push_back(j.first);
+    }
+
+    for (const auto &gid : standby_gids) {
+      const auto &info = pending_fsmap.standby_daemons.at(gid);
       assert(info.state == MDSMap::STATE_STANDBY);
 
       if (!info.standby_replay) {
@@ -2819,8 +2825,7 @@ bool MDSMonitor::maybe_promote_standby(std::shared_ptr<Filesystem> fs)
         // that doesn't correspond to an existing filesystem, especially
         // if we loaded from a version with a bug (#17466)
         if (info.standby_for_fscid != FS_CLUSTER_ID_NONE
-            && pending_fsmap.get_filesystems().count(
-              info.standby_for_fscid) == 0) {
+            && !pending_fsmap.filesystem_exists(info.standby_for_fscid)) {
           derr << "gid " << gid << " has invalid standby_for_fscid "
                << info.standby_for_fscid << dendl;
           continue;
diff --git a/src/mon/MonCap.cc b/src/mon/MonCap.cc
index a2540b5..45b66a8 100644
--- a/src/mon/MonCap.cc
+++ b/src/mon/MonCap.cc
@@ -146,10 +146,16 @@ void MonCapGrant::expand_profile(EntityName name) const
     profile_grants.push_back(MonCapGrant("config-key delete", "key", StringConstraint("", prefix)));
   }
   if (profile == "bootstrap-osd") {
+    string prefix = "dm-crypt/osd";
+    profile_grants.push_back(MonCapGrant("config-key put", "key", StringConstraint("", prefix)));
     profile_grants.push_back(MonCapGrant("mon", MON_CAP_R));  // read monmap
     profile_grants.push_back(MonCapGrant("osd", MON_CAP_R));  // read osdmap
     profile_grants.push_back(MonCapGrant("mon getmap"));
     profile_grants.push_back(MonCapGrant("osd create"));
+    profile_grants.push_back(MonCapGrant("auth get-or-create"));
+    profile_grants.back().command_args["entity"] = StringConstraint("", "client.");
+    prefix = "allow command \"config-key get\" with key=\"dm-crypt/osd/";
+    profile_grants.back().command_args["caps_mon"] = StringConstraint("", prefix);
     profile_grants.push_back(MonCapGrant("auth add"));
     profile_grants.back().command_args["entity"] = StringConstraint("", "osd.");
     profile_grants.back().command_args["caps_mon"] = StringConstraint("allow profile osd", "");
diff --git a/src/mon/Monitor.cc b/src/mon/Monitor.cc
index 4047351..d0cb5d4 100644
--- a/src/mon/Monitor.cc
+++ b/src/mon/Monitor.cc
@@ -2478,7 +2478,7 @@ void Monitor::get_cluster_status(stringstream &ss, Formatter *f)
     ss << "     monmap " << *monmap << "\n";
     ss << "            election epoch " << get_epoch()
        << ", quorum " << get_quorum() << " " << get_quorum_names() << "\n";
-    if (mdsmon()->fsmap.any_filesystems()) {
+    if (mdsmon()->fsmap.filesystem_count() > 0) {
       ss << "      fsmap " << mdsmon()->fsmap << "\n";
     }
 
@@ -4292,7 +4292,7 @@ void Monitor::handle_subscribe(MonOpRequestRef op)
 	pgmon()->check_sub(s->sub_map["osd_pg_creates"]);
       }
     } else if (p->first == "monmap") {
-      check_sub(s->sub_map["monmap"]);
+      monmon()->check_sub(s->sub_map[p->first]);
     } else if (logmon()->sub_name_to_id(p->first) >= 0) {
       logmon()->check_sub(s->sub_map[p->first]);
     }
@@ -4381,32 +4381,6 @@ bool Monitor::ms_handle_reset(Connection *con)
   return true;
 }
 
-void Monitor::check_subs()
-{
-  string type = "monmap";
-  if (session_map.subs.count(type) == 0)
-    return;
-  xlist<Subscription*>::iterator p = session_map.subs[type]->begin();
-  while (!p.end()) {
-    Subscription *sub = *p;
-    ++p;
-    check_sub(sub);
-  }
-}
-
-void Monitor::check_sub(Subscription *sub)
-{
-  dout(10) << "check_sub monmap next " << sub->next << " have " << monmap->get_epoch() << dendl;
-  if (sub->next <= monmap->get_epoch()) {
-    send_latest_monmap(sub->session->con.get());
-    if (sub->onetime)
-      session_map.remove_sub(sub);
-    else
-      sub->next = monmap->get_epoch() + 1;
-  }
-}
-
-
 // -----
 
 void Monitor::send_latest_monmap(Connection *con)
diff --git a/src/mon/Monitor.h b/src/mon/Monitor.h
index bf94dea..cc49a3b 100644
--- a/src/mon/Monitor.h
+++ b/src/mon/Monitor.h
@@ -690,9 +690,6 @@ public:
   MonSessionMap session_map;
   AdminSocketHook *admin_hook;
 
-  void check_subs();
-  void check_sub(Subscription *sub);
-
   void send_latest_monmap(Connection *con);
 
   // messages
diff --git a/src/mon/MonmapMonitor.cc b/src/mon/MonmapMonitor.cc
index 81964ba..5f20b98 100644
--- a/src/mon/MonmapMonitor.cc
+++ b/src/mon/MonmapMonitor.cc
@@ -76,6 +76,8 @@ void MonmapMonitor::update_from_paxos(bool *need_bootstrap)
     t->erase("mkfs", "monmap");
     mon->store->apply_transaction(t);
   }
+
+  check_subs();
 }
 
 void MonmapMonitor::create_pending()
@@ -579,3 +581,29 @@ int MonmapMonitor::get_monmap(bufferlist &bl)
   }
   return 0;
 }
+
+void MonmapMonitor::check_subs()
+{
+  const string type = "monmap";
+  auto subs = mon->session_map.subs.find(type);
+  if (subs == mon->session_map.subs.end())
+    return;
+  for (auto sub : *subs->second) {
+    check_sub(sub);
+  }
+}
+
+void MonmapMonitor::check_sub(Subscription *sub)
+{
+  const auto epoch = mon->monmap->get_epoch();
+  dout(10) << __func__
+	   << " monmap next " << sub->next
+	   << " have " << epoch << dendl;
+  if (sub->next <= epoch) {
+    mon->send_latest_monmap(sub->session->con.get());
+    if (sub->onetime)
+      mon->session_map.remove_sub(sub);
+    else
+      sub->next = epoch + 1;
+  }
+}
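
The relocated subscription check above follows a simple epoch-driven pattern: if a subscriber's next epoch is not ahead of the map the monitor holds, send the latest map, then drop a one-time subscription or advance its next epoch. A minimal standalone sketch of that pattern, with Sub and the send call as stand-ins for the real Subscription/SessionMap machinery:

    // Epoch-driven subscription notification, illustrative names only.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Sub {
      uint64_t next;   // first epoch the subscriber has not yet seen
      bool onetime;
      bool active;
    };

    void check_sub(Sub& sub, uint64_t have_epoch) {
      if (sub.next <= have_epoch) {
        std::cout << "send map epoch " << have_epoch << "\n";  // stand-in for send_latest_monmap()
        if (sub.onetime)
          sub.active = false;            // one-shot: remove the subscription
        else
          sub.next = have_epoch + 1;     // persistent: wait for the next epoch
      }
    }

    int main() {
      std::vector<Sub> subs = {{3, false, true}, {7, true, true}};
      for (auto& s : subs)
        check_sub(s, 5);                 // only the first sub is behind epoch 5
      for (auto& s : subs)
        std::cout << "next=" << s.next << " active=" << s.active << "\n";
    }
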
diff --git a/src/mon/MonmapMonitor.h b/src/mon/MonmapMonitor.h
index ecfbe1e..42fd8af 100644
--- a/src/mon/MonmapMonitor.h
+++ b/src/mon/MonmapMonitor.h
@@ -81,7 +81,12 @@ class MonmapMonitor : public PaxosService {
 
   void tick();
 
- private:
+  void check_sub(Subscription *sub);
+
+private:
+  void check_subs();
+
+private:
   bufferlist monmap_bl;
 };
 
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index b9cdc4d..aa0296c 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -284,12 +284,19 @@ void OSDMonitor::update_from_paxos(bool *need_bootstrap)
   }
 
   for (int o = 0; o < osdmap.get_max_osd(); o++) {
+    if (osdmap.is_out(o))
+      continue;
+    auto found = down_pending_out.find(o);
     if (osdmap.is_down(o)) {
       // populate down -> out map
-      if (osdmap.is_in(o) &&
-	  down_pending_out.count(o) == 0) {
-	dout(10) << " adding osd." << o << " to down_pending_out map" << dendl;
-	down_pending_out[o] = ceph_clock_now(g_ceph_context);
+      if (found == down_pending_out.end()) {
+        dout(10) << " adding osd." << o << " to down_pending_out map" << dendl;
+        down_pending_out[o] = ceph_clock_now(g_ceph_context);
+      }
+    } else {
+      if (found != down_pending_out.end()) {
+        dout(10) << " removing osd." << o << " from down_pending_out map" << dendl;
+        down_pending_out.erase(found);
       }
     }
   }
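
The hunk above reconciles down_pending_out with the current map on every update: OSDs that are already out are skipped, down-and-in OSDs get a timestamp added, and OSDs that came back up have their pending entry removed. A rough standalone sketch of that reconciliation, using an illustrative State enum instead of the real OSDMap accessors:

    // Down->out bookkeeping sketch; State and the maps are stand-ins.
    #include <ctime>
    #include <iostream>
    #include <map>

    enum State { UP, DOWN, OUT };

    void reconcile(const std::map<int, State>& osds,
                   std::map<int, std::time_t>& down_pending_out) {
      for (std::map<int, State>::const_iterator p = osds.begin(); p != osds.end(); ++p) {
        if (p->second == OUT)
          continue;                                        // already out: nothing to schedule
        std::map<int, std::time_t>::iterator found = down_pending_out.find(p->first);
        if (p->second == DOWN) {
          if (found == down_pending_out.end())
            down_pending_out[p->first] = std::time(nullptr);  // start the down->out clock
        } else if (found != down_pending_out.end()) {
          down_pending_out.erase(found);                   // back up: cancel the clock
        }
      }
    }

    int main() {
      std::map<int, State> osds;
      osds[0] = UP; osds[1] = DOWN; osds[2] = OUT;
      std::map<int, std::time_t> pending;
      pending[0] = 100;                                    // osd.0 has since come back up
      reconcile(osds, pending);
      for (std::map<int, std::time_t>::iterator p = pending.begin(); p != pending.end(); ++p)
        std::cout << "osd." << p->first << " down since " << p->second << std::endl;
      // prints only osd.1
    }
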
@@ -1215,7 +1222,9 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
     // determine appropriate features
     if (!tmp.test_flag(CEPH_OSDMAP_REQUIRE_JEWEL)) {
       dout(10) << __func__ << " encoding without feature SERVER_JEWEL" << dendl;
-      features &= ~CEPH_FEATURE_SERVER_JEWEL;
+      features &= ~(CEPH_FEATURE_SERVER_JEWEL |
+	  CEPH_FEATURE_NEW_OSDOP_ENCODING |
+	  CEPH_FEATURE_CRUSH_TUNABLES5);
     }
     dout(10) << __func__ << " encoding full map with " << features << dendl;
 
@@ -1716,15 +1725,17 @@ bool OSDMonitor::can_mark_in(int i)
   return true;
 }
 
-void OSDMonitor::check_failures(utime_t now)
+bool OSDMonitor::check_failures(utime_t now)
 {
+  bool found_failure = false;
   for (map<int,failure_info_t>::iterator p = failure_info.begin();
        p != failure_info.end();
        ++p) {
     if (can_mark_down(p->first)) {
-      check_failure(now, p->first, p->second);
+      found_failure |= check_failure(now, p->first, p->second);
     }
   }
+  return found_failure;
 }
 
 bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
@@ -2720,7 +2731,8 @@ void OSDMonitor::tick()
   utime_t now = ceph_clock_now(g_ceph_context);
 
   // mark osds down?
-  check_failures(now);
+  if (check_failures(now))
+    do_propose = true;
 
   // mark down osds out?
 
@@ -2955,21 +2967,25 @@ void OSDMonitor::get_health(list<pair<health_status_t,string> >& summary,
     }
 
     // warn about flags
-    if (osdmap.test_flag(CEPH_OSDMAP_FULL |
-			 CEPH_OSDMAP_PAUSERD |
-			 CEPH_OSDMAP_PAUSEWR |
-			 CEPH_OSDMAP_NOUP |
-			 CEPH_OSDMAP_NODOWN |
-			 CEPH_OSDMAP_NOIN |
-			 CEPH_OSDMAP_NOOUT |
-			 CEPH_OSDMAP_NOBACKFILL |
-			 CEPH_OSDMAP_NOREBALANCE |
-			 CEPH_OSDMAP_NORECOVER |
-			 CEPH_OSDMAP_NOSCRUB |
-			 CEPH_OSDMAP_NODEEP_SCRUB |
-			 CEPH_OSDMAP_NOTIERAGENT)) {
+    uint64_t warn_flags =
+      CEPH_OSDMAP_FULL |
+      CEPH_OSDMAP_PAUSERD |
+      CEPH_OSDMAP_PAUSEWR |
+      CEPH_OSDMAP_PAUSEREC |
+      CEPH_OSDMAP_NOUP |
+      CEPH_OSDMAP_NODOWN |
+      CEPH_OSDMAP_NOIN |
+      CEPH_OSDMAP_NOOUT |
+      CEPH_OSDMAP_NOBACKFILL |
+      CEPH_OSDMAP_NORECOVER |
+      CEPH_OSDMAP_NOSCRUB |
+      CEPH_OSDMAP_NODEEP_SCRUB |
+      CEPH_OSDMAP_NOTIERAGENT |
+      CEPH_OSDMAP_NOREBALANCE;
+    if (osdmap.test_flag(warn_flags)) {
       ostringstream ss;
-      ss << osdmap.get_flag_string() << " flag(s) set";
+      ss << osdmap.get_flag_string(osdmap.get_flags() & warn_flags)
+	 << " flag(s) set";
       summary.push_back(make_pair(HEALTH_WARN, ss.str()));
       if (detail)
 	detail->push_back(make_pair(HEALTH_WARN, ss.str()));
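
The health warning above now masks the map flags before printing them, so only warn-worthy flags appear in the message. A tiny standalone sketch of that mask-and-format pattern; the flag names and bit values are illustrative, not the CEPH_OSDMAP_* constants:

    // Report only flags that are both set and in the warn mask.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    static const std::vector<std::pair<uint64_t, std::string>> kFlags = {
      {1ull << 0, "full"}, {1ull << 1, "pauserd"}, {1ull << 2, "noout"}, {1ull << 3, "sortbitwise"},
    };

    std::string flag_string(uint64_t flags) {
      std::string s;
      for (const auto& f : kFlags)
        if (flags & f.first)
          s += (s.empty() ? "" : ",") + f.second;
      return s;
    }

    int main() {
      const uint64_t warn_mask = (1ull << 0) | (1ull << 1) | (1ull << 2);  // sortbitwise is not warn-worthy
      uint64_t flags = (1ull << 2) | (1ull << 3);                          // noout + sortbitwise set
      if (flags & warn_mask)
        std::cout << flag_string(flags & warn_mask) << " flag(s) set\n";   // prints: noout flag(s) set
    }
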
@@ -7316,6 +7332,8 @@ done:
     np->read_tier = overlaypool_id;
     np->write_tier = overlaypool_id;
     np->last_force_op_resend = pending_inc.epoch;
+    pg_pool_t *noverlay_p = pending_inc.get_new_pool(overlaypool_id, overlay_p);
+    noverlay_p->last_force_op_resend = pending_inc.epoch;
     ss << "overlay for '" << poolstr << "' is now (or already was) '" << overlaypoolstr << "'";
     if (overlay_p->cache_mode == pg_pool_t::CACHEMODE_NONE)
       ss <<" (WARNING: overlay pool cache_mode is still NONE)";
@@ -7346,6 +7364,16 @@ done:
 
     // go
     pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
+    if (np->has_read_tier()) {
+      const pg_pool_t *op = osdmap.get_pg_pool(np->read_tier);
+      pg_pool_t *nop = pending_inc.get_new_pool(np->read_tier,op);
+      nop->last_force_op_resend = pending_inc.epoch;
+    }
+    if (np->has_write_tier()) {
+      const pg_pool_t *op = osdmap.get_pg_pool(np->write_tier);
+      pg_pool_t *nop = pending_inc.get_new_pool(np->write_tier, op);
+      nop->last_force_op_resend = pending_inc.epoch;
+    }
     np->clear_read_tier();
     np->clear_write_tier();
     np->last_force_op_resend = pending_inc.epoch;
@@ -7582,6 +7610,8 @@ done:
     np->tiers.insert(tierpool_id);
     np->read_tier = np->write_tier = tierpool_id;
     np->set_snap_epoch(pending_inc.epoch); // tier will update to our snap info
+    np->last_force_op_resend = pending_inc.epoch;
+    ntp->last_force_op_resend = pending_inc.epoch;
     ntp->tier_of = pool_id;
     ntp->cache_mode = mode;
     ntp->hit_set_count = g_conf->osd_tier_default_cache_hit_set_count;
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 4fb1ba4..c2826d3 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -134,7 +134,7 @@ private:
   SimpleLRU<version_t, bufferlist> inc_osd_cache;
   SimpleLRU<version_t, bufferlist> full_osd_cache;
 
-  void check_failures(utime_t now);
+  bool check_failures(utime_t now);
   bool check_failure(utime_t now, int target_osd, failure_info_t& fi);
 
   // map thrashing
diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc
index fbef9ba..861c804 100644
--- a/src/mon/PGMap.cc
+++ b/src/mon/PGMap.cc
@@ -964,20 +964,23 @@ bool PGMap::get_stuck_counts(const utime_t cutoff, map<string, int>& note) const
   for (ceph::unordered_map<pg_t, pg_stat_t>::const_iterator i = pg_stat.begin();
        i != pg_stat.end();
        ++i) {
-
     if (! (i->second.state & PG_STATE_ACTIVE)) {
       if (i->second.last_active < cutoff)
         ++inactive;
-    } else if (! (i->second.state & PG_STATE_CLEAN)) {
+    }
+    if (! (i->second.state & PG_STATE_CLEAN)) {
       if (i->second.last_clean < cutoff)
         ++unclean;
-    } else if (i->second.state & PG_STATE_DEGRADED) {
+    }
+    if (i->second.state & PG_STATE_DEGRADED) {
       if (i->second.last_undegraded < cutoff)
         ++degraded;
-    } else if (i->second.state & PG_STATE_UNDERSIZED) {
+    }
+    if (i->second.state & PG_STATE_UNDERSIZED) {
       if (i->second.last_fullsized < cutoff)
         ++undersized;
-    } else if (i->second.state & PG_STATE_STALE) {
+    }
+    if (i->second.state & PG_STATE_STALE) {
       if (i->second.last_unstale < cutoff)
         ++stale;
     }
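
With the independent if checks above, a placement group that is stuck in several ways is now counted in each matching bucket instead of only the first one in the old else-if chain. A small standalone sketch of the difference (the per-state cutoff timestamps are omitted for brevity):

    // Independent checks let one PG increment several "stuck" counters.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum : uint32_t { ACTIVE = 1, CLEAN = 2, DEGRADED = 4, STALE = 8 };

    int main() {
      std::vector<uint32_t> pgs = {ACTIVE | CLEAN, DEGRADED, DEGRADED | STALE};
      int inactive = 0, unclean = 0, degraded = 0, stale = 0;
      for (uint32_t st : pgs) {
        if (!(st & ACTIVE))  ++inactive;   // each test stands on its own...
        if (!(st & CLEAN))   ++unclean;    // ...so one PG can land in
        if (st & DEGRADED)   ++degraded;   // several buckets at once
        if (st & STALE)      ++stale;
      }
      std::cout << inactive << " " << unclean << " " << degraded << " " << stale << "\n";
      // prints: 2 2 2 1
    }
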
diff --git a/src/mon/PGMonitor.cc b/src/mon/PGMonitor.cc
index 341d324..9ff5d48 100644
--- a/src/mon/PGMonitor.cc
+++ b/src/mon/PGMonitor.cc
@@ -708,7 +708,8 @@ bool PGMonitor::preprocess_pg_stats(MonOpRequestRef op)
   // only if they've had the map for a while.
   if (stats->had_map_for > 30.0 &&
       mon->osdmon()->is_readable() &&
-      stats->epoch < mon->osdmon()->osdmap.get_epoch())
+      stats->epoch < mon->osdmon()->osdmap.get_epoch() &&
+      !session->proxy_con)
     mon->osdmon()->send_latest_now_nodelete(op, stats->epoch+1);
 
   // Always forward the PGStats to the leader, even if they are the same as
diff --git a/src/msg/Message.h b/src/msg/Message.h
index 9ea4536..06b894a 100644
--- a/src/msg/Message.h
+++ b/src/msg/Message.h
@@ -423,8 +423,8 @@ public:
   uint64_t get_tid() const { return header.tid; }
   void set_tid(uint64_t t) { header.tid = t; }
 
-  unsigned get_seq() const { return header.seq; }
-  void set_seq(unsigned s) { header.seq = s; }
+  uint64_t get_seq() const { return header.seq; }
+  void set_seq(uint64_t s) { header.seq = s; }
 
   unsigned get_priority() const { return header.priority; }
   void set_priority(__s16 p) { header.priority = p; }
diff --git a/src/msg/async/AsyncConnection.h b/src/msg/async/AsyncConnection.h
index 416bccb..45ec461 100644
--- a/src/msg/async/AsyncConnection.h
+++ b/src/msg/async/AsyncConnection.h
@@ -278,8 +278,8 @@ class AsyncConnection : public Connection {
   PerfCounters *logger;
   int global_seq;
   __u32 connect_seq, peer_global_seq;
-  atomic_t out_seq;
-  atomic_t ack_left, in_seq;
+  atomic64_t out_seq;
+  atomic64_t ack_left, in_seq;
   int state;
   int state_after_send;
   int sd;
diff --git a/src/msg/simple/Pipe.cc b/src/msg/simple/Pipe.cc
index cfb1986..e590b7e 100644
--- a/src/msg/simple/Pipe.cc
+++ b/src/msg/simple/Pipe.cc
@@ -967,12 +967,17 @@ int Pipe::connect()
     ldout(msgr->cct,2) << "connect couldn't read peer addrs, " << cpp_strerror(rc) << dendl;
     goto fail;
   }
-  {
+  try {
     bufferlist::iterator p = addrbl.begin();
     ::decode(paddr, p);
     ::decode(peer_addr_for_me, p);
-    port = peer_addr_for_me.get_port();
   }
+  catch (buffer::error& e) {
+    ldout(msgr->cct,2) << "connect couldn't decode peer addrs: " << e.what()
+		       << dendl;
+    goto fail;
+  }
+  port = peer_addr_for_me.get_port();
 
   ldout(msgr->cct,20) << "connect read peer addr " << paddr << " on socket " << sd << dendl;
   if (peer_addr != paddr) {
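
The connect path above now treats a malformed peer address blob as a connection failure instead of letting the decode exception escape. A standalone sketch of that defensive-decode pattern; decode_error and decode_addr are stand-ins for the bufferlist decode machinery:

    // Treat a decode failure as a handled connection error, not a crash.
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct decode_error : std::runtime_error {
      using std::runtime_error::runtime_error;
    };

    struct Addr {
      std::string host;
      int port;
    };

    Addr decode_addr(const std::vector<unsigned char>& buf) {
      if (buf.size() < 2)
        throw decode_error("truncated address blob");
      Addr a;
      a.host = "peer";
      a.port = (buf[0] << 8) | buf[1];
      return a;
    }

    bool handle_peer_blob(const std::vector<unsigned char>& buf) {
      Addr peer;
      try {
        peer = decode_addr(buf);
      } catch (const decode_error& e) {
        std::cerr << "connect couldn't decode peer addrs: " << e.what() << "\n";
        return false;                      // i.e. "goto fail" in the original
      }
      std::cout << "peer port " << peer.port << "\n";  // only used after a clean decode
      return true;
    }

    int main() {
      handle_peer_blob({0x1f, 0x90});      // ok: port 8080
      handle_peer_blob({0x1f});            // malformed: handled, no crash
    }
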
diff --git a/src/msg/simple/Pipe.h b/src/msg/simple/Pipe.h
index d9e346c..c9a166e 100644
--- a/src/msg/simple/Pipe.h
+++ b/src/msg/simple/Pipe.h
@@ -275,7 +275,7 @@ static const int SM_IOV_MAX = (IOV_MAX >= 1024 ? IOV_MAX / 4 : IOV_MAX);
     static const Pipe& Server(int s);
     static const Pipe& Client(const entity_addr_t& pi);
 
-    __u32 get_out_seq() { return out_seq; }
+    uint64_t get_out_seq() { return out_seq; }
 
     bool is_queued() { return !out_q.empty() || send_keepalive || send_keepalive_ack; }
 
diff --git a/src/msg/simple/SimpleMessenger.cc b/src/msg/simple/SimpleMessenger.cc
index 5c257ab..f0ea282 100644
--- a/src/msg/simple/SimpleMessenger.cc
+++ b/src/msg/simple/SimpleMessenger.cc
@@ -574,6 +574,10 @@ void SimpleMessenger::wait()
       p->unregister_pipe();
       p->pipe_lock.Lock();
       p->stop_and_wait();
+      // don't generate an event here; we're shutting down anyway.
+      PipeConnectionRef con = p->connection_state;
+      if (con)
+	con->clear_pipe(p);
       p->pipe_lock.Unlock();
     }
 
diff --git a/src/msg/xio/XioConnection.h b/src/msg/xio/XioConnection.h
index e55ea98..d62a57e 100644
--- a/src/msg/xio/XioConnection.h
+++ b/src/msg/xio/XioConnection.h
@@ -93,18 +93,18 @@ private:
     /* XXX */
     uint32_t reconnects;
     uint32_t connect_seq, peer_global_seq;
-    uint32_t in_seq, out_seq_acked; // atomic<uint64_t>, got receipt
-    atomic_t out_seq; // atomic<uint32_t>
+    uint64_t in_seq, out_seq_acked; // atomic<uint64_t>, got receipt
+    atomic64_t out_seq; // atomic<uint32_t>
 
     lifecycle() : state(lifecycle::INIT), reconnects(0), connect_seq(0),
 		  peer_global_seq(0), in_seq(0), out_seq_acked(0), 
 		  out_seq(0) {}
 
-    void set_in_seq(uint32_t seq) {
+    void set_in_seq(uint64_t seq) {
       in_seq = seq;
     }
 
-    uint32_t next_out_seq() {
+    uint64_t next_out_seq() {
       return out_seq.inc();
     }
 
@@ -139,8 +139,8 @@ private:
 
     uint32_t reconnects;
     uint32_t connect_seq, global_seq, peer_global_seq;
-    uint32_t in_seq, out_seq_acked; // atomic<uint64_t>, got receipt
-    atomic_t out_seq; // atomic<uint32_t>
+    uint64_t in_seq, out_seq_acked; // atomic<uint64_t>, got receipt
+    atomic64_t out_seq; // atomic<uint64_t>
 
     uint32_t flags;
 
@@ -168,11 +168,11 @@ private:
       return startup_state.read();
     }
 
-    void set_in_seq(uint32_t seq) {
+    void set_in_seq(uint64_t seq) {
       in_seq = seq;
     }
 
-    uint32_t next_out_seq() {
+    uint64_t next_out_seq() {
       return out_seq.inc();
     };
 
@@ -329,7 +329,7 @@ typedef boost::intrusive_ptr<XioConnection> XioConnectionRef;
 class XioLoopbackConnection : public Connection
 {
 private:
-  atomic_t seq;
+  atomic64_t seq;
 public:
   explicit XioLoopbackConnection(Messenger *m) : Connection(m->cct, m), seq(0)
     {
@@ -350,11 +350,11 @@ public:
   void mark_down() override {}
   void mark_disposable() override {}
 
-  uint32_t get_seq() {
+  uint64_t get_seq() {
     return seq.read();
   }
 
-  uint32_t next_seq() {
+  uint64_t next_seq() {
     return seq.inc();
   }
 };
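
The messenger changes above (Message.h, AsyncConnection.h, Pipe.h, XioConnection.h) widen message sequence counters from 32 to 64 bits. A short standalone illustration of why: a 32-bit sequence wraps to zero after about 4.3 billion messages, which a long-lived connection can reach, while a 64-bit counter will not wrap in practice.

    #include <cstdint>
    #include <iostream>

    int main() {
      uint32_t seq32 = UINT32_MAX;   // last representable 32-bit sequence
      uint64_t seq64 = UINT32_MAX;
      ++seq32;                       // wraps to 0: looks like an ancient/duplicate message
      ++seq64;                       // keeps counting
      std::cout << "32-bit next seq: " << seq32 << "\n";   // 0
      std::cout << "64-bit next seq: " << seq64 << "\n";   // 4294967296
    }
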
diff --git a/src/os/filestore/DBObjectMap.cc b/src/os/filestore/DBObjectMap.cc
index 67e17bd..7e9dfe6 100644
--- a/src/os/filestore/DBObjectMap.cc
+++ b/src/os/filestore/DBObjectMap.cc
@@ -902,10 +902,10 @@ int DBObjectMap::clone(const ghobject_t &oid,
   {
     Header destination = lookup_map_header(*ltarget, target);
     if (destination) {
-      remove_map_header(*ltarget, target, destination, t);
       if (check_spos(target, destination, spos))
 	return 0;
       destination->num_children--;
+      remove_map_header(*ltarget, target, destination, t);
       _clear(destination, t);
     }
   }
diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc
index ce1cfe2..d2e969f 100644
--- a/src/os/filestore/FileStore.cc
+++ b/src/os/filestore/FileStore.cc
@@ -473,13 +473,7 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o,
     }
 
     if (!force_clear_omap) {
-      if (hardlink == 0) {
-          if (!m_disable_wbthrottle) {
-	    wbthrottle.clear_object(o); // should be only non-cache ref
-          }
-	  fdcache.clear(o);
-	  return 0;
-      } else if (hardlink == 1) {
+      if (hardlink == 0 || hardlink == 1) {
 	  force_clear_omap = true;
       }
     }
@@ -506,6 +500,12 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o,
       if (!backend->can_checkpoint())
 	object_map->sync(&o, &spos);
     }
+    if (hardlink == 0) {
+      if (!m_disable_wbthrottle) {
+	wbthrottle.clear_object(o); // should be only non-cache ref
+      }
+      return 0;
+    }
   }
   r = index->unlink(o);
   if (r < 0) {
@@ -2234,10 +2234,12 @@ void FileStore::_set_replay_guard(int fd,
   // first make sure the previous operation commits
   ::fsync(fd);
 
-  // sync object_map too.  even if this object has a header or keys,
-  // it have had them in the past and then removed them, so always
-  // sync.
-  object_map->sync(hoid, &spos);
+  if (!in_progress) {
+    // sync object_map too.  even if this object has a header or keys,
+    // it have had them in the past and then removed them, so always
+    // sync.
+    object_map->sync(hoid, &spos);
+  }
 
   _inject_failure();
 
@@ -2275,7 +2277,8 @@ void FileStore::_close_replay_guard(const coll_t& cid,
   VOID_TEMP_FAILURE_RETRY(::close(fd));
 }
 
-void FileStore::_close_replay_guard(int fd, const SequencerPosition& spos)
+void FileStore::_close_replay_guard(int fd, const SequencerPosition& spos,
+				    const ghobject_t *hoid)
 {
   if (backend->can_checkpoint())
     return;
@@ -2284,6 +2287,11 @@ void FileStore::_close_replay_guard(int fd, const SequencerPosition& spos)
 
   _inject_failure();
 
+  // sync object_map too.  even if this object has a header or keys,
+  // it have had them in the past and then removed them, so always
+  // sync.
+  object_map->sync(hoid, &spos);
+
   // then record that we are done with this operation
   bufferlist v(40);
   ::encode(spos, v);
@@ -2882,17 +2890,17 @@ void FileStore::_do_transaction(
 
 	if (r == -ENOENT && (op->op == Transaction::OP_CLONERANGE ||
 			     op->op == Transaction::OP_CLONE ||
-			     op->op == Transaction::OP_CLONERANGE2))
+			     op->op == Transaction::OP_CLONERANGE2)) {
 	  msg = "ENOENT on clone suggests osd bug";
-
-	if (r == -ENOSPC)
+	} else if (r == -ENOSPC) {
 	  // For now, if we hit _any_ ENOSPC, crash, before we do any damage
 	  // by partially applying transactions.
 	  msg = "ENOSPC handling not implemented";
-
-	if (r == -ENOTEMPTY) {
+	} else if (r == -ENOTEMPTY) {
 	  msg = "ENOTEMPTY suggests garbage data in osd data dir";
-	}
+	} else if (r == -EPERM) {
+          msg = "EPERM suggests file(s) in osd data dir not owned by ceph user, or leveldb corruption";
+        }
 
 	dout(0) << " error " << cpp_strerror(r) << " not handled on operation " << op
 		<< " (" << spos << ", or op " << spos.op << ", counting from 0)" << dendl;
@@ -3046,11 +3054,12 @@ int FileStore::read(
 int FileStore::_do_fiemap(int fd, uint64_t offset, size_t len,
                           map<uint64_t, uint64_t> *m)
 {
-  struct fiemap *fiemap = NULL;
   uint64_t i;
   struct fiemap_extent *extent = NULL;
+  struct fiemap *fiemap = NULL;
   int r = 0;
 
+more:
   r = backend->do_fiemap(fd, offset, len, &fiemap);
   if (r < 0)
     return r;
@@ -3070,6 +3079,7 @@ int FileStore::_do_fiemap(int fd, uint64_t offset, size_t len,
 
   i = 0;
 
+  struct fiemap_extent *last = nullptr;
   while (i < fiemap->fm_mapped_extents) {
     struct fiemap_extent *next = extent + 1;
 
@@ -3090,9 +3100,16 @@ int FileStore::_do_fiemap(int fd, uint64_t offset, size_t len,
       extent->fe_length = offset + len - extent->fe_logical;
     (*m)[extent->fe_logical] = extent->fe_length;
     i++;
-    extent++;
+    last = extent++;
   }
+  const bool is_last = last->fe_flags & FIEMAP_EXTENT_LAST;
   free(fiemap);
+  if (!is_last) {
+    uint64_t xoffset = last->fe_logical + last->fe_length - offset;
+    offset = last->fe_logical + last->fe_length;
+    len -= xoffset;
+    goto more;
+  }
 
   return r;
 }
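
The loop above re-issues the extent query until the backend reports the final extent, instead of trusting a single call to cover the whole range. A generic standalone sketch of that "resume past the last extent" loop; query() is a stand-in, not the FIEMAP ioctl:

    // Keep querying until the backend marks an extent as the last one.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    struct Extent { uint64_t logical; uint64_t length; bool last; };

    // Pretend backend that returns at most two 4 KiB extents per call.
    std::vector<Extent> query(uint64_t offset, uint64_t len) {
      std::vector<Extent> out;
      for (uint64_t o = offset; o < offset + len && out.size() < 2; o += 4096)
        out.push_back({o, 4096, o + 4096 >= offset + len});
      return out;
    }

    int main() {
      uint64_t offset = 0, len = 16384;
      std::map<uint64_t, uint64_t> m;           // logical offset -> length
      for (;;) {
        std::vector<Extent> extents = query(offset, len);
        if (extents.empty())
          break;
        for (const auto& e : extents)
          m[e.logical] = e.length;
        const Extent& last = extents.back();
        if (last.last)
          break;                                // that was the final extent
        uint64_t consumed = last.logical + last.length - offset;
        offset = last.logical + last.length;    // resume just past what we have
        len -= consumed;
      }
      for (const auto& kv : m)
        std::cout << kv.first << "+" << kv.second << "\n";  // four 4 KiB extents
    }
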
@@ -5210,18 +5227,30 @@ int FileStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& o
       } else {
 	assert(0 == "ERROR: source must exist");
       }
-      return 0;
-    }
-    if (dstcmp > 0) {      // if dstcmp == 0 the guard already says "in-progress"
-      _set_replay_guard(**fd, spos, &o, true);
-    }
 
-    r = lfn_link(oldcid, c, oldoid, o);
-    if (replaying && !backend->can_checkpoint() &&
-	r == -EEXIST)    // crashed between link() and set_replay_guard()
-      r = 0;
+      if (!replaying) {
+	return 0;
+      }
+      if (allow_enoent && dstcmp > 0) { // if dstcmp == 0, try_rename was started.
+	return 0;
+      }
 
-    _inject_failure();
+      r = 0; // don't know if object_map was cloned
+    } else {
+      if (dstcmp > 0) { // if dstcmp == 0 the guard already says "in-progress"
+	_set_replay_guard(**fd, spos, &o, true);
+      }
+
+      r = lfn_link(oldcid, c, oldoid, o);
+      if (replaying && !backend->can_checkpoint() &&
+	  r == -EEXIST)    // crashed between link() and set_replay_guard()
+	r = 0;
+
+      lfn_close(fd);
+      fd = FDRef();
+
+      _inject_failure();
+    }
 
     if (r == 0) {
       // the name changed; link the omap content
@@ -5232,9 +5261,6 @@ int FileStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& o
 
     _inject_failure();
 
-    lfn_close(fd);
-    fd = FDRef();
-
     if (r == 0)
       r = lfn_unlink(oldcid, oldoid, spos, true);
 
@@ -5243,7 +5269,7 @@ int FileStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& o
 
     // close guard on object so we don't do this again
     if (r == 0) {
-      _close_replay_guard(**fd, spos);
+      _close_replay_guard(**fd, spos, &o);
       lfn_close(fd);
     }
   }
diff --git a/src/os/filestore/FileStore.h b/src/os/filestore/FileStore.h
index d1fd595..9d3f9c8 100644
--- a/src/os/filestore/FileStore.h
+++ b/src/os/filestore/FileStore.h
@@ -499,7 +499,8 @@ public:
 				const SequencerPosition &spos);
 
   /// close a replay guard opened with in_progress=true
-  void _close_replay_guard(int fd, const SequencerPosition& spos);
+  void _close_replay_guard(int fd, const SequencerPosition& spos,
+			   const ghobject_t *oid=0);
   void _close_replay_guard(const coll_t& cid, const SequencerPosition& spos);
 
   /**
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 6b503a5..8a480b6 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -1422,11 +1422,14 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op)
 
   dout(7) << *pg << " misdirected op in " << m->get_map_epoch() << dendl;
   clog->warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
-	      << " pg " << m->get_pg()
-	      << " to osd." << whoami
-	      << " not " << pg->acting
-	      << " in e" << m->get_map_epoch() << "/" << osdmap->get_epoch() << "\n";
-  reply_op_error(op, -ENXIO);
+	       << " pg " << m->get_pg()
+	       << " to osd." << whoami
+	       << " not " << pg->acting
+	       << " in e" << m->get_map_epoch() << "/" << osdmap->get_epoch()
+	       << "\n";
+  if (g_conf->osd_enxio_on_misdirected_op) {
+    reply_op_error(op, -ENXIO);
+  }
 }
 
 
@@ -2939,7 +2942,6 @@ void OSD::add_newly_split_pg(PG *pg, PG::RecoveryCtx *rctx)
   pg->get_osdmap()->pg_to_up_acting_osds(pg->info.pgid.pgid, up, acting);
   int role = OSDMap::calc_pg_role(service.whoami, acting);
   pg->set_role(role);
-  pg->reg_next_scrub();
   pg->handle_loaded(rctx);
   pg->write_if_dirty(*(rctx->transaction));
   pg->queue_null(e, e);
@@ -6436,6 +6438,11 @@ void OSD::sched_scrub()
 	break;
       }
 
+      if (!cct->_conf->osd_scrub_during_recovery && is_recovery_active()) {
+        dout(10) << __func__ << "not scheduling scrub of " << scrub.pgid << " due to active recovery ops" << dendl;
+        break;
+      }
+
       PG *pg = _lookup_lock_pg(scrub.pgid);
       if (!pg)
 	continue;
@@ -8397,6 +8404,14 @@ void OSD::finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue)
   recovery_wq.unlock();
 }
 
+bool OSD::is_recovery_active()
+{
+  if (recovery_ops_active > 0)
+    return true;
+
+  return false;
+}
+
 // =========================================================
 // OPS
 
@@ -8539,14 +8554,16 @@ void OSD::handle_op(OpRequestRef& op, OSDMapRef& osdmap)
   if (!send_map->osd_is_valid_op_target(pgid.pgid, whoami)) {
     dout(7) << "we are invalid target" << dendl;
     clog->warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
-		      << " pg " << m->get_pg()
-		      << " to osd." << whoami
-		      << " in e" << osdmap->get_epoch()
-		      << ", client e" << m->get_map_epoch()
-		      << " pg " << pgid
-		      << " features " << m->get_connection()->get_features()
-		      << "\n";
-    service.reply_op_error(op, -ENXIO);
+		 << " pg " << m->get_pg()
+		 << " to osd." << whoami
+		 << " in e" << osdmap->get_epoch()
+		 << ", client e" << m->get_map_epoch()
+		 << " pg " << pgid
+		 << " features " << m->get_connection()->get_features()
+		 << "\n";
+    if (g_conf->osd_enxio_on_misdirected_op) {
+      service.reply_op_error(op, -ENXIO);
+    }
     return;
   }
 
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 1d918ff..8438165 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -2283,6 +2283,7 @@ protected:
 
   void start_recovery_op(PG *pg, const hobject_t& soid);
   void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
+  bool is_recovery_active();
   void do_recovery(PG *pg, ThreadPool::TPHandle &handle);
   bool _recover_now();
 
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index 39b5bf8..27a77e3 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -158,7 +158,7 @@ void PGPool::update(OSDMapRef map)
   auid = pi->auid;
   name = map->get_pool_name(id);
   bool updated = false;
-  if ((map->get_epoch() == cached_epoch + 1) &&
+  if ((map->get_epoch() != cached_epoch + 1) ||
       (pi->get_snap_epoch() == map->get_epoch())) {
     updated = true;
     pi->build_removed_snaps(newly_removed_snaps);
@@ -176,6 +176,16 @@ void PGPool::update(OSDMapRef map)
     }
     snapc = pi->get_snap_context();
   } else {
+    /* 1) map->get_epoch() == cached_epoch + 1 &&
+     * 2) pi->get_snap_epoch() != map->get_epoch()
+     *
+     * From the if branch, 1 && 2 must be true.  From 2, we know that
+     * this map didn't change the set of removed snaps.  From 1, we
+     * know that our cached_removed_snaps matches the previous map.
+     * Thus, from 1 && 2, cached_removed snaps matches the current
+     * set of removed snaps and all we have to do is clear
+     * newly_removed_snaps.
+     */
     newly_removed_snaps.clear();
   }
   cached_epoch = map->get_epoch();
@@ -3428,6 +3438,7 @@ void PG::reg_next_scrub()
   double scrub_min_interval = 0, scrub_max_interval = 0;
   pool.info.opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &scrub_min_interval);
   pool.info.opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &scrub_max_interval);
+  assert(scrubber.scrub_reg_stamp == utime_t());
   scrubber.scrub_reg_stamp = osd->reg_pg_scrub(info.pgid,
 					       reg_stamp,
 					       scrub_min_interval,
@@ -3437,8 +3448,10 @@ void PG::reg_next_scrub()
 
 void PG::unreg_next_scrub()
 {
-  if (is_primary())
+  if (is_primary()) {
     osd->unreg_pg_scrub(info.pgid, scrubber.scrub_reg_stamp);
+    scrubber.scrub_reg_stamp = utime_t();
+  }
 }
 
 void PG::sub_op_scrub_map(OpRequestRef op)
@@ -5583,6 +5596,19 @@ void PG::handle_advance_map(
 	   << dendl;
   update_osdmap_ref(osdmap);
   pool.update(osdmap);
+  if (cct->_conf->osd_debug_verify_cached_snaps) {
+    interval_set<snapid_t> actual_removed_snaps;
+    const pg_pool_t *pi = osdmap->get_pg_pool(info.pgid.pool());
+    assert(pi);
+    pi->build_removed_snaps(actual_removed_snaps);
+    if (!(actual_removed_snaps == pool.cached_removed_snaps)) {
+      derr << __func__ << ": mismatch between the actual removed snaps "
+	   << actual_removed_snaps << " and pool.cached_removed_snaps "
+	   << " pool.cached_removed_snaps " << pool.cached_removed_snaps
+	   << dendl;
+    }
+    assert(actual_removed_snaps == pool.cached_removed_snaps);
+  }
   AdvMap evt(
     osdmap, lastmap, newup, up_primary,
     newacting, acting_primary);
@@ -6072,6 +6098,7 @@ PG::RecoveryState::Backfilling::Backfilling(my_context ctx)
   pg->state_clear(PG_STATE_BACKFILL_TOOFULL);
   pg->state_clear(PG_STATE_BACKFILL_WAIT);
   pg->state_set(PG_STATE_BACKFILL);
+  pg->publish_stats_to_osd();
 }
 
 boost::statechart::result
@@ -6127,6 +6154,7 @@ PG::RecoveryState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_con
   context< RecoveryMachine >().log_enter(state_name);
   PG *pg = context< RecoveryMachine >().pg;
   pg->state_set(PG_STATE_BACKFILL_WAIT);
+  pg->publish_stats_to_osd();
   post_event(RemoteBackfillReserved());
 }
 
@@ -6192,6 +6220,7 @@ PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteReservationReje
 
   pg->state_clear(PG_STATE_BACKFILL_WAIT);
   pg->state_set(PG_STATE_BACKFILL_TOOFULL);
+  pg->publish_stats_to_osd();
 
   pg->schedule_backfill_full_retry();
 
@@ -6212,6 +6241,7 @@ PG::RecoveryState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_conte
       pg, pg->get_osdmap()->get_epoch(),
       LocalBackfillReserved()),
     pg->get_backfill_priority());
+  pg->publish_stats_to_osd();
 }
 
 void PG::RecoveryState::WaitLocalBackfillReserved::exit()
@@ -6228,6 +6258,8 @@ PG::RecoveryState::NotBackfilling::NotBackfilling(my_context ctx)
     NamedState(context< RecoveryMachine >().pg->cct, "Started/Primary/Active/NotBackfilling")
 {
   context< RecoveryMachine >().log_enter(state_name);
+  PG *pg = context< RecoveryMachine >().pg;
+  pg->publish_stats_to_osd();
 }
 
 boost::statechart::result
@@ -6441,6 +6473,7 @@ PG::RecoveryState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_conte
       pg, pg->get_osdmap()->get_epoch(),
       LocalRecoveryReserved()),
     pg->get_recovery_priority());
+  pg->publish_stats_to_osd();
 }
 
 void PG::RecoveryState::WaitLocalRecoveryReserved::exit()
@@ -6500,6 +6533,7 @@ PG::RecoveryState::Recovering::Recovering(my_context ctx)
   PG *pg = context< RecoveryMachine >().pg;
   pg->state_clear(PG_STATE_RECOVERY_WAIT);
   pg->state_set(PG_STATE_RECOVERING);
+  pg->publish_stats_to_osd();
   pg->osd->queue_for_recovery(pg);
 }
 
diff --git a/src/osd/PGBackend.cc b/src/osd/PGBackend.cc
index 7351e55..35183cc 100644
--- a/src/osd/PGBackend.cc
+++ b/src/osd/PGBackend.cc
@@ -133,7 +133,7 @@ int PGBackend::objects_list_partial(
   int r = 0;
   while (!_next.is_max() && ls->size() < (unsigned)min) {
     vector<ghobject_t> objects;
-    int r = store->collection_list(
+    r = store->collection_list(
       ch,
       _next,
       ghobject_t::get_max(),
@@ -141,8 +141,10 @@ int PGBackend::objects_list_partial(
       max - ls->size(),
       &objects,
       &_next);
-    if (r != 0)
+    if (r != 0) {
+      derr << __func__ << " list collection " << ch << " got: " << cpp_strerror(r) << dendl;
       break;
+    }
     for (vector<ghobject_t>::iterator i = objects.begin();
 	 i != objects.end();
 	 ++i) {
diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc
index 32b9f17..708ca78 100644
--- a/src/osd/ReplicatedBackend.cc
+++ b/src/osd/ReplicatedBackend.cc
@@ -1985,7 +1985,9 @@ int ReplicatedBackend::build_push_op(const ObjectRecoveryInfo &recovery_info,
 	 iter->valid();
 	 iter->next(false)) {
       if (!out_op->omap_entries.empty() &&
-	  available <= (iter->key().size() + iter->value().length()))
+	  ((cct->_conf->osd_recovery_max_omap_entries_per_chunk > 0 &&
+	    out_op->omap_entries.size() >= cct->_conf->osd_recovery_max_omap_entries_per_chunk) ||
+	   available <= iter->key().size() + iter->value().length()))
 	break;
       out_op->omap_entries.insert(make_pair(iter->key(), iter->value()));
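
The condition above caps a recovery chunk by a configurable entry count (0 meaning unlimited) as well as by the remaining byte budget. A standalone sketch of that dual-limit chunking rule; names and limits are illustrative:

    // Stop filling a chunk when either the entry cap or the byte budget is hit.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    std::map<std::string, std::string> take_chunk(
        std::map<std::string, std::string>::const_iterator& it,
        std::map<std::string, std::string>::const_iterator end,
        uint64_t byte_budget, uint64_t max_entries) {
      std::map<std::string, std::string> chunk;
      uint64_t available = byte_budget;
      for (; it != end; ++it) {
        if (!chunk.empty() &&
            ((max_entries > 0 && chunk.size() >= max_entries) ||
             available <= it->first.size() + it->second.size()))
          break;                                   // chunk is full by count or by bytes
        chunk.insert(*it);
        available -= std::min<uint64_t>(available, it->first.size() + it->second.size());
      }
      return chunk;
    }

    int main() {
      std::map<std::string, std::string> omap;
      for (int i = 0; i < 10; i++)
        omap["key" + std::to_string(i)] = std::string(100, 'x');
      auto it = omap.cbegin();
      int chunks = 0;
      while (it != omap.cend()) {
        auto c = take_chunk(it, omap.cend(), 1 << 20, 4);  // cap at 4 entries per chunk
        std::cout << "chunk " << ++chunks << ": " << c.size() << " entries\n";
      }
      // prints chunks of 4, 4 and 2 entries
    }
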
 
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index 4b879d9..dcc9860 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -9685,13 +9685,13 @@ void ReplicatedPG::do_update_log_missing(OpRequestRef &op)
   assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING);
   ObjectStore::Transaction t;
   append_log_entries_update_missing(m->entries, t);
-  // TODO FIX
 
   Context *complete = new FunctionContext(
-      [=](int) {
-	MOSDPGUpdateLogMissing *msg =
-	  static_cast<MOSDPGUpdateLogMissing*>(
-	    op->get_req());
+    [=](int) {
+      MOSDPGUpdateLogMissing *msg = static_cast<MOSDPGUpdateLogMissing*>(
+	op->get_req());
+      lock();
+      if (!pg_has_reset_since(msg->get_epoch())) {
 	MOSDPGUpdateLogMissingReply *reply =
 	  new MOSDPGUpdateLogMissingReply(
 	    spg_t(info.pgid.pgid, primary_shard().shard),
@@ -9700,7 +9700,9 @@ void ReplicatedPG::do_update_log_missing(OpRequestRef &op)
 	    msg->get_tid());
 	reply->set_priority(CEPH_MSG_PRIO_HIGH);
 	msg->get_connection()->send_message(reply);
-      });
+      }
+      unlock();
+    });
 
   /* Hack to work around the fact that ReplicatedBackend sends
    * ack+commit if commit happens first */
diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc
index 2a1e76e..454146e 100644
--- a/src/osdc/ObjectCacher.cc
+++ b/src/osdc/ObjectCacher.cc
@@ -1039,7 +1039,6 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid,
       }
     }
 
-    list <BufferHead*> hit;
     // apply to bh's!
     for (map<loff_t, BufferHead*>::iterator p = ob->data_lower_bound(start);
 	 p != ob->data.end();
@@ -1074,7 +1073,6 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid,
 	bh->set_journal_tid(0);
 	if (bh->get_nocache())
 	  bh_lru_rest.lru_bottouch(bh);
-	hit.push_back(bh);
 	ldout(cct, 10) << "bh_write_commit clean " << *bh << dendl;
       } else {
 	mark_dirty(bh);
@@ -1083,13 +1081,6 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid,
 		       << dendl;
       }
     }
-
-    for (list<BufferHead*>::iterator bh = hit.begin();
-	bh != hit.end();
-	++bh) {
-      assert(*bh);
-      ob->try_merge_bh(*bh);
-    }
   }
 
   // update last_commit.
@@ -2524,5 +2515,7 @@ void ObjectCacher::bh_remove(Object *ob, BufferHead *bh)
     dirty_or_tx_bh.erase(bh);
   }
   bh_stat_sub(bh);
+  if (get_stat_dirty_waiting() > 0)
+    stat_cond.Signal();
 }
 
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index aa63157..4ac71d7 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -318,7 +318,7 @@ class CephFSVolumeClient(object):
 
         log.debug("Recovered from partial auth updates (if any).")
 
-    def _recover_auth_meta(auth_id, auth_meta):
+    def _recover_auth_meta(self, auth_id, auth_meta):
         """
         Call me after locking the auth meta file.
         """
@@ -329,6 +329,7 @@ class CephFSVolumeClient(object):
                 continue
 
             (group_id, volume_id) = volume.split('/')
+            group_id = group_id if group_id is not 'None' else None
             volume_path = VolumePath(group_id, volume_id)
             access_level = volume_data['access_level']
 
@@ -338,7 +339,7 @@ class CephFSVolumeClient(object):
                 # No VMeta update indicates that there was no auth update
                 # in Ceph either. So it's safe to remove corresponding
                 # partial update in AMeta.
-                if auth_id not in vol_meta['auths']:
+                if not vol_meta or auth_id not in vol_meta['auths']:
                     remove_volumes.append(volume)
                     continue
 
@@ -1083,7 +1084,7 @@ class CephFSVolumeClient(object):
                     'caps': [
                         'mds', mds_cap_str,
                         'osd', osd_cap_str,
-                        'mon', cap['caps'].get('mon')]
+                        'mon', cap['caps'].get('mon', 'allow r')]
                 })
             caps = self._rados_command(
                 'auth get',
@@ -1217,7 +1218,7 @@ class CephFSVolumeClient(object):
                         'caps': [
                             'mds', mds_cap_str,
                             'osd', osd_cap_str,
-                            'mon', cap['caps'].get('mon')]
+                            'mon', cap['caps'].get('mon', 'allow r')]
                     })
 
         # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
diff --git a/src/pybind/rbd/rbd.pyx b/src/pybind/rbd/rbd.pyx
index 5dadc37..fa79c33 100644
--- a/src/pybind/rbd/rbd.pyx
+++ b/src/pybind/rbd/rbd.pyx
@@ -123,6 +123,10 @@ cdef extern from "rbd/librbd.h" nogil:
         time_t last_update
         bint up
 
+    ctypedef enum rbd_lock_mode_t:
+        _RBD_LOCK_MODE_EXCLUSIVE "RBD_LOCK_MODE_EXCLUSIVE"
+        _RBD_LOCK_MODE_SHARED "RBD_LOCK_MODE_SHARED"
+
     void rbd_version(int *major, int *minor, int *extra)
 
     void rbd_image_options_create(rbd_image_options_t* opts)
@@ -190,12 +194,14 @@ cdef extern from "rbd/librbd.h" nogil:
     int rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit)
     int rbd_get_stripe_count(rbd_image_t image, uint64_t *stripe_count)
     int rbd_get_overlap(rbd_image_t image, uint64_t *overlap)
+    int rbd_get_id(rbd_image_t image, char *id, size_t id_len)
+    int rbd_get_block_name_prefix(rbd_image_t image, char *prefix,
+                                  size_t prefix_len)
     int rbd_get_parent_info(rbd_image_t image,
                             char *parent_poolname, size_t ppoolnamelen,
                             char *parent_name, size_t pnamelen,
                             char *parent_snapname, size_t psnapnamelen)
     int rbd_get_flags(rbd_image_t image, uint64_t *flags)
-    int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner)
     ssize_t rbd_read2(rbd_image_t image, uint64_t ofs, size_t len,
                       char *buf, int op_flags)
     ssize_t rbd_write2(rbd_image_t image, uint64_t ofs, size_t len,
@@ -233,6 +239,16 @@ cdef extern from "rbd/librbd.h" nogil:
     int rbd_break_lock(rbd_image_t image, const char *client,
                        const char *cookie)
 
+    int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner)
+    int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode)
+    int rbd_lock_release(rbd_image_t image)
+    int rbd_lock_get_owners(rbd_image_t image, rbd_lock_mode_t *lock_mode,
+                            char **lock_owners, size_t *max_lock_owners)
+    void rbd_lock_get_owners_cleanup(char **lock_owners,
+                                     size_t lock_owner_count)
+    int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
+                       char *lock_owner)
+
     # We use -9000 to propagate Python exceptions. We use except? to make sure
     # things still work as intended if -9000 happens to be a valid errno value
     # somewhere.
@@ -290,6 +306,9 @@ MIRROR_IMAGE_STATUS_STATE_REPLAYING = _MIRROR_IMAGE_STATUS_STATE_REPLAYING
 MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY = _MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY
 MIRROR_IMAGE_STATUS_STATE_STOPPED = _MIRROR_IMAGE_STATUS_STATE_STOPPED
 
+RBD_LOCK_MODE_EXCLUSIVE = _RBD_LOCK_MODE_EXCLUSIVE
+RBD_LOCK_MODE_SHARED = _RBD_LOCK_MODE_SHARED
+
 RBD_IMAGE_OPTION_FORMAT = _RBD_IMAGE_OPTION_FORMAT
 RBD_IMAGE_OPTION_FEATURES = _RBD_IMAGE_OPTION_FEATURES
 RBD_IMAGE_OPTION_ORDER = _RBD_IMAGE_OPTION_ORDER
@@ -1126,6 +1145,54 @@ cdef class Image(object):
             'parent_name'       : info.parent_name
             }
 
+    def id(self):
+        """
+        Get the RBD v2 internal image id
+
+        :returns: str - image id
+        """
+        cdef:
+            int ret = -errno.ERANGE
+            size_t size = 32
+            char *image_id = NULL
+        try:
+            while ret == -errno.ERANGE and size <= 4096:
+                image_id =  <char *>realloc_chk(image_id, size)
+                with nogil:
+                    ret = rbd_get_id(self.image, image_id, size)
+                if ret == -errno.ERANGE:
+                    size *= 2
+
+            if ret != 0:
+                raise make_ex(ret, 'error getting id for image %s' % (self.name,))
+            return decode_cstr(image_id)
+        finally:
+            free(image_id)
+
+    def block_name_prefix(self):
+        """
+        Get the RBD block name prefix
+
+        :returns: str - block name prefix
+        """
+        cdef:
+            int ret = -errno.ERANGE
+            size_t size = 32
+            char *prefix = NULL
+        try:
+            while ret == -errno.ERANGE and size <= 4096:
+                prefix =  <char *>realloc_chk(prefix, size)
+                with nogil:
+                    ret = rbd_get_block_name_prefix(self.image, prefix, size)
+                if ret == -errno.ERANGE:
+                    size *= 2
+
+            if ret != 0:
+                raise make_ex(ret, 'error getting block name prefix for image %s' % (self.name,))
+            return decode_cstr(prefix)
+        finally:
+            free(prefix)
+
     def parent_info(self):
         """
         Get information about a cloned image's parent (if any)
@@ -1747,6 +1814,54 @@ written." % (self.name, ret, length))
             free(c_addrs)
             free(c_tag)
 
+    def lock_acquire(self, lock_mode):
+        """
+        Acquire a managed lock on the image.
+
+        :param lock_mode: lock mode to set
+        :type lock_mode: int
+        :raises: :class:`ImageBusy` if the lock could not be acquired
+        """
+        cdef:
+            rbd_lock_mode_t _lock_mode = lock_mode
+        with nogil:
+            ret = rbd_lock_acquire(self.image, _lock_mode)
+        if ret < 0:
+            raise make_ex(ret, 'error acquiring lock on image')
+
+    def lock_release(self):
+        """
+        Release a managed lock on the image that was previously acquired.
+        """
+        with nogil:
+            ret = rbd_lock_release(self.image)
+        if ret < 0:
+            raise make_ex(ret, 'error releasing lock on image')
+
+    def lock_get_owners(self):
+        """
+        Iterate over the lock owners of an image.
+
+        :returns: :class:`LockOwnerIterator`
+        """
+        return LockOwnerIterator(self)
+
+    def lock_break(self, lock_mode, lock_owner):
+        """
+        Break the image lock held by another client.
+
+        :param lock_owner: the owner of the lock to break
+        :type lock_owner: str
+        """
+        lock_owner = cstr(lock_owner, 'lock_owner')
+        cdef:
+            rbd_lock_mode_t _lock_mode = lock_mode
+            char *_lock_owner = lock_owner
+        with nogil:
+            ret = rbd_lock_break(self.image, _lock_mode, _lock_owner)
+        if ret < 0:
+            raise make_ex(ret, 'error breaking lock on image')
+
     def lock_exclusive(self, cookie):
         """
         Take an exclusive lock on the image.
@@ -1929,6 +2044,54 @@ written." % (self.name, ret, length))
         free(c_status.description)
         return status
 
+cdef class LockOwnerIterator(object):
+    """
+    Iterator over managed lock owners for an image
+
+    Yields a dictionary containing information about the image's lock
+
+    Keys are:
+
+    * ``mode`` (int) - active lock mode
+
+    * ``owner`` (str) - lock owner name
+    """
+
+    cdef:
+        rbd_lock_mode_t lock_mode
+        char **lock_owners
+        size_t num_lock_owners
+        object image
+
+    def __init__(self, Image image):
+        self.image = image
+        self.lock_owners = NULL
+        self.num_lock_owners = 8
+        while True:
+            self.lock_owners = <char**>realloc_chk(self.lock_owners,
+                                                   self.num_lock_owners *
+                                                   sizeof(char*))
+            with nogil:
+                ret = rbd_lock_get_owners(image.image, &self.lock_mode,
+                                          self.lock_owners,
+                                          &self.num_lock_owners)
+            if ret >= 0:
+                break
+            elif ret != -errno.ERANGE:
+                raise make_ex(ret, 'error listing lock owners for image %s' % (image.name,))
+
+    def __iter__(self):
+        for i in range(self.num_lock_owners):
+            yield {
+                'mode'  : int(self.lock_mode),
+                'owner' : decode_cstr(self.lock_owners[i]),
+                }
+
+    def __dealloc__(self):
+        if self.lock_owners:
+            rbd_lock_get_owners_cleanup(self.lock_owners, self.num_lock_owners)
+            free(self.lock_owners)
+
 cdef class SnapIterator(object):
     """
     Iterator over snapshot info for an image.
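
The new id() and block_name_prefix() methods above use a retry-on-ERANGE loop: call the C API with a caller-supplied buffer and double the buffer (up to a cap) whenever the call reports it was too small. A standalone sketch of that pattern in C++; get_name() is a stand-in for the underlying C call:

    // Grow the buffer and retry while the callee reports -ERANGE.
    #include <cerrno>
    #include <cstring>
    #include <iostream>
    #include <string>
    #include <vector>

    int get_name(char* buf, size_t buf_len) {            // pretend C API
      static const char value[] = "rbd_data.example_prefix";
      if (buf_len < sizeof(value))
        return -ERANGE;                                   // tell the caller to grow the buffer
      std::memcpy(buf, value, sizeof(value));
      return 0;
    }

    int main() {
      size_t size = 8;
      std::vector<char> buf(size);
      int ret = -ERANGE;
      while (ret == -ERANGE && size <= 4096) {
        buf.resize(size);
        ret = get_name(buf.data(), buf.size());
        if (ret == -ERANGE)
          size *= 2;                                      // too small: double and retry
      }
      if (ret == 0)
        std::cout << std::string(buf.data()) << "\n";
      else
        std::cerr << "error " << ret << "\n";
    }
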
diff --git a/src/rgw/Makefile.am b/src/rgw/Makefile.am
index b083dd1..2085378 100644
--- a/src/rgw/Makefile.am
+++ b/src/rgw/Makefile.am
@@ -1,5 +1,6 @@
 if ENABLE_CLIENT
-
+if WITH_RADOS
+if WITH_RADOSGW
 # inject rgw stuff in the decoder testcase
 DENCODER_SOURCES += \
 	rgw/rgw_dencoder.cc \
@@ -18,9 +19,6 @@ DENCODER_DEPS += -lcurl -lexpat \
 	libcls_timeindex_client.la \
 	libcls_statelog_client.la
 
-if WITH_RADOS
-if WITH_RADOSGW
-
 librgw_la_SOURCES = \
 	rgw/rgw_acl.cc \
 	rgw/rgw_acl_s3.cc \
@@ -116,6 +114,11 @@ LIBRGW_DEPS += \
 	-lfcgi \
 	-ldl
 
+if WITH_OPENLDAP
+LIBRGW_DEPS += \
+        -lldap
+endif
+
 librgw_la_LIBADD = $(LIBRGW_DEPS) \
 	$(PTHREAD_LIBS) $(RESOLV_LIBS) libglobal.la \
 	$(EXTRALIBS)
diff --git a/src/rgw/librgw.cc b/src/rgw/librgw.cc
index 1b4decc..70fd69e 100644
--- a/src/rgw/librgw.cc
+++ b/src/rgw/librgw.cc
@@ -13,6 +13,7 @@
  */
 #include <sys/types.h>
 #include <string.h>
+#include <chrono>
 
 #include "include/types.h"
 #include "include/rados/librgw.h"
@@ -79,6 +80,8 @@ namespace rgw {
     m_tp.drain(&req_wq);
   }
 
+#define MIN_EXPIRE_S 120
+
   void RGWLibProcess::run()
   {
     /* write completion interval */
@@ -91,6 +94,14 @@ namespace rgw {
     /* gc loop */
     while (! shutdown) {
       lsubdout(cct, rgw, 5) << "RGWLibProcess GC" << dendl;
+
+      /* dirent invalidate timeout--basically, the upper-bound on
+       * inconsistency with the S3 namespace */
+      auto expire_s = cct->_conf->rgw_nfs_namespace_expire_secs;
+
+      /* delay between gc cycles */
+      auto delay_s = std::max(1, std::min(MIN_EXPIRE_S, expire_s/2));
+
       unique_lock uniq(mtx);
     restart:
       int cur_gen = gen;
@@ -105,7 +116,7 @@ namespace rgw {
 	  goto restart; /* invalidated */
       }
       uniq.unlock();
-      std::this_thread::sleep_for(std::chrono::seconds(120));
+      std::this_thread::sleep_for(std::chrono::seconds(delay_s));
     }
   }
 
@@ -286,7 +297,8 @@ namespace rgw {
       dout(0) << "ERROR: io->complete_request() returned " << r << dendl;
     }
     if (should_log) {
-      rgw_log_op(store, s, (op ? op->name() : "unknown"), olog);
+      rgw_log_op(store, nullptr /* !rest */, s,
+		 (op ? op->name() : "unknown"), olog);
     }
 
     int http_ret = s->err.http_ret;
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 281e7be..05c0562 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -1,4 +1,3 @@
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 
 #include <errno.h>
@@ -16,6 +15,8 @@
 #include "common/errno.h"
 #include "common/safe_io.h"
 
+#include "cls/rgw/cls_rgw_client.h"
+
 #include "global/global_init.h"
 
 #include "include/utime.h"
@@ -70,10 +71,13 @@ void _usage()
   cout << "  bucket stats               returns bucket statistics\n";
   cout << "  bucket rm                  remove bucket\n";
   cout << "  bucket check               check bucket index\n";
+  cout << "  bucket reshard             reshard bucket\n";
+  cout << "  bi get                     retrieve bucket index object entries\n";
+  cout << "  bi put                     store bucket index object entries\n";
+  cout << "  bi list                    list raw bucket index entries\n";
   cout << "  object rm                  remove object\n";
   cout << "  object unlink              unlink object from bucket index\n";
   cout << "  objects expire             run expired objects cleanup\n";
-  cout << "  period prepare             prepare a new period\n";
   cout << "  period delete              delete a period\n";
   cout << "  period get                 get period info\n";
   cout << "  period get-current         get current period info\n";
@@ -106,6 +110,11 @@ void _usage()
   cout << "  zonegroup remove           remove a zone from a zonegroup\n";
   cout << "  zonegroup rename           rename a zone group\n";
   cout << "  zonegroup list             list all zone groups set on this cluster\n";
+  cout << "  zonegroup placement list   list zonegroup's placement targets\n";
+  cout << "  zonegroup placement add    add a placement target id to a zonegroup\n";
+  cout << "  zonegroup placement modify modify a placement target of a specific zonegroup\n";
+  cout << "  zonegroup placement rm     remove a placement target from a zonegroup\n";
+  cout << "  zonegroup placement default  set a zonegroup's default placement target\n";
   cout << "  zonegroup-map get          show zonegroup-map\n";
   cout << "  zonegroup-map set          set zonegroup-map (requires infile)\n";
   cout << "  zone create                create a new zone\n";
@@ -115,6 +124,10 @@ void _usage()
   cout << "  zone set                   set zone cluster params (requires infile)\n";
   cout << "  zone list                  list all zones set on this cluster\n";
   cout << "  zone rename                rename a zone\n";
+  cout << "  zone placement list        list zone's placement targets\n";
+  cout << "  zone placement add         add a zone placement target\n";
+  cout << "  zone placement modify      modify a zone placement target\n";
+  cout << "  zone placement rm          remove a zone placement target\n";
   cout << "  pool add                   add an existing pool for data placement\n";
   cout << "  pool rm                    remove an existing pool from data placement set\n";
   cout << "  pools list                 list placement active set\n";
@@ -202,7 +215,16 @@ void _usage()
   cout << "   --source-zone             specify the source zone (for data sync)\n";
   cout << "   --default                 set entity (realm, zonegroup, zone) as default\n";
   cout << "   --read-only               set zone as read-only (when adding to zonegroup)\n";
+  cout << "   --placement-id            placement id for zonegroup placement commands\n";
+  cout << "   --tags=<list>             list of tags for zonegroup placement add and modify commands\n";
+  cout << "   --tags-add=<list>         list of tags to add for zonegroup placement modify command\n";
+  cout << "   --tags-rm=<list>          list of tags to remove for zonegroup placement modify command\n";
   cout << "   --endpoints=<list>        zone endpoints\n";
+  cout << "   --index_pool=<pool>       placement target index pool\n";
+  cout << "   --data_pool=<pool>        placement target data pool\n";
+  cout << "   --data_extra_pool=<pool>  placement target data extra (non-ec) pool\n";
+  cout << "   --placement-index-type=<type>\n";
+  cout << "                             placement target index type (normal, indexless, or #id)\n";
   cout << "   --fix                     besides checking bucket index, will also fix it\n";
   cout << "   --check-objects           bucket check: rebuilds bucket index according to\n";
   cout << "                             actual objects state\n";
@@ -228,6 +250,10 @@ void _usage()
   cout << "   --caps=<caps>             list of caps (e.g., \"usage=read, write; user=read\"\n";
   cout << "   --yes-i-really-mean-it    required for certain operations\n";
   cout << "   --reset-regions           reset regionmap when regionmap update\n";
+  cout << "   --bypass-gc               when specified with bucket deletion, triggers\n";
+  cout << "                             object deletions by not involving GC\n";
+  cout << "   --inconsistent-index      when specified with bucket deletion and bypass-gc set to true,\n";
+  cout << "                             ignores bucket index consistency\n";
   cout << "\n";
   cout << "<date> := \"YYYY-MM-DD[ hh:mm:ss]\"\n";
   cout << "\nQuota options:\n";
@@ -276,6 +302,7 @@ enum {
   OPT_BUCKET_SYNC_RUN,
   OPT_BUCKET_RM,
   OPT_BUCKET_REWRITE,
+  OPT_BUCKET_RESHARD,
   OPT_POLICY,
   OPT_POOL_ADD,
   OPT_POOL_RM,
@@ -293,6 +320,7 @@ enum {
   OPT_BI_GET,
   OPT_BI_PUT,
   OPT_BI_LIST,
+  OPT_BI_PURGE,
   OPT_OLH_GET,
   OPT_OLH_READLOG,
   OPT_QUOTA_SET,
@@ -312,6 +340,11 @@ enum {
   OPT_ZONEGROUP_LIST,
   OPT_ZONEGROUP_REMOVE,
   OPT_ZONEGROUP_RENAME,
+  OPT_ZONEGROUP_PLACEMENT_ADD,
+  OPT_ZONEGROUP_PLACEMENT_MODIFY,
+  OPT_ZONEGROUP_PLACEMENT_RM,
+  OPT_ZONEGROUP_PLACEMENT_LIST,
+  OPT_ZONEGROUP_PLACEMENT_DEFAULT,
   OPT_ZONEGROUPMAP_GET,
   OPT_ZONEGROUPMAP_SET,
   OPT_ZONEGROUPMAP_UPDATE,
@@ -323,6 +356,10 @@ enum {
   OPT_ZONE_LIST,
   OPT_ZONE_RENAME,
   OPT_ZONE_DEFAULT,
+  OPT_ZONE_PLACEMENT_ADD,
+  OPT_ZONE_PLACEMENT_MODIFY,
+  OPT_ZONE_PLACEMENT_RM,
+  OPT_ZONE_PLACEMENT_LIST,
   OPT_CAPS_ADD,
   OPT_CAPS_RM,
   OPT_METADATA_GET,
@@ -364,7 +401,6 @@ enum {
   OPT_REALM_SET,
   OPT_REALM_DEFAULT,
   OPT_REALM_PULL,
-  OPT_PERIOD_PREPARE,
   OPT_PERIOD_DELETE,
   OPT_PERIOD_GET,
   OPT_PERIOD_GET_CURRENT,
@@ -399,6 +435,7 @@ static int get_cmd(const char *cmd, const char *prev_cmd, const char *prev_prev_
       strcmp(cmd, "opstate") == 0 ||
       strcmp(cmd, "orphans") == 0 || 
       strcmp(cmd, "period") == 0 ||
+      strcmp(cmd, "placement") == 0 ||
       strcmp(cmd, "pool") == 0 ||
       strcmp(cmd, "pools") == 0 ||
       strcmp(cmd, "quota") == 0 ||
@@ -472,6 +509,8 @@ static int get_cmd(const char *cmd, const char *prev_cmd, const char *prev_prev_
       return OPT_BUCKET_RM;
     if (strcmp(cmd, "rewrite") == 0)
       return OPT_BUCKET_REWRITE;
+    if (strcmp(cmd, "reshard") == 0)
+      return OPT_BUCKET_RESHARD;
     if (strcmp(cmd, "check") == 0)
       return OPT_BUCKET_CHECK;
     if (strcmp(cmd, "sync") == 0) {
@@ -537,9 +576,9 @@ static int get_cmd(const char *cmd, const char *prev_cmd, const char *prev_prev_
       return OPT_BI_PUT;
     if (strcmp(cmd, "list") == 0)
       return OPT_BI_LIST;
+    if (strcmp(cmd, "purge") == 0)
+      return OPT_BI_PURGE;
   } else if (strcmp(prev_cmd, "period") == 0) {
-    if (strcmp(cmd, "prepare") == 0)
-      return OPT_PERIOD_PREPARE;
     if (strcmp(cmd, "delete") == 0)
       return OPT_PERIOD_DELETE;
     if (strcmp(cmd, "get") == 0)
@@ -579,6 +618,18 @@ static int get_cmd(const char *cmd, const char *prev_cmd, const char *prev_prev_
       return OPT_REALM_DEFAULT;
     if (strcmp(cmd, "pull") == 0)
       return OPT_REALM_PULL;
+  } else if ((prev_prev_cmd && strcmp(prev_prev_cmd, "zonegroup") == 0) &&
+	     (strcmp(prev_cmd, "placement") == 0)) {
+    if (strcmp(cmd, "add") == 0)
+      return OPT_ZONEGROUP_PLACEMENT_ADD;
+    if (strcmp(cmd, "modify") == 0)
+      return OPT_ZONEGROUP_PLACEMENT_MODIFY;
+    if (strcmp(cmd, "rm") == 0)
+      return OPT_ZONEGROUP_PLACEMENT_RM;
+    if (strcmp(cmd, "list") == 0)
+      return OPT_ZONEGROUP_PLACEMENT_LIST;
+    if (strcmp(cmd, "default") == 0)
+      return OPT_ZONEGROUP_PLACEMENT_DEFAULT;
   } else if (strcmp(prev_cmd, "zonegroup") == 0 ||
 	     strcmp(prev_cmd, "region") == 0) {
     if (strcmp(cmd, "add") == 0)
@@ -622,6 +673,16 @@ static int get_cmd(const char *cmd, const char *prev_cmd, const char *prev_prev_
       return OPT_ZONEGROUPMAP_SET;
     if (strcmp(cmd, "update") == 0)
       return OPT_ZONEGROUPMAP_UPDATE;
+  } else if ((prev_prev_cmd && strcmp(prev_prev_cmd, "zone") == 0) &&
+	     (strcmp(prev_cmd, "placement") == 0)) {
+    if (strcmp(cmd, "add") == 0)
+      return OPT_ZONE_PLACEMENT_ADD;
+    if (strcmp(cmd, "modify") == 0)
+      return OPT_ZONE_PLACEMENT_MODIFY;
+    if (strcmp(cmd, "rm") == 0)
+      return OPT_ZONE_PLACEMENT_RM;
+    if (strcmp(cmd, "list") == 0)
+      return OPT_ZONE_PLACEMENT_LIST;
   } else if (strcmp(prev_cmd, "zone") == 0) {
     if (strcmp(cmd, "delete") == 0)
       return OPT_ZONE_DELETE;
@@ -861,16 +922,16 @@ public:
 };
 
 static int init_bucket(const string& tenant_name, const string& bucket_name, const string& bucket_id,
-                       RGWBucketInfo& bucket_info, rgw_bucket& bucket)
+                       RGWBucketInfo& bucket_info, rgw_bucket& bucket, map<string, bufferlist> *pattrs = nullptr)
 {
   if (!bucket_name.empty()) {
     RGWObjectCtx obj_ctx(store);
     int r;
     if (bucket_id.empty()) {
-      r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL);
+      r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, nullptr, pattrs);
     } else {
       string bucket_instance_id = bucket_name + ":" + bucket_id;
-      r = store->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, NULL);
+      r = store->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs);
     }
     if (r < 0) {
       cerr << "could not get bucket info for bucket=" << bucket_name << std::endl;
@@ -1694,18 +1755,18 @@ static void get_md_sync_status(list<string>& status)
   }
 
   map<int, RGWMetadataLogInfo> master_shards_info;
-  string master_period;
+  string master_period = store->get_current_period_id();
 
-  ret = sync.read_master_log_shards_info(&master_period, &master_shards_info);
+  ret = sync.read_master_log_shards_info(master_period, &master_shards_info);
   if (ret < 0) {
     status.push_back(string("failed to fetch master sync status: ") + cpp_strerror(-ret));
     return;
   }
 
   map<int, string> shards_behind;
-
   if (sync_status.sync_info.period != master_period) {
-    status.push_back(string("master is on a different period: master_period=" + master_period + " local_period=" + sync_status.sync_info.period));
+    status.push_back(string("master is on a different period: master_period=" +
+                            master_period + " local_period=" + sync_status.sync_info.period));
   } else {
     for (auto local_iter : sync_status.sync_markers) {
       int shard_id = local_iter.first;
@@ -1725,7 +1786,7 @@ static void get_md_sync_status(list<string>& status)
 
   int total_behind = shards_behind.size() + (sync_status.sync_info.num_shards - num_inc);
   if (total_behind == 0) {
-    status.push_back("metadata is caught up with master");
+    push_ss(ss, status) << "metadata is caught up with master";
   } else {
     push_ss(ss, status) << "metadata is behind on " << total_behind << " shards";
 
@@ -1942,7 +2003,190 @@ static void sync_status(Formatter *formatter)
   tab_dump("data sync", width, data_status);
 }
 
-int main(int argc, char **argv) 
+static void parse_tier_config_param(const string& s, map<string, string>& out)
+{
+  list<string> confs;
+  get_str_list(s, ",", confs);
+  for (auto c : confs) {
+    ssize_t pos = c.find("=");
+    if (pos < 0) {
+      out[c] = "";
+    } else {
+      out[c.substr(0, pos)] = c.substr(pos + 1);
+    }
+  }
+}
+
+#define RESHARD_SHARD_WINDOW 64
+#define RESHARD_MAX_AIO 128
+
+class BucketReshardShard {
+  RGWRados *store;
+  RGWBucketInfo& bucket_info;
+  int num_shard;
+  RGWRados::BucketShard bs;
+  vector<rgw_cls_bi_entry> entries;
+  map<uint8_t, rgw_bucket_category_stats> stats;
+  deque<librados::AioCompletion *>& aio_completions;
+
+  int wait_next_completion() {
+    librados::AioCompletion *c = aio_completions.front();
+    aio_completions.pop_front();
+
+    c->wait_for_safe();
+
+    int ret = c->get_return_value();
+    c->release();
+
+    if (ret < 0) {
+      cerr << "ERROR: reshard rados operation failed: " << cpp_strerror(-ret) << std::endl;
+      return ret;
+    }
+
+    return 0;
+  }
+
+  int get_completion(librados::AioCompletion **c) {
+    if (aio_completions.size() >= RESHARD_MAX_AIO) {
+      int ret = wait_next_completion();
+      if (ret < 0) {
+        return ret;
+      }
+    }
+
+    *c = librados::Rados::aio_create_completion(nullptr, nullptr, nullptr);
+    aio_completions.push_back(*c);
+
+    return 0;
+  }
+
+public:
+  BucketReshardShard(RGWRados *_store, RGWBucketInfo& _bucket_info,
+                     int _num_shard,
+                     deque<librados::AioCompletion *>& _completions) : store(_store), bucket_info(_bucket_info), bs(store),
+                                                                       aio_completions(_completions) {
+    num_shard = (bucket_info.num_shards > 0 ? _num_shard : -1);
+    bs.init(bucket_info.bucket, num_shard);
+  }
+
+  int get_num_shard() {
+    return num_shard;
+  }
+
+  int add_entry(rgw_cls_bi_entry& entry, bool account, uint8_t category,
+                const rgw_bucket_category_stats& entry_stats) {
+    entries.push_back(entry);
+    if (account) {
+      rgw_bucket_category_stats& target = stats[category];
+      target.num_entries += entry_stats.num_entries;
+      target.total_size += entry_stats.total_size;
+      target.total_size_rounded += entry_stats.total_size_rounded;
+    }
+    if (entries.size() >= RESHARD_SHARD_WINDOW) {
+      int ret = flush();
+      if (ret < 0) {
+        return ret;
+      }
+    }
+    return 0;
+  }
+  int flush() {
+    if (entries.size() == 0) {
+      return 0;
+    }
+
+    librados::ObjectWriteOperation op;
+    for (auto& entry : entries) {
+      store->bi_put(op, bs, entry);
+    }
+    cls_rgw_bucket_update_stats(op, false, stats);
+
+    librados::AioCompletion *c;
+    int ret = get_completion(&c);
+    if (ret < 0) {
+      return ret;
+    }
+    ret = bs.index_ctx.aio_operate(bs.bucket_obj, c, &op);
+    if (ret < 0) {
+      std::cerr << "ERROR: failed to store entries in target bucket shard (bs=" << bs.bucket << "/" << bs.shard_id << ") error=" << cpp_strerror(-ret) << std::endl;
+      return ret;
+    }
+    entries.clear();
+    stats.clear();
+    return 0;
+  }
+
+  int wait_all_aio() {
+    int ret = 0;
+    while (!aio_completions.empty()) {
+      int r = wait_next_completion();
+      if (r < 0) {
+        ret = r;
+      }
+    }
+    return ret;
+  }
+};
+
+class BucketReshardManager {
+  RGWRados *store;
+  RGWBucketInfo& target_bucket_info;
+  deque<librados::AioCompletion *> completions;
+  int num_target_shards;
+  vector<BucketReshardShard *> target_shards;
+
+public:
+  BucketReshardManager(RGWRados *_store, RGWBucketInfo& _target_bucket_info, int _num_target_shards) : store(_store), target_bucket_info(_target_bucket_info),
+                                                                                                       num_target_shards(_num_target_shards) {
+    target_shards.resize(num_target_shards);
+    for (int i = 0; i < num_target_shards; ++i) {
+      target_shards[i] = new BucketReshardShard(store, target_bucket_info, i, completions);
+    }
+  }
+
+  ~BucketReshardManager() {
+    for (auto& shard : target_shards) {
+      int ret = shard->wait_all_aio();
+      if (ret < 0) {
+        ldout(store->ctx(), 20) << __func__ << ": shard->wait_all_aio() returned ret=" << ret << dendl;
+      }
+    }
+  }
+
+  int add_entry(int shard_index,
+                rgw_cls_bi_entry& entry, bool account, uint8_t category,
+                const rgw_bucket_category_stats& entry_stats) {
+    int ret = target_shards[shard_index]->add_entry(entry, account, category, entry_stats);
+    if (ret < 0) {
+      cerr << "ERROR: target_shards.add_entry(" << entry.idx << ") returned error: " << cpp_strerror(-ret) << std::endl;
+      return ret;
+    }
+    return 0;
+  }
+
+  int finish() {
+    int ret = 0;
+    for (auto& shard : target_shards) {
+      int r = shard->flush();
+      if (r < 0) {
+        cerr << "ERROR: target_shards[" << shard->get_num_shard() << "].flush() returned error: " << cpp_strerror(-r) << std::endl;
+        ret = r;
+      }
+    }
+    for (auto& shard : target_shards) {
+      int r = shard->wait_all_aio();
+      if (r < 0) {
+        cerr << "ERROR: target_shards[" << shard->get_num_shard() << "].wait_all_aio() returned error: " << cpp_strerror(-r) << std::endl;
+        ret = r;
+      }
+      delete shard;
+    }
+    target_shards.clear();
+    return ret;
+  }
+};
+
+int main(int argc, char **argv)
 {
   vector<const char*> args;
   argv_to_vec(argc, (const char **)argv, args);
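
BucketReshardShard above buffers index entries and writes them in windows of RESHARD_SHARD_WINDOW (64), while capping outstanding completions at RESHARD_MAX_AIO (128) by waiting on the oldest one before submitting more. The following is a minimal, librados-free sketch of that batching-with-backpressure pattern; std::async stands in for rados AIO completions and the integer "entry" type is made up, so this is only the shape of the code, not the code itself.

    // Sketch only: bounded-window async batching, the same shape as
    // BucketReshardShard::add_entry()/flush()/wait_all_aio() above.
    #include <cstddef>
    #include <deque>
    #include <future>
    #include <iostream>
    #include <vector>

    static const size_t WINDOW  = 64;   // mirrors RESHARD_SHARD_WINDOW
    static const size_t MAX_AIO = 128;  // mirrors RESHARD_MAX_AIO

    struct ShardWriter {
      std::vector<int> pending;               // buffered "index entries"
      std::deque<std::future<int>> inflight;  // outstanding async writes

      int wait_oldest() {
        int r = inflight.front().get();
        inflight.pop_front();
        return r;                             // the op's return value
      }

      int flush() {
        if (pending.empty())
          return 0;
        if (inflight.size() >= MAX_AIO) {     // backpressure: wait before submitting
          int r = wait_oldest();
          if (r < 0)
            return r;
        }
        std::vector<int> batch;
        batch.swap(pending);
        inflight.push_back(std::async(std::launch::async, [batch] {
          return batch.empty() ? -1 : 0;      // pretend the batch was written
        }));
        return 0;
      }

      int add_entry(int entry) {
        pending.push_back(entry);
        return (pending.size() >= WINDOW) ? flush() : 0;
      }

      int drain() {                           // like flush() plus wait_all_aio()
        int ret = flush();
        while (!inflight.empty()) {
          int r = wait_oldest();
          if (r < 0)
            ret = r;
        }
        return ret;
      }
    };

    int main()
    {
      ShardWriter w;
      for (int i = 0; i < 1000; ++i)
        if (w.add_entry(i) < 0)
          return 1;
      int ret = w.drain();
      std::cout << "drained, ret=" << ret << std::endl;
      return ret < 0 ? 1 : 0;
    }
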
@@ -2028,6 +2272,10 @@ int main(int argc, char **argv)
   string op_mask_str;
   string quota_scope;
   string object_version;
+  string placement_id;
+  list<string> tags;
+  list<string> tags_add;
+  list<string> tags_rm;
 
   int64_t max_objects = -1;
   int64_t max_size = -1;
@@ -2037,6 +2285,10 @@ int main(int argc, char **argv)
 
   int sync_stats = false;
   int reset_regions = false;
+  int bypass_gc = false;
+  int inconsistent_index = false;
+
+  int verbose = false;
 
   int extra_info = false;
 
@@ -2048,6 +2300,7 @@ int main(int argc, char **argv)
 
   string job_id;
   int num_shards = 0;
+  bool num_shards_specified = false;
   int max_concurrent_ios = 32;
   uint64_t orphan_stale_secs = (24 * 3600);
 
@@ -2059,6 +2312,12 @@ int main(int argc, char **argv)
   string source_zone_name;
   string source_zone; /* zone id */
 
+  boost::optional<string> index_pool;
+  boost::optional<string> data_pool;
+  boost::optional<string> data_extra_pool;
+  RGWBucketIndexType placement_index_type = RGWBIType_Normal;
+  bool index_type_specified = false;
+
   for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
     if (ceph_argparse_double_dash(args, i)) {
       break;
@@ -2120,6 +2379,8 @@ int main(int argc, char **argv)
       // do nothing
     } else if (ceph_argparse_binary_flag(args, i, &system, NULL, "--system", (char*)NULL)) {
       system_specified = true;
+    } else if (ceph_argparse_binary_flag(args, i, &verbose, NULL, "--verbose", (char*)NULL)) {
+      // do nothing
     } else if (ceph_argparse_binary_flag(args, i, &staging, NULL, "--staging", (char*)NULL)) {
       // do nothing
     } else if (ceph_argparse_binary_flag(args, i, &commit, NULL, "--commit", (char*)NULL)) {
@@ -2176,6 +2437,7 @@ int main(int argc, char **argv)
         cerr << "ERROR: failed to parse num shards: " << err << std::endl;
         return EINVAL;
       }
+      num_shards_specified = true;
     } else if (ceph_argparse_witharg(args, i, &val, "--max-concurrent-ios", (char*)NULL)) {
       max_concurrent_ios = (int)strict_strtol(val.c_str(), 10, &err);
       if (!err.empty()) {
@@ -2250,7 +2512,10 @@ int main(int argc, char **argv)
      // do nothing
     } else if (ceph_argparse_binary_flag(args, i, &extra_info, NULL, "--extra-info", (char*)NULL)) {
      // do nothing
-    } else if (ceph_argparse_binary_flag(args, i, &reset_regions, NULL, "--reset-regions", (char*)NULL)) {
+    } else if (ceph_argparse_binary_flag(args, i, &bypass_gc, NULL, "--bypass-gc", (char*)NULL)) {
+     // do nothing
+    } else if (ceph_argparse_binary_flag(args, i, &inconsistent_index, NULL, "--inconsistent-index", (char*)NULL)) {
+     // do nothing
     } else if (ceph_argparse_witharg(args, i, &val, "--caps", (char*)NULL)) {
       caps = val;
     } else if (ceph_argparse_witharg(args, i, &val, "-i", "--infile", (char*)NULL)) {
@@ -2309,6 +2574,14 @@ int main(int argc, char **argv)
       zonegroup_id = val;
     } else if (ceph_argparse_witharg(args, i, &val, "--zonegroup-new-name", (char*)NULL)) {
       zonegroup_new_name = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--placement-id", (char*)NULL)) {
+      placement_id = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--tags", (char*)NULL)) {
+      get_str_list(val, tags);
+    } else if (ceph_argparse_witharg(args, i, &val, "--tags-add", (char*)NULL)) {
+      get_str_list(val, tags_add);
+    } else if (ceph_argparse_witharg(args, i, &val, "--tags-rm", (char*)NULL)) {
+      get_str_list(val, tags_rm);
     } else if (ceph_argparse_witharg(args, i, &val, "--api-name", (char*)NULL)) {
       api_name = val;
     } else if (ceph_argparse_witharg(args, i, &val, "--zone-id", (char*)NULL)) {
@@ -2319,6 +2592,25 @@ int main(int argc, char **argv)
       get_str_list(val, endpoints);
     } else if (ceph_argparse_witharg(args, i, &val, "--source-zone", (char*)NULL)) {
       source_zone_name = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--index-pool", (char*)NULL)) {
+      index_pool = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--data-pool", (char*)NULL)) {
+      data_pool = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--data-extra-pool", (char*)NULL)) {
+      data_extra_pool = val;
+    } else if (ceph_argparse_witharg(args, i, &val, "--placement-index-type", (char*)NULL)) {
+      if (val == "normal") {
+        placement_index_type = RGWBIType_Normal;
+      } else if (val == "indexless") {
+        placement_index_type = RGWBIType_Indexless;
+      } else {
+        placement_index_type = (RGWBucketIndexType)strict_strtol(val.c_str(), 10, &err);
+        if (!err.empty()) {
+          cerr << "ERROR: failed to parse index type index: " << err << std::endl;
+          return EINVAL;
+        }
+      }
+      index_type_specified = true;
     } else if (strncmp(*i, "-", 1) == 0) {
       cerr << "ERROR: invalid flag " << *i << std::endl;
       return EINVAL;
@@ -2411,25 +2703,33 @@ int main(int argc, char **argv)
   // not a raw op if 'period pull' needs to look up remotes
   bool raw_period_pull = opt_cmd == OPT_PERIOD_PULL && remote.empty() && !url.empty();
 
-  bool raw_storage_op = (opt_cmd == OPT_ZONEGROUP_ADD || opt_cmd == OPT_ZONEGROUP_CREATE || opt_cmd == OPT_ZONEGROUP_DELETE ||
-			 opt_cmd == OPT_ZONEGROUP_GET || opt_cmd == OPT_ZONEGROUP_LIST ||  
-                         opt_cmd == OPT_ZONEGROUP_SET || opt_cmd == OPT_ZONEGROUP_DEFAULT ||
-			 opt_cmd == OPT_ZONEGROUP_RENAME || opt_cmd == OPT_ZONEGROUP_MODIFY ||
-			 opt_cmd == OPT_ZONEGROUP_REMOVE ||
-                         opt_cmd == OPT_ZONEGROUPMAP_GET || opt_cmd == OPT_ZONEGROUPMAP_SET ||
-                         opt_cmd == OPT_ZONEGROUPMAP_UPDATE ||
-			 opt_cmd == OPT_ZONE_CREATE || opt_cmd == OPT_ZONE_DELETE ||
-                         opt_cmd == OPT_ZONE_GET || opt_cmd == OPT_ZONE_SET || opt_cmd == OPT_ZONE_RENAME ||
-                         opt_cmd == OPT_ZONE_LIST || opt_cmd == OPT_ZONE_MODIFY || opt_cmd == OPT_ZONE_DEFAULT ||
-			 opt_cmd == OPT_REALM_CREATE || opt_cmd == OPT_PERIOD_PREPARE ||
-			 opt_cmd == OPT_PERIOD_DELETE || opt_cmd == OPT_PERIOD_GET ||
-			 opt_cmd == OPT_PERIOD_GET_CURRENT || opt_cmd == OPT_PERIOD_LIST ||
-                         raw_period_update || raw_period_pull ||
-			 opt_cmd == OPT_REALM_DELETE || opt_cmd == OPT_REALM_GET || opt_cmd == OPT_REALM_LIST ||
-			 opt_cmd == OPT_REALM_LIST_PERIODS ||
-			 opt_cmd == OPT_REALM_GET_DEFAULT || opt_cmd == OPT_REALM_REMOVE ||
-			 opt_cmd == OPT_REALM_RENAME || opt_cmd == OPT_REALM_SET ||
-			 opt_cmd == OPT_REALM_DEFAULT || opt_cmd == OPT_REALM_PULL);
+  std::set<int> raw_storage_ops_list = {OPT_ZONEGROUP_ADD, OPT_ZONEGROUP_CREATE, OPT_ZONEGROUP_DELETE,
+			 OPT_ZONEGROUP_GET, OPT_ZONEGROUP_LIST,
+                         OPT_ZONEGROUP_SET, OPT_ZONEGROUP_DEFAULT,
+			 OPT_ZONEGROUP_RENAME, OPT_ZONEGROUP_MODIFY,
+			 OPT_ZONEGROUP_REMOVE,
+			 OPT_ZONEGROUP_PLACEMENT_ADD, OPT_ZONEGROUP_PLACEMENT_RM,
+			 OPT_ZONEGROUP_PLACEMENT_MODIFY, OPT_ZONEGROUP_PLACEMENT_LIST,
+			 OPT_ZONEGROUP_PLACEMENT_DEFAULT,
+                         OPT_ZONEGROUPMAP_GET, OPT_ZONEGROUPMAP_SET,
+                         OPT_ZONEGROUPMAP_UPDATE,
+			 OPT_ZONE_CREATE, OPT_ZONE_DELETE,
+                         OPT_ZONE_GET, OPT_ZONE_SET, OPT_ZONE_RENAME,
+                         OPT_ZONE_LIST, OPT_ZONE_MODIFY, OPT_ZONE_DEFAULT,
+			 OPT_ZONE_PLACEMENT_ADD, OPT_ZONE_PLACEMENT_RM,
+			 OPT_ZONE_PLACEMENT_MODIFY, OPT_ZONE_PLACEMENT_LIST,
+			 OPT_REALM_CREATE,
+			 OPT_PERIOD_DELETE, OPT_PERIOD_GET,
+			 OPT_PERIOD_GET_CURRENT, OPT_PERIOD_LIST,
+			 OPT_REALM_DELETE, OPT_REALM_GET, OPT_REALM_LIST,
+			 OPT_REALM_LIST_PERIODS,
+			 OPT_REALM_GET_DEFAULT, OPT_REALM_REMOVE,
+			 OPT_REALM_RENAME, OPT_REALM_SET,
+			 OPT_REALM_DEFAULT, OPT_REALM_PULL};
+
+
+  bool raw_storage_op = (raw_storage_ops_list.find(opt_cmd) != raw_storage_ops_list.end() ||
+                         raw_period_update || raw_period_pull);
 
   if (raw_storage_op) {
     store = RGWStoreManager::get_raw_storage(g_ceph_context);
@@ -2455,29 +2755,6 @@ int main(int argc, char **argv)
 
   if (raw_storage_op) {
     switch (opt_cmd) {
-    case OPT_PERIOD_PREPARE:
-      {
-	RGWRealm realm(realm_id, realm_name);
-	int ret = realm.init(g_ceph_context, store);
-	if (ret < 0) {
-	  cerr << "could not init realm " << ": " << cpp_strerror(-ret) << std::endl;
-	  return ret;
-	}
-	RGWPeriod period;
-	ret = period.init(g_ceph_context, store, realm.get_id(), realm.get_name(), false);
-	if (ret < 0) {
-	  cerr << "failed to init period " << ": " << cpp_strerror(-ret) << std::endl;
-	  return ret;
-	}
-	ret = period.create();
-	if (ret < 0) {
-	  cerr << "ERROR: couldn't create period " << ": " << cpp_strerror(-ret) << std::endl;
-	  return ret;
-	}
-	encode_json("period", period, formatter);
-	formatter->flush(cout);
-      }
-      break;
     case OPT_PERIOD_DELETE:
       {
 	if (period_id.empty()) {
@@ -2676,7 +2953,7 @@ int main(int argc, char **argv)
 	  cerr << "failed to list realmss: " << cpp_strerror(-ret) << std::endl;
 	  return -ret;
 	}
-	formatter->open_object_section("realmss_list");
+	formatter->open_object_section("realms_list");
 	encode_json("default_info", default_id, formatter);
 	encode_json("realms", realms, formatter);
 	formatter->close_section();
@@ -2734,24 +3011,39 @@ int main(int argc, char **argv)
 	  cerr << "no realm name or id provided" << std::endl;
 	  return -EINVAL;
 	}
-        if (infile.empty()) {
-	  cerr << "no realm input file provided" << std::endl;
-	  return -EINVAL;
-        }
 	RGWRealm realm(realm_id, realm_name);
-	int ret = realm.init(g_ceph_context, store, false);
-	if (ret < 0) {
+	bool new_realm = false;
+	int ret = realm.init(g_ceph_context, store);
+	if (ret < 0 && ret != -ENOENT) {
 	  cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
 	  return -ret;
+	} else if (ret == -ENOENT) {
+	  new_realm = true;
 	}
 	ret = read_decode_json(infile, realm);
 	if (ret < 0) {
 	  return 1;
 	}
-	ret = realm.update();
-	if (ret < 0) {
-	  cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl;
-	  return 1;
+	if (!realm_name.empty() && realm.get_name() != realm_name) {
+	  cerr << "mismatch between --rgw-realm " << realm_name << " and json input file name " <<
+	    realm.get_name() << std::endl;
+	  return EINVAL;
+	}
+	/* new realm */
+	if (new_realm) {
+	  cout << "clearing period and epoch for new realm" << std::endl;
+	  realm.clear_current_period_and_epoch();
+	  ret = realm.create();
+	  if (ret < 0) {
+	    cerr << "ERROR: couldn't create new realm: " << cpp_strerror(-ret) << std::endl;
+	    return 1;
+	  }
+	} else {
+	  ret = realm.update();
+	  if (ret < 0) {
+	    cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl;
+	    return 1;
+	  }
 	}
 
         if (set_default) {
@@ -3183,6 +3475,81 @@ int main(int argc, char **argv)
 	}
       }
       break;
+    case OPT_ZONEGROUP_PLACEMENT_LIST:
+      {
+	RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
+	int ret = zonegroup.init(g_ceph_context, store);
+	if (ret < 0) {
+	  cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
+	  return -ret;
+	}
+
+	encode_json("placement_targets", zonegroup.placement_targets, formatter);
+	formatter->flush(cout);
+	cout << std::endl;
+      }
+      break;
+    case OPT_ZONEGROUP_PLACEMENT_ADD:
+    case OPT_ZONEGROUP_PLACEMENT_MODIFY:
+    case OPT_ZONEGROUP_PLACEMENT_RM:
+    case OPT_ZONEGROUP_PLACEMENT_DEFAULT:
+      {
+        if (placement_id.empty()) {
+          cerr << "ERROR: --placement-id not specified" << std::endl;
+          return EINVAL;
+        }
+
+	RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
+	int ret = zonegroup.init(g_ceph_context, store);
+	if (ret < 0) {
+	  cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
+	  return -ret;
+	}
+
+        if (opt_cmd == OPT_ZONEGROUP_PLACEMENT_ADD) {
+          RGWZoneGroupPlacementTarget target;
+          target.name = placement_id;
+          for (auto& t : tags) {
+            target.tags.insert(t);
+          }
+          zonegroup.placement_targets[placement_id] = target;
+        } else if (opt_cmd == OPT_ZONEGROUP_PLACEMENT_MODIFY) {
+          RGWZoneGroupPlacementTarget& target = zonegroup.placement_targets[placement_id];
+          if (!tags.empty()) {
+            target.tags.clear();
+            for (auto& t : tags) {
+              target.tags.insert(t);
+            }
+          }
+          target.name = placement_id;
+          for (auto& t : tags_rm) {
+            target.tags.erase(t);
+          }
+          for (auto& t : tags_add) {
+            target.tags.insert(t);
+          }
+        } else if (opt_cmd == OPT_ZONEGROUP_PLACEMENT_RM) {
+          zonegroup.placement_targets.erase(placement_id);
+        } else if (opt_cmd == OPT_ZONEGROUP_PLACEMENT_DEFAULT) {
+          if (!zonegroup.placement_targets.count(placement_id)) {
+            cerr << "failed to find a zonegroup placement target named '"
+                << placement_id << "'" << std::endl;
+            return -ENOENT;
+          }
+          zonegroup.default_placement = placement_id;
+        }
+
+        zonegroup.post_process_params();
+        ret = zonegroup.update();
+        if (ret < 0) {
+          cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
+          return -ret;
+        }
+
+        encode_json("placement_targets", zonegroup.placement_targets, formatter);
+        formatter->flush(cout);
+      }
+      break;
     case OPT_ZONEGROUPMAP_GET:
       {
 	RGWZoneGroupMap zonegroupmap;
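
For the `zonegroup placement modify` branch above, the order of operations matters: a non-empty --tags list replaces the target's tag set, then --tags-rm entries are erased and --tags-add entries inserted. A tiny sketch with made-up tag values, only to make that precedence concrete:

    // Sketch only: tag-update precedence of `zonegroup placement modify`.
    #include <iostream>
    #include <list>
    #include <set>
    #include <string>

    int main()
    {
      std::set<std::string> target_tags = {"ssd", "us-east"};  // existing target tags
      std::list<std::string> tags      = {"hdd"};              // --tags (replaces)
      std::list<std::string> tags_rm   = {"hdd"};              // --tags-rm
      std::list<std::string> tags_add  = {"archive"};          // --tags-add

      if (!tags.empty()) {
        target_tags.clear();
        target_tags.insert(tags.begin(), tags.end());
      }
      for (const auto& t : tags_rm)
        target_tags.erase(t);
      for (const auto& t : tags_add)
        target_tags.insert(t);

      for (const auto& t : target_tags)
        std::cout << t << std::endl;  // only "archive" survives
    }
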
@@ -3627,6 +3994,85 @@ int main(int argc, char **argv)
 	}
       }
       break;
+    case OPT_ZONE_PLACEMENT_ADD:
+    case OPT_ZONE_PLACEMENT_MODIFY:
+    case OPT_ZONE_PLACEMENT_RM:
+      {
+        if (placement_id.empty()) {
+          cerr << "ERROR: --placement-id not specified" << std::endl;
+          return EINVAL;
+        }
+	RGWZoneParams zone(zone_id, zone_name);
+	int ret = zone.init(g_ceph_context, store);
+        if (ret < 0) {
+	  cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
+	  return -ret;
+	}
+
+        if (opt_cmd == OPT_ZONE_PLACEMENT_ADD) {
+          // pool names are required
+          if (!index_pool || index_pool->empty() ||
+              !data_pool || data_pool->empty()) {
+            cerr << "ERROR: need to specify both --index-pool and --data-pool" << std::endl;
+            return EINVAL;
+          }
+
+          RGWZonePlacementInfo& info = zone.placement_pools[placement_id];
+
+          info.index_pool = *index_pool;
+          info.data_pool = *data_pool;
+          if (data_extra_pool) {
+            info.data_extra_pool = *data_extra_pool;
+          }
+          if (index_type_specified) {
+            info.index_type = placement_index_type;
+          }
+        } else if (opt_cmd == OPT_ZONE_PLACEMENT_MODIFY) {
+          auto p = zone.placement_pools.find(placement_id);
+          if (p == zone.placement_pools.end()) {
+            cerr << "ERROR: zone placement target '" << placement_id
+                << "' not found" << std::endl;
+            return -ENOENT;
+          }
+          auto& info = p->second;
+          if (index_pool && !index_pool->empty()) {
+            info.index_pool = *index_pool;
+          }
+          if (data_pool && !data_pool->empty()) {
+            info.data_pool = *data_pool;
+          }
+          if (data_extra_pool) {
+            info.data_extra_pool = *data_extra_pool;
+          }
+          if (index_type_specified) {
+            info.index_type = placement_index_type;
+          }
+        } else if (opt_cmd == OPT_ZONE_PLACEMENT_RM) {
+          zone.placement_pools.erase(placement_id);
+        }
+
+        ret = zone.update();
+        if (ret < 0) {
+          cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl;
+          return -ret;
+        }
+
+        encode_json("zone", zone, formatter);
+        formatter->flush(cout);
+      }
+      break;
+    case OPT_ZONE_PLACEMENT_LIST:
+      {
+	RGWZoneParams zone(zone_id, zone_name);
+	int ret = zone.init(g_ceph_context, store);
+	if (ret < 0) {
+	  cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
+	  return -ret;
+	}
+	encode_json("placement_pools", zone.placement_pools, formatter);
+	formatter->flush(cout);
+      }
+      break;
     }
     return 0;
   }
@@ -3718,6 +4164,7 @@ int main(int argc, char **argv)
   bucket_op.set_check_objects(check_objects);
   bucket_op.set_delete_children(delete_child_objects);
   bucket_op.set_fix_index(fix);
+  bucket_op.set_max_aio(max_concurrent_ios);
 
   // required to gather errors from operations
   std::string err_msg;
@@ -4120,7 +4567,7 @@ int main(int argc, char **argv)
         formatter->open_array_section("log_entries");
 
       do {
-	uint64_t total_time =  entry.total_time.sec() * 1000000LL * entry.total_time.usec();
+	uint64_t total_time =  entry.total_time.sec() * 1000000LL + entry.total_time.usec();
 
         agg_time += total_time;
         agg_bytes_sent += entry.bytes_sent;
@@ -4380,6 +4827,10 @@ next:
   }
 
   if (opt_cmd == OPT_BI_LIST) {
+    if (bucket_name.empty()) {
+      cerr << "ERROR: bucket name not specified" << std::endl;
+      return EINVAL;
+    }
     RGWBucketInfo bucket_info;
     int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket);
     if (ret < 0) {
@@ -4393,29 +4844,88 @@ next:
       max_entries = 1000;
     }
 
+    int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
 
     formatter->open_array_section("entries");
 
-    do {
-      entries.clear();
-      ret = store->bi_list(bucket, object, marker, max_entries, &entries, &is_truncated);
+    for (int i = 0; i < max_shards; i++) {
+      RGWRados::BucketShard bs(store);
+      int shard_id = (bucket_info.num_shards > 0  ? i : -1);
+      int ret = bs.init(bucket, shard_id);
+      marker.clear();
+
       if (ret < 0) {
-        cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
+        cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << shard_id << "): " << cpp_strerror(-ret) << std::endl;
         return -ret;
       }
 
-      list<rgw_cls_bi_entry>::iterator iter;
-      for (iter = entries.begin(); iter != entries.end(); ++iter) {
-        rgw_cls_bi_entry& entry = *iter;
-        encode_json("entry", entry, formatter);
-        marker = entry.idx;
-      }
+      do {
+        entries.clear();
+        ret = store->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
+        if (ret < 0) {
+          cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
+          return -ret;
+        }
+
+        list<rgw_cls_bi_entry>::iterator iter;
+        for (iter = entries.begin(); iter != entries.end(); ++iter) {
+          rgw_cls_bi_entry& entry = *iter;
+          encode_json("entry", entry, formatter);
+          marker = entry.idx;
+        }
+        formatter->flush(cout);
+      } while (is_truncated);
       formatter->flush(cout);
-    } while (is_truncated);
+    }
     formatter->close_section();
     formatter->flush(cout);
   }
 
+  if (opt_cmd == OPT_BI_PURGE) {
+    if (bucket_name.empty()) {
+      cerr << "ERROR: bucket name not specified" << std::endl;
+      return EINVAL;
+    }
+    RGWBucketInfo bucket_info;
+    int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket);
+    if (ret < 0) {
+      cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
+      return -ret;
+    }
+
+    RGWBucketInfo cur_bucket_info;
+    rgw_bucket cur_bucket;
+    ret = init_bucket(tenant, bucket_name, string(), cur_bucket_info, cur_bucket);
+    if (ret < 0) {
+      cerr << "ERROR: could not init current bucket info for bucket_name=" << bucket_name << ": " << cpp_strerror(-ret) << std::endl;
+      return -ret;
+    }
+
+    if (cur_bucket_info.bucket.bucket_id == bucket_info.bucket.bucket_id && !yes_i_really_mean_it) {
+      cerr << "specified bucket instance points to a current bucket instance" << std::endl;
+      cerr << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl;
+      return EINVAL;
+    }
+
+    int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
+
+    for (int i = 0; i < max_shards; i++) {
+      RGWRados::BucketShard bs(store);
+      int shard_id = (bucket_info.num_shards > 0  ? i : -1);
+      int ret = bs.init(bucket, shard_id);
+      if (ret < 0) {
+        cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << shard_id << "): " << cpp_strerror(-ret) << std::endl;
+        return -ret;
+      }
+
+      ret = store->bi_remove(bs);
+      if (ret < 0) {
+        cerr << "ERROR: failed to remove bucket index object: " << cpp_strerror(-ret) << std::endl;
+        return -ret;
+      }
+    }
+  }
+
   if (opt_cmd == OPT_OBJECT_RM) {
     RGWBucketInfo bucket_info;
     int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket);
@@ -4579,6 +5089,158 @@ next:
     formatter->flush(cout);
   }
 
+  if (opt_cmd == OPT_BUCKET_RESHARD) {
+    if (bucket_name.empty()) {
+      cerr << "ERROR: bucket not specified" << std::endl;
+      return EINVAL;
+    }
+
+    if (!num_shards_specified) {
+      cerr << "ERROR: --num-shards not specified" << std::endl;
+      return EINVAL;
+    }
+
+    if (num_shards > (int)store->get_max_bucket_shards()) {
+      cerr << "ERROR: num_shards too high, max value: " << store->get_max_bucket_shards() << std::endl;
+      return EINVAL;
+    }
+
+    RGWBucketInfo bucket_info;
+    map<string, bufferlist> attrs;
+    int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket, &attrs);
+    if (ret < 0) {
+      cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
+      return -ret;
+    }
+
+    int num_source_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
+
+    if (num_shards <= num_source_shards && !yes_i_really_mean_it) {
+      cerr << "num shards is less or equal to current shards count" << std::endl
+           << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl;
+      return EINVAL;
+    }
+
+    RGWBucketInfo new_bucket_info(bucket_info);
+    store->create_bucket_id(&new_bucket_info.bucket.bucket_id);
+    new_bucket_info.bucket.oid.clear();
+
+    new_bucket_info.num_shards = num_shards;
+    new_bucket_info.objv_tracker.clear();
+
+    cout << "*** NOTICE: operation will not remove old bucket index objects ***" << std::endl;
+    cout << "***         these will need to be removed manually             ***" << std::endl;
+    cout << "old bucket instance id: " << bucket_info.bucket.bucket_id << std::endl;
+    cout << "new bucket instance id: " << new_bucket_info.bucket.bucket_id << std::endl;
+
+    ret = store->init_bucket_index(new_bucket_info.bucket, new_bucket_info.num_shards);
+    if (ret < 0) {
+      cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
+      return -ret;
+    }
+
+    ret = store->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
+    if (ret < 0) {
+      cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl;
+      return -ret;
+    }
+    list<rgw_cls_bi_entry> entries;
+
+    if (max_entries < 0) {
+      max_entries = 1000;
+    }
+
+    int num_target_shards = (new_bucket_info.num_shards > 0 ? new_bucket_info.num_shards : 1);
+
+    BucketReshardManager target_shards_mgr(store, new_bucket_info, num_target_shards);
+
+    if (verbose) {
+      formatter->open_array_section("entries");
+    }
+
+    uint64_t total_entries = 0;
+
+    if (!verbose) {
+      cout << "total entries:";
+    }
+
+    for (int i = 0; i < num_source_shards; ++i) {
+      bool is_truncated = true;
+      marker.clear();
+      while (is_truncated) {
+        entries.clear();
+        ret = store->bi_list(bucket, i, string(), marker, max_entries, &entries, &is_truncated);
+        if (ret < 0) {
+          cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
+          return -ret;
+        }
+
+        list<rgw_cls_bi_entry>::iterator iter;
+        for (iter = entries.begin(); iter != entries.end(); ++iter) {
+          rgw_cls_bi_entry& entry = *iter;
+          if (verbose) {
+            formatter->open_object_section("entry");
+
+            encode_json("shard_id", i, formatter);
+            encode_json("num_entry", total_entries, formatter);
+            encode_json("entry", entry, formatter);
+          }
+          total_entries++;
+
+          marker = entry.idx;
+
+          int target_shard_id;
+          cls_rgw_obj_key cls_key;
+          uint8_t category;
+          rgw_bucket_category_stats stats;
+          bool account = entry.get_info(&cls_key, &category, &stats);
+          rgw_obj_key key(cls_key);
+          rgw_obj obj(new_bucket_info.bucket, key);
+          int ret = store->get_target_shard_id(new_bucket_info, obj.get_hash_object(), &target_shard_id);
+          if (ret < 0) {
+            cerr << "ERROR: get_target_shard_id() returned ret=" << ret << std::endl;
+            return ret;
+          }
+
+          int shard_index = (target_shard_id > 0 ? target_shard_id : 0);
+
+          ret = target_shards_mgr.add_entry(shard_index, entry, account, category, stats);
+          if (ret < 0) {
+            return ret;
+          }
+          if (verbose) {
+            formatter->close_section();
+            formatter->flush(cout);
+            formatter->flush(cout);
+          } else if (!(total_entries % 1000)) {
+            cout << " " << total_entries;
+          }
+        }
+      }
+    }
+    if (verbose) {
+      formatter->close_section();
+      formatter->flush(cout);
+    } else {
+      cout << " " << total_entries << std::endl;
+    }
+
+    ret = target_shards_mgr.finish();
+    if (ret < 0) {
+      cerr << "ERROR: failed to reshard" << std::endl;
+      return EIO;
+    }
+
+    bucket_op.set_bucket_id(new_bucket_info.bucket.bucket_id);
+    bucket_op.set_user_id(new_bucket_info.owner);
+    string err;
+    int r = RGWBucketAdminOp::link(store, bucket_op, &err);
+    if (r < 0) {
+      cerr << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << err << "; " << cpp_strerror(-r) << std::endl;
+      return -r;
+    }
+  }
+
   if (opt_cmd == OPT_OBJECT_UNLINK) {
     RGWBucketInfo bucket_info;
     int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket);
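
The OPT_BUCKET_RESHARD branch above lists every source shard with bi_list(), asks store->get_target_shard_id() where each entry belongs in the new index, and hands it to BucketReshardManager. The sketch below shows only that routing step; std::hash with a modulo is a stand-in assumption, since the real shard-placement function is not part of this patch, and the entry keys are invented.

    // Sketch only: fan entries out to target shards during a reshard.
    // std::hash + modulo stands in for store->get_target_shard_id().
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
      const int num_target_shards = 8;
      std::vector<std::vector<std::string>> target_shards(num_target_shards);

      // index entries as they would come back from bi_list() on the source shards
      std::vector<std::string> entries = {
        "photos/a.jpg", "photos/b.jpg", "logs/2017-03-08", "logs/2017-03-09"
      };

      for (const auto& key : entries) {
        int target_shard_id = std::hash<std::string>{}(key) % num_target_shards;
        // BucketReshardManager::add_entry(target_shard_id, ...) in the real code
        target_shards[target_shard_id].push_back(key);
      }

      for (int i = 0; i < num_target_shards; ++i)
        std::cout << "shard " << i << ": " << target_shards[i].size()
                  << " entries" << std::endl;
    }
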
@@ -4665,7 +5327,11 @@ next:
   }
 
   if (opt_cmd == OPT_BUCKET_RM) {
-    RGWBucketAdminOp::remove_bucket(store, bucket_op);
+    if (inconsistent_index == false) {
+      RGWBucketAdminOp::remove_bucket(store, bucket_op, bypass_gc, true);
+    } else {
+      RGWBucketAdminOp::remove_bucket(store, bucket_op, bypass_gc, false);
+    }
   }
 
   if (opt_cmd == OPT_GC_LIST) {
diff --git a/src/rgw/rgw_bucket.cc b/src/rgw/rgw_bucket.cc
index daf18f5..ca37b07 100644
--- a/src/rgw/rgw_bucket.cc
+++ b/src/rgw/rgw_bucket.cc
@@ -19,6 +19,7 @@
 #include "rgw_user.h"
 #include "rgw_string.h"
 
+#include "include/rados/librados.hpp"
 // until everything is moved from rgw_common
 #include "rgw_common.h"
 
@@ -133,9 +134,8 @@ int rgw_read_user_buckets(RGWRados * store,
     if (ret < 0)
       return ret;
 
-    for (list<cls_user_bucket_entry>::iterator q = entries.begin(); q != entries.end(); ++q) {
-      RGWBucketEnt e(*q);
-      buckets.add(e);
+    for (const auto& entry : entries) {
+      buckets.add(RGWBucketEnt(user_id, entry));
       total++;
     }
 
@@ -224,6 +224,7 @@ int rgw_link_bucket(RGWRados *store, const rgw_user& user_id, rgw_bucket& bucket
 
   ep.linked = true;
   ep.owner = user_id;
+  ep.bucket = bucket;
   ret = store->put_bucket_entrypoint_info(tenant_name, bucket_name, ep, false, ot, real_time(), &attrs);
   if (ret < 0)
     goto done_err;
@@ -531,9 +532,7 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children)
   map<RGWObjCategory, RGWStorageStats> stats;
   std::vector<RGWObjEnt> objs;
   map<string, bool> common_prefixes;
-  rgw_obj obj;
   RGWBucketInfo info;
-  bufferlist bl;
   RGWObjectCtx obj_ctx(store);
 
   string bucket_ver, master_ver;
@@ -542,8 +541,6 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children)
   if (ret < 0)
     return ret;
 
-  obj.bucket = bucket;
-
   ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL);
   if (ret < 0)
     return ret;
@@ -562,7 +559,7 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children)
 
     while (!objs.empty()) {
       std::vector<RGWObjEnt>::iterator it = objs.begin();
-      for (it = objs.begin(); it != objs.end(); ++it) {
+      for (; it != objs.end(); ++it) {
         ret = rgw_remove_object(store, info, bucket, (*it).key);
         if (ret < 0)
           return ret;
@@ -596,6 +593,172 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children)
   return ret;
 }
 
+static int aio_wait(librados::AioCompletion *handle)
+{
+  librados::AioCompletion *c = handle;
+  c->wait_for_safe();
+  int ret = c->get_return_value();
+  c->release();
+  return ret;
+}
+
+static int drain_handles(list<librados::AioCompletion *>& pending)
+{
+  int ret = 0;
+  while (!pending.empty()) {
+    librados::AioCompletion *handle = pending.front();
+    pending.pop_front();
+    int r = aio_wait(handle);
+    if (r < 0) {
+      ret = r;
+    }
+  }
+  return ret;
+}
+
+int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
+                                int concurrent_max, bool keep_index_consistent)
+{
+  int ret;
+  map<RGWObjCategory, RGWStorageStats> stats;
+  std::vector<RGWObjEnt> objs;
+  map<string, bool> common_prefixes;
+  RGWBucketInfo info;
+  RGWObjectCtx obj_ctx(store);
+
+  string bucket_ver, master_ver;
+
+  ret = store->get_bucket_stats(bucket, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+  if (ret < 0)
+    return ret;
+
+  ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL);
+  if (ret < 0)
+    return ret;
+
+
+  RGWRados::Bucket target(store, info);
+  RGWRados::Bucket::List list_op(&target);
+
+  list_op.params.list_versions = true;
+
+  std::list<librados::AioCompletion*> handles;
+
+  int max = 1000;
+  int max_aio = concurrent_max;
+  ret = list_op.list_objects(max, &objs, &common_prefixes, NULL);
+  if (ret < 0)
+    return ret;
+
+  while (!objs.empty()) {
+    std::vector<RGWObjEnt>::iterator it = objs.begin();
+    for (; it != objs.end(); ++it) {
+      RGWObjState *astate = NULL;
+      rgw_obj obj(bucket, (*it).key.name);
+      obj.set_instance((*it).key.instance);
+
+      ret = store->get_obj_state(&obj_ctx, obj, &astate, NULL);
+      if (ret == -ENOENT) {
+        dout(1) << "WARNING: cannot find obj state for obj " << obj.get_object() << dendl;
+        continue;
+      }
+      if (ret < 0) {
+        lderr(store->ctx()) << "ERROR: get obj state returned with error " << ret << dendl;
+        return ret;
+      }
+
+      if (astate->has_manifest) {
+        rgw_obj head_obj;
+        RGWObjManifest& manifest = astate->manifest;
+        RGWObjManifest::obj_iterator miter = manifest.obj_begin();
+
+        if (miter.get_location().ns.empty()) {
+          head_obj = miter.get_location();
+        }
+
+        for (; miter != manifest.obj_end() && max_aio--; ++miter) {
+          if (!max_aio) {
+            ret = drain_handles(handles);
+            if (ret < 0) {
+              lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+              return ret;
+            }
+            max_aio = concurrent_max;
+          }
+
+          rgw_obj last_obj = miter.get_location();
+          if (last_obj == head_obj) {
+            // have the head obj deleted at the end
+            continue;
+          }
+
+          ret = store->delete_obj_aio(last_obj, bucket, info, astate, handles, keep_index_consistent);
+          if (ret < 0) {
+            lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
+            return ret;
+          }
+        } // for all shadow objs
+
+        ret = store->delete_obj_aio(head_obj, bucket, info, astate, handles, keep_index_consistent);
+        if (ret < 0) {
+          lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
+          return ret;
+        }
+      }
+
+      if (!max_aio) {
+        ret = drain_handles(handles);
+        if (ret < 0) {
+          lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+          return ret;
+        }
+        max_aio = concurrent_max;
+      }
+    } // for all RGW objects
+    objs.clear();
+
+    ret = list_op.list_objects(max, &objs, &common_prefixes, NULL);
+    if (ret < 0)
+      return ret;
+  }
+
+  ret = drain_handles(handles);
+  if (ret < 0) {
+    lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+    return ret;
+  }
+
+  ret = rgw_bucket_sync_user_stats(store, bucket.tenant, bucket.name);
+  if (ret < 0) {
+     dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" <<  ret << dendl;
+  }
+
+  RGWObjVersionTracker objv_tracker;
+
+  ret = rgw_bucket_delete_bucket_obj(store, bucket.tenant, bucket.name, objv_tracker);
+  if (ret < 0) {
+    lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket.name << "with ret as " << ret << dendl;
+    return ret;
+  }
+
+  if (!store->is_syncing_bucket_meta(bucket)) {
+    RGWObjVersionTracker objv_tracker;
+    string entry = bucket.get_key();
+    ret = rgw_bucket_instance_remove_entry(store, entry, &objv_tracker);
+    if (ret < 0) {
+      lderr(store->ctx()) << "ERROR: could not remove bucket instance entry" << bucket.name << "with ret as " << ret << dendl;
+      return ret;
+    }
+  }
+
+  ret = rgw_unlink_bucket(store, info.owner, bucket.tenant, bucket.name, false);
+  if (ret < 0) {
+    lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
+  }
+
+  return ret;
+}
+
 int rgw_bucket_delete_bucket_obj(RGWRados *store,
                                  const string& tenant_name,
                                  const string& bucket_name,
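
One detail of rgw_remove_bucket_bypass_gc() above worth calling out: for each object with a manifest, the tail/shadow rados objects are deleted first and the head object, the one that carries the manifest, is kept for last ("have the head obj deleted at the end"). A minimal sketch of that ordering, with made-up object names and a stubbed delete in place of delete_obj_aio():

    // Sketch only: tail parts first, head object last.
    #include <iostream>
    #include <string>
    #include <vector>

    static void delete_rados_obj(const std::string& name)
    {
      std::cout << "delete " << name << std::endl;  // stand-in for delete_obj_aio()
    }

    int main()
    {
      // the first manifest location with an empty namespace is the head object
      std::string head_obj = "mybucket_bigfile";
      std::vector<std::string> manifest_locations = {
        "mybucket_bigfile",              // head, skipped in the loop
        "shadow_mybucket_bigfile_1",
        "shadow_mybucket_bigfile_2",
      };

      for (const auto& loc : manifest_locations) {
        if (loc == head_obj)
          continue;                      // have the head obj deleted at the end
        delete_rados_obj(loc);
      }
      delete_rados_obj(head_obj);        // head goes last, after all tail parts
    }
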
@@ -736,7 +899,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, std::string *err_msg)
     rgw_obj obj_bucket_instance(bucket_instance, no_oid);
     r = store->system_obj_set_attr(NULL, obj_bucket_instance, RGW_ATTR_ACL, aclbl, &objv_tracker);
 
-    r = rgw_link_bucket(store, user_info.user_id, bucket, real_time());
+    r = rgw_link_bucket(store, user_info.user_id, bucket_info.bucket, real_time());
     if (r < 0)
       return r;
   }
@@ -761,12 +924,24 @@ int RGWBucket::unlink(RGWBucketAdminOpState& op_state, std::string *err_msg)
   return r;
 }
 
-int RGWBucket::remove(RGWBucketAdminOpState& op_state, std::string *err_msg)
+int RGWBucket::remove(RGWBucketAdminOpState& op_state, bool bypass_gc,
+                      bool keep_index_consistent, std::string *err_msg)
 {
   bool delete_children = op_state.will_delete_children();
   rgw_bucket bucket = op_state.get_bucket();
+  int ret;
+
+  if (bypass_gc) {
+    if (delete_children) {
+      ret = rgw_remove_bucket_bypass_gc(store, bucket, op_state.get_max_aio(), keep_index_consistent);
+    } else {
+      set_err_msg(err_msg, "purge objects should be set for gc to be bypassed");
+      return -EINVAL;
+    }
+  } else {
+    ret = rgw_remove_bucket(store, bucket, delete_children);
+  }
 
-  int ret = rgw_remove_bucket(store, bucket, delete_children);
   if (ret < 0) {
     set_err_msg(err_msg, "unable to remove bucket" + cpp_strerror(-ret));
     return ret;
@@ -839,7 +1014,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state,
   int max = 1000;
 
   map<string, bool> common_prefixes;
-  string ns = "multipart";
+  string ns = "";
 
   bool is_truncated;
   map<string, bool> meta_objs;
@@ -1173,7 +1348,8 @@ int RGWBucketAdminOp::check_index(RGWRados *store, RGWBucketAdminOpState& op_sta
   return 0;
 }
 
-int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state,
+                                    bool bypass_gc, bool keep_index_consistent)
 {
   RGWBucket bucket;
 
@@ -1181,7 +1357,7 @@ int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_s
   if (ret < 0)
     return ret;
 
-  return bucket.remove(op_state);
+  return bucket.remove(op_state, bypass_gc, keep_index_consistent);
 }
 
 int RGWBucketAdminOp::remove_object(RGWRados *store, RGWBucketAdminOpState& op_state)
@@ -1625,7 +1801,7 @@ int RGWDataChangesLog::list_entries(const real_time& start_time, const real_time
 
 int RGWDataChangesLog::get_info(int shard_id, RGWDataChangesLogInfo *info)
 {
-  if (shard_id > num_shards)
+  if (shard_id >= num_shards)
     return -EINVAL;
 
   string oid = oids[shard_id];
@@ -1972,7 +2148,6 @@ public:
       bci.info.bucket.data_pool = old_bci.info.bucket.data_pool;
       bci.info.bucket.index_pool = old_bci.info.bucket.index_pool;
       bci.info.bucket.data_extra_pool = old_bci.info.bucket.data_extra_pool;
-      bci.info.index_type = old_bci.info.index_type;
     }
 
     // are we actually going to perform this put, or is it too old?
diff --git a/src/rgw/rgw_bucket.h b/src/rgw/rgw_bucket.h
index 32bc0ab..ed678f5 100644
--- a/src/rgw/rgw_bucket.h
+++ b/src/rgw/rgw_bucket.h
@@ -126,7 +126,7 @@ public:
   /**
    * Add a (created) bucket to the user's bucket list.
    */
-  void add(RGWBucketEnt& bucket) {
+  void add(const RGWBucketEnt& bucket) {
     buckets[bucket.bucket.name] = bucket;
   }
 
@@ -178,6 +178,7 @@ extern int rgw_unlink_bucket(RGWRados *store, const rgw_user& user_id,
 
 extern int rgw_remove_object(RGWRados *store, RGWBucketInfo& bucket_info, rgw_bucket& bucket, rgw_obj_key& key);
 extern int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children);
+extern int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket, int concurrent_max, bool keep_index_consistent = true);
 
 extern int rgw_bucket_set_attrs(RGWRados *store, RGWBucketInfo& bucket_info,
                                 map<string, bufferlist>& attrs,
@@ -198,6 +199,7 @@ struct RGWBucketAdminOpState {
   bool fix_index;
   bool delete_child_objects;
   bool bucket_stored;
+  int max_aio;
 
   rgw_bucket bucket;
 
@@ -206,6 +208,8 @@ struct RGWBucketAdminOpState {
   void set_fix_index(bool value) { fix_index = value; }
   void set_delete_children(bool value) { delete_child_objects = value; }
 
+  void set_max_aio(int value) { max_aio = value; }
+
   void set_user_id(rgw_user& user_id) {
     if (!user_id.empty())
       uid = user_id;
@@ -240,6 +244,7 @@ struct RGWBucketAdminOpState {
   bool is_user_op() { return !uid.empty(); }
   bool is_system_op() { return uid.empty(); }
   bool has_bucket_stored() { return bucket_stored; }
+  int get_max_aio() { return max_aio; }
 
   RGWBucketAdminOpState() : list_buckets(false), stat_buckets(false), check_objects(false), 
                             fix_index(false), delete_child_objects(false),
@@ -279,7 +284,7 @@ public:
           map<RGWObjCategory, RGWStorageStats>& calculated_stats,
           std::string *err_msg = NULL);
 
-  int remove(RGWBucketAdminOpState& op_state, std::string *err_msg = NULL);
+  int remove(RGWBucketAdminOpState& op_state, bool bypass_gc = false, bool keep_index_consistent = true, std::string *err_msg = NULL);
   int link(RGWBucketAdminOpState& op_state, std::string *err_msg = NULL);
   int unlink(RGWBucketAdminOpState& op_state, std::string *err_msg = NULL);
 
@@ -306,7 +311,7 @@ public:
   static int check_index(RGWRados *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher);
 
-  static int remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state);
+  static int remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state, bool bypass_gc = false, bool keep_index_consistent = true);
   static int remove_object(RGWRados *store, RGWBucketAdminOpState& op_state);
   static int info(RGWRados *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher);
 };
diff --git a/src/rgw/rgw_civetweb.cc b/src/rgw/rgw_civetweb.cc
index d4a03e1..860c01b 100644
--- a/src/rgw/rgw_civetweb.cc
+++ b/src/rgw/rgw_civetweb.cc
@@ -19,9 +19,11 @@ int RGWMongoose::write_data(const char *buf, int len)
     data.append(buf, len);
     return len;
   }
-  int r = mg_write(conn, buf, len);
-  if (r == 0) {
-    /* didn't send anything, error out */
+  const int r = mg_write(conn, buf, len);
+  if (r <= 0) {
+    /* According to the documentation of mg_write(), it always returns -1 on
+     * error. The details aren't available, so we just return -EIO. The same
+     * goes for 0, which is associated with writing to a closed connection. */
     return -EIO;
   }
   return r;
@@ -36,7 +38,8 @@ RGWMongoose::RGWMongoose(mg_connection *_conn, int _port)
 
 int RGWMongoose::read_data(char *buf, int len)
 {
-  return mg_read(conn, buf, len);
+  const int ret = mg_read(conn, buf, len);
+  return (ret >= 0) ? ret : -EIO;
 }
 
 void RGWMongoose::flush()
diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h
index c392bc8..645f236 100644
--- a/src/rgw/rgw_common.h
+++ b/src/rgw/rgw_common.h
@@ -184,6 +184,7 @@ using ceph::crypto::MD5;
 #define ERR_USER_SUSPENDED       2100
 #define ERR_INTERNAL_ERROR       2200
 #define ERR_NOT_IMPLEMENTED      2201
+#define ERR_SERVICE_UNAVAILABLE  2202
 
 #ifndef UINT32_MAX
 #define UINT32_MAX (0xffffffffu)
@@ -422,6 +423,9 @@ enum RGWOpType {
   RGW_OP_DELETE_MULTI_OBJ,
   RGW_OP_BULK_DELETE,
   RGW_OP_SET_ATTRS,
+  RGW_OP_GET_CROSS_DOMAIN_POLICY,
+  RGW_OP_GET_HEALTH_CHECK,
+  RGW_OP_GET_INFO,
 
   /* rgw specific */
   RGW_OP_ADMIN_SET_METADATA
@@ -689,10 +693,15 @@ struct rgw_bucket {
 
   rgw_bucket() { }
   // cppcheck-suppress noExplicitConstructor
-  rgw_bucket(const cls_user_bucket& b) : name(b.name), data_pool(b.data_pool),
-					 data_extra_pool(b.data_extra_pool),
-					 index_pool(b.index_pool), marker(b.marker),
-					 bucket_id(b.bucket_id) {}
+  explicit rgw_bucket(const rgw_user& u, const cls_user_bucket& b)
+    : tenant(u.tenant),
+      name(b.name),
+      data_pool(b.data_pool),
+      data_extra_pool(b.data_extra_pool),
+      index_pool(b.index_pool),
+      marker(b.marker),
+      bucket_id(b.bucket_id) {
+  }
   rgw_bucket(const string& s) : name(s) {
     data_pool = index_pool = s;
     marker = "";
@@ -1348,11 +1357,13 @@ struct RGWBucketEnt {
 
   RGWBucketEnt() : size(0), size_rounded(0), count(0) {}
 
-  explicit RGWBucketEnt(const cls_user_bucket_entry& e) : bucket(e.bucket),
-		  					  size(e.size), 
-			  				  size_rounded(e.size_rounded),
-							  creation_time(e.creation_time),
-							  count(e.count) {}
+  explicit RGWBucketEnt(const rgw_user& u, const cls_user_bucket_entry& e)
+    : bucket(u, e.bucket),
+      size(e.size),
+      size_rounded(e.size_rounded),
+      creation_time(e.creation_time),
+      count(e.count) {
+  }
 
   void convert(cls_user_bucket_entry *b) {
     bucket.convert(&b->bucket);
diff --git a/src/rgw/rgw_coroutine.cc b/src/rgw/rgw_coroutine.cc
index 455c178..d9fb350 100644
--- a/src/rgw/rgw_coroutine.cc
+++ b/src/rgw/rgw_coroutine.cc
@@ -33,7 +33,6 @@ void RGWCompletionManager::register_completion_notifier(RGWAioCompletionNotifier
   Mutex::Locker l(lock);
   if (cn) {
     cns.insert(cn);
-    cn->get();
   }
 }
 
@@ -42,7 +41,6 @@ void RGWCompletionManager::unregister_completion_notifier(RGWAioCompletionNotifi
   Mutex::Locker l(lock);
   if (cn) {
     cns.erase(cn);
-    cn->put();
   }
 }
 
@@ -50,7 +48,6 @@ void RGWCompletionManager::_complete(RGWAioCompletionNotifier *cn, void *user_in
 {
   if (cn) {
     cns.erase(cn);
-    cn->put();
   }
   complete_reqs.push_back(user_info);
   cond.Signal();
@@ -363,8 +360,6 @@ bool RGWCoroutinesStack::collect(int *ret, RGWCoroutinesStack *skip_stack) /* re
   return collect(NULL, ret, skip_stack);
 }
 
-static void _aio_completion_notifier_cb(librados::completion_t cb, void *arg);
-
 static void _aio_completion_notifier_cb(librados::completion_t cb, void *arg)
 {
   ((RGWAioCompletionNotifier *)arg)->cb();
@@ -372,7 +367,8 @@ static void _aio_completion_notifier_cb(librados::completion_t cb, void *arg)
 
 RGWAioCompletionNotifier::RGWAioCompletionNotifier(RGWCompletionManager *_mgr, void *_user_data) : completion_mgr(_mgr),
                                                                          user_data(_user_data), lock("RGWAioCompletionNotifier"), registered(true) {
-  c = librados::Rados::aio_create_completion((void *)this, _aio_completion_notifier_cb, NULL);
+  c = librados::Rados::aio_create_completion((void *)this, NULL,
+					     _aio_completion_notifier_cb);
 }
 
 RGWAioCompletionNotifier *RGWCoroutinesStack::create_completion_notifier()
@@ -451,6 +447,7 @@ int RGWCoroutinesManager::run(list<RGWCoroutinesStack *>& stacks)
   int ret = 0;
   int blocked_count = 0;
   int interval_wait_count = 0;
+  bool canceled = false; // set on going_down
   RGWCoroutinesEnv env;
 
   uint64_t run_context = run_context_count.inc();
@@ -562,20 +559,19 @@ int RGWCoroutinesManager::run(list<RGWCoroutinesStack *>& stacks)
       if (going_down.read() > 0) {
 	ldout(cct, 5) << __func__ << "(): was stopped, exiting" << dendl;
 	ret = -ECANCELED;
+        canceled = true;
         break;
       }
       handle_unblocked_stack(context_stacks, scheduled_stacks, blocked_stack, &blocked_count);
       iter = scheduled_stacks.begin();
     }
-    if (ret == -ECANCELED) {
+    if (canceled) {
       break;
     }
 
     if (iter == scheduled_stacks.end()) {
       iter = scheduled_stacks.begin();
     }
-
-    ret = 0;
   }
 
   lock.get_write();
diff --git a/src/rgw/rgw_coroutine.h b/src/rgw/rgw_coroutine.h
index 6b17fa0..b92321f 100644
--- a/src/rgw/rgw_coroutine.h
+++ b/src/rgw/rgw_coroutine.h
@@ -7,6 +7,7 @@
 #endif
 
 #include <boost/asio.hpp>
+#include <boost/intrusive_ptr.hpp>
 
 #ifdef NEED_ASSERT_H
 #pragma pop_macro("_ASSERT_H")
@@ -30,7 +31,8 @@ class RGWAioCompletionNotifier;
 class RGWCompletionManager : public RefCountedObject {
   CephContext *cct;
   list<void *> complete_reqs;
-  set<RGWAioCompletionNotifier *> cns;
+  using NotifierRef = boost::intrusive_ptr<RGWAioCompletionNotifier>;
+  set<NotifierRef> cns;
 
   Mutex lock;
   Cond cond;
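
The NotifierRef change above makes the cns set hold boost::intrusive_ptr references, which is why the manual cn->get()/cn->put() pairs disappear from rgw_coroutine.cc earlier in this diff. A self-contained sketch of the pattern follows; the RefCounted class and the two free functions are stand-ins for Ceph's RefCountedObject, not the real types.

    // Sketch only: a set of intrusive_ptr keeps registered notifiers alive.
    #include <boost/intrusive_ptr.hpp>
    #include <atomic>
    #include <iostream>
    #include <set>

    struct RefCounted {
      std::atomic<int> nref{0};
      virtual ~RefCounted() = default;
    };

    // hooks boost::intrusive_ptr uses to manage the reference count
    inline void intrusive_ptr_add_ref(RefCounted* p) { ++p->nref; }
    inline void intrusive_ptr_release(RefCounted* p)
    {
      if (--p->nref == 0)
        delete p;
    }

    struct Notifier : RefCounted {
      ~Notifier() override { std::cout << "notifier destroyed" << std::endl; }
    };

    int main()
    {
      using NotifierRef = boost::intrusive_ptr<Notifier>;
      std::set<NotifierRef> cns;

      cns.insert(NotifierRef(new Notifier));  // ref held by the set, no manual get()
      cns.clear();                            // last ref dropped; "notifier destroyed"
    }
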
diff --git a/src/rgw/rgw_cors.cc b/src/rgw/rgw_cors.cc
index 1ad5b43..f2c7f3a 100644
--- a/src/rgw/rgw_cors.cc
+++ b/src/rgw/rgw_cors.cc
@@ -104,7 +104,8 @@ static bool is_string_in_set(set<string>& s, string h) {
         string sl = ssplit.front();
         dout(10) << "Finding " << sl << ", in " << h 
           << ", at offset not less than " << flen << dendl;
-        if (h.compare((h.size() - sl.size()), sl.size(), sl) != 0)
+        if (h.size() < sl.size() ||
+	    h.compare((h.size() - sl.size()), sl.size(), sl) != 0)
           continue;
         ssplit.pop_front();
       }
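
The added size check above matters because std::string::compare(pos, len, str) throws std::out_of_range when pos is past the end of the string; without it, a header value shorter than the wildcard suffix makes h.size() - sl.size() wrap around. A tiny standalone version of the guarded suffix test, with invented inputs:

    // Sketch only: suffix match guarded against size_t underflow.
    #include <iostream>
    #include <string>

    static bool ends_with(const std::string& h, const std::string& sl)
    {
      return h.size() >= sl.size() &&
             h.compare(h.size() - sl.size(), sl.size(), sl) == 0;
    }

    int main()
    {
      std::cout << ends_with("http://sub.example.com", "example.com") << std::endl;  // 1
      std::cout << ends_with("com", "example.com") << std::endl;                     // 0, no throw
    }
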
diff --git a/src/rgw/rgw_cr_rados.h b/src/rgw/rgw_cr_rados.h
index 24a284d..cc40ed1 100644
--- a/src/rgw/rgw_cr_rados.h
+++ b/src/rgw/rgw_cr_rados.h
@@ -11,21 +11,18 @@ class RGWAsyncRadosRequest : public RefCountedObject {
 
   int retcode;
 
-  bool done;
-
   Mutex lock;
 
 protected:
   virtual int _send_request() = 0;
 public:
   RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn) : caller(_caller), notifier(_cn), retcode(0),
-                                                                               done(false), lock("RGWAsyncRadosRequest::lock") {
-    notifier->get();
-    caller->get();
+                                                                               lock("RGWAsyncRadosRequest::lock") {
   }
   virtual ~RGWAsyncRadosRequest() {
-    notifier->put();
-    caller->put();
+    if (notifier) {
+      notifier->put();
+    }
   }
 
   void send_request() {
@@ -33,8 +30,9 @@ public:
     retcode = _send_request();
     {
       Mutex::Locker l(lock);
-      if (!done) {
-        notifier->cb();
+      if (notifier) {
+        notifier->cb(); // drops its own ref
+        notifier = nullptr;
       }
     }
     put();
@@ -45,7 +43,11 @@ public:
   void finish() {
     {
       Mutex::Locker l(lock);
-      done = true;
+      if (notifier) {
+        // we won't call notifier->cb() to drop its ref, so drop it here
+        notifier->put();
+        notifier = nullptr;
+      }
     }
     put();
   }
@@ -199,7 +201,7 @@ public:
   ~RGWSimpleRadosReadCR() {
     request_cleanup();
   }
-                                                         
+
   void request_cleanup() {
     if (req) {
       req->finish();
@@ -239,9 +241,17 @@ int RGWSimpleRadosReadCR<T>::request_complete()
     if (ret < 0) {
       return ret;
     }
-    bufferlist::iterator iter = bl.begin();
     try {
-      ::decode(*result, iter);
+      bufferlist::iterator iter = bl.begin();
+      if (iter.end()) {
+        // allow successful reads with empty buffers. ReadSyncStatus coroutines
+        // depend on this to be able to read without locking, because the
+        // cls lock from InitSyncStatus will create an empty object if it didn't
+        // exist
+        *result = T();
+      } else {
+        ::decode(*result, iter);
+      }
     } catch (buffer::error& err) {
       return -EIO;
     }
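
The change to RGWSimpleRadosReadCR::request_complete() above treats an empty buffer as a successful read of a default-constructed T, for the reason given in the comment. Below is a small decode-or-default sketch of that behaviour; the SyncStatus struct, the string buffer and decode() are stand-ins for the real sync-status types, bufferlist and ::decode(), not the actual API.

    // Sketch only: empty read yields a default value instead of a decode error.
    #include <cerrno>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct SyncStatus {
      int state = 0;  // default state, e.g. "init"
    };

    static void decode(SyncStatus& s, const std::string& buf)
    {
      if (buf.empty())
        throw std::runtime_error("short read");  // what a strict decode would do
      s.state = buf[0] - '0';
    }

    static int read_status(const std::string& buf, SyncStatus* result)
    {
      if (buf.empty()) {
        // the status object may exist but be empty (e.g. created by a cls lock
        // before any status was written); report success with defaults
        *result = SyncStatus();
        return 0;
      }
      try {
        decode(*result, buf);
      } catch (const std::exception&) {
        return -EIO;
      }
      return 0;
    }

    int main()
    {
      SyncStatus st;
      std::cout << read_status("", &st) << " state=" << st.state << std::endl;   // 0 state=0
      std::cout << read_status("2", &st) << " state=" << st.state << std::endl;  // 0 state=2
    }
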
diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc
index 101ddb0..e92f783 100644
--- a/src/rgw/rgw_data_sync.cc
+++ b/src/rgw/rgw_data_sync.cc
@@ -1642,13 +1642,14 @@ class RGWInitBucketShardSyncStatusCoroutine : public RGWCoroutine {
 
   string lock_name;
   string cookie;
-  rgw_bucket_shard_sync_info status;
+  rgw_bucket_shard_sync_info& status;
 
   bucket_index_marker_info info;
 public:
   RGWInitBucketShardSyncStatusCoroutine(RGWDataSyncEnv *_sync_env,
-                                        const rgw_bucket_shard& bs)
-    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs) {
+                                        const rgw_bucket_shard& bs,
+                                        rgw_bucket_shard_sync_info& _status)
+    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs), status(_status) {
     store = sync_env->store;
     lock_name = "sync_lock";
 
@@ -1709,7 +1710,7 @@ public:
 
 RGWCoroutine *RGWRemoteBucketLog::init_sync_status_cr()
 {
-  return new RGWInitBucketShardSyncStatusCoroutine(&sync_env, bs);
+  return new RGWInitBucketShardSyncStatusCoroutine(&sync_env, bs, init_status);
 }
 
 template <class T>
@@ -2640,8 +2641,7 @@ int RGWRunBucketSyncCoroutine::operate()
 
     yield {
       if ((rgw_bucket_shard_sync_info::SyncState)sync_status.state == rgw_bucket_shard_sync_info::StateInit) {
-        call(new RGWInitBucketShardSyncStatusCoroutine(sync_env, bs));
-        sync_status.state = rgw_bucket_shard_sync_info::StateFullSync;
+        call(new RGWInitBucketShardSyncStatusCoroutine(sync_env, bs, sync_status));
       }
     }
 
@@ -2694,9 +2694,6 @@ int RGWBucketSyncStatusManager::init()
     return -EINVAL;
   }
 
-  async_rados = new RGWAsyncRadosProcessor(store, store->ctx()->_conf->rgw_num_async_rados_threads);
-  async_rados->start();
-
   int ret = http_manager.set_threaded();
   if (ret < 0) {
     ldout(store->ctx(), 0) << "failed in http_manager.set_threaded() ret=" << ret << dendl;
@@ -2725,6 +2722,8 @@ int RGWBucketSyncStatusManager::init()
 
   int effective_num_shards = (num_shards ? num_shards : 1);
 
+  auto async_rados = store->get_async_rados();
+
   for (int i = 0; i < effective_num_shards; i++) {
     RGWRemoteBucketLog *l = new RGWRemoteBucketLog(store, this, async_rados, &http_manager);
     ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger);
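
The constructor change above lets RGWInitBucketShardSyncStatusCoroutine write the resulting state back into the caller's sync_status instead of the caller assuming StateFullSync. A toy, synchronous sketch of that calling convention follows (hypothetical names; the real coroutine completes asynchronously and the referenced status must outlive it).

    #include <cassert>

    enum class SyncState { Init, FullSync, Incremental };

    struct ShardSyncInfo { SyncState state = SyncState::Init; };

    // stands in for a coroutine that finishes asynchronously
    struct InitShardSync {
      ShardSyncInfo& status;              // caller-owned, must outlive this object
      explicit InitShardSync(ShardSyncInfo& s) : status(s) {}
      void run() { status.state = SyncState::FullSync; }   // result lands in the caller's copy
    };

    int main() {
      ShardSyncInfo sync_status;          // owned by the bucket-sync loop
      InitShardSync init(sync_status);
      init.run();
      assert(sync_status.state == SyncState::FullSync);    // caller observes the real outcome
    }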
diff --git a/src/rgw/rgw_data_sync.h b/src/rgw/rgw_data_sync.h
index f3fc2f2..b72bc90 100644
--- a/src/rgw/rgw_data_sync.h
+++ b/src/rgw/rgw_data_sync.h
@@ -403,6 +403,7 @@ class RGWRemoteBucketLog : public RGWCoroutinesManager {
   RGWHTTPManager *http_manager;
 
   RGWDataSyncEnv sync_env;
+  rgw_bucket_shard_sync_info init_status;
 
   RGWBucketSyncCR *sync_cr{nullptr};
 
@@ -429,7 +430,6 @@ class RGWBucketSyncStatusManager {
 
   RGWCoroutinesManager cr_mgr;
 
-  RGWAsyncRadosProcessor *async_rados;
   RGWHTTPManager http_manager;
 
   string source_zone;
@@ -453,7 +453,6 @@ public:
   RGWBucketSyncStatusManager(RGWRados *_store, const string& _source_zone,
                              const rgw_bucket& bucket) : store(_store),
                                                                                      cr_mgr(_store->ctx(), _store->get_cr_registry()),
-                                                                                     async_rados(NULL),
                                                                                      http_manager(store->ctx(), cr_mgr.get_completion_mgr()),
                                                                                      source_zone(_source_zone),
                                                                                      conn(NULL), error_logger(NULL),
diff --git a/src/rgw/rgw_env.cc b/src/rgw/rgw_env.cc
index ffa479b..6436ee8 100644
--- a/src/rgw/rgw_env.cc
+++ b/src/rgw/rgw_env.cc
@@ -7,6 +7,8 @@
 #include <string>
 #include <map>
 
+#include "include/assert.h"
+
 #define dout_subsys ceph_subsys_rgw
 
 RGWEnv::RGWEnv()
diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc
index 69f6b70..b87c388 100644
--- a/src/rgw/rgw_file.cc
+++ b/src/rgw/rgw_file.cc
@@ -80,22 +80,6 @@ namespace rgw {
     using std::get;
 
     LookupFHResult fhr{nullptr, 0};
-#if 0
-    RGWFileHandle::directory* d = parent->get_directory();
-    if (! d->name_cache.empty()) {
-      RGWFileHandle::dirent_string name{path};
-      const auto& diter = d->name_cache.find(name);
-      if (diter != d->name_cache.end()) {
-	fhr = lookup_fh(parent, path,
-			RGWFileHandle::FLAG_CREATE|
-			((diter->second == RGW_FS_TYPE_DIRECTORY) ?
-			  RGWFileHandle::FLAG_DIRECTORY :
-			  RGWFileHandle::FLAG_NONE));
-	if (get<0>(fhr))
-	  return fhr;
-      }
-    }
-#endif
 
     /* XXX the need for two round-trip operations to identify file or
      * directory leaf objects is unecessary--the current proposed
@@ -628,13 +612,34 @@ namespace rgw {
     rele();
   } /* RGWLibFS::close */
 
+  std::ostream& operator<<(std::ostream &os, RGWLibFS::event const &ev) {
+    os << "<event:";
+      switch (ev.t) {
+      case RGWLibFS::event::type::READDIR:
+	os << "type=READDIR;";
+	break;
+      default:
+	os << "type=UNKNOWN;";
+	break;
+      };
+    os << "fid=" << ev.fhk.fh_hk.bucket << ":" << ev.fhk.fh_hk.object
+       << ";ts=<timespec:" << ev.ts.tv_sec << ";" << ev.ts.tv_nsec << ">>";
+    return os;
+  }
+
   void RGWLibFS::gc()
   {
     using std::get;
     using directory = RGWFileHandle::directory;
 
-    static constexpr uint32_t max_ev = 24;
-    static constexpr uint16_t expire_s = 300; /* 5m */
+    /* dirent invalidate timeout--basically, the upper-bound on
+     * inconsistency with the S3 namespace */
+    auto expire_s
+      = get_context()->_conf->rgw_nfs_namespace_expire_secs;
+
+    /* max events to gc in one cycle */
+    uint32_t max_ev =
+      std::max(1, get_context()->_conf->rgw_nfs_max_gc);
 
     struct timespec now;
     event_vector ve;
@@ -645,11 +650,15 @@ namespace rgw {
     do {
       {
 	lock_guard guard(state.mtx); /* LOCKED */
+	/* just return if no events */
+	if (events.empty()) {
+	  return;
+	}
 	uint32_t _max_ev =
 	  (events.size() < 500) ? max_ev : (events.size() / 4);
 	for (uint32_t ix = 0; (ix < _max_ev) && (events.size() > 0); ++ix) {
 	  event& ev = events.front();
-	  if (ev.ts.tv_sec < (now.tv_sec + expire_s)) {
+	  if (ev.ts.tv_sec > (now.tv_sec + expire_s)) {
 	    stop = true;
 	    break;
 	  }
@@ -659,8 +668,12 @@ namespace rgw {
       } /* anon */
       /* !LOCKED */
       for (auto& ev : ve) {
+	lsubdout(get_context(), rgw, 15)
+	  << "try-expire ev: " << ev << dendl;
 	if (likely(ev.t == event::type::READDIR)) {
 	  RGWFileHandle* rgw_fh = lookup_handle(ev.fhk.fh_hk);
+	  lsubdout(get_context(), rgw, 15)
+	    << "ev rgw_fh: " << rgw_fh << dendl;
 	  if (rgw_fh) {
 	    RGWFileHandle::directory* d;
 	    if (unlikely(! rgw_fh->is_dir())) {
@@ -677,14 +690,15 @@ namespace rgw {
 	    if (d) {
 	      lock_guard guard(rgw_fh->mtx);
 	      d->clear_state();
+	      rgw_fh->invalidate();
 	    }
 	  rele:
 	    unref(rgw_fh);
 	  } /* rgw_fh */
 	} /* event::type::READDIR */
       } /* ev */
-      std::this_thread::sleep_for(std::chrono::seconds(120));
-    } while (! stop);
+      ve.clear();
+    } while (! (stop || shutdown));
   } /* RGWLibFS::gc */
 
   void RGWFileHandle::encode_attrs(ceph::buffer::list& ux_key1,
@@ -719,7 +733,6 @@ namespace rgw {
     int rc = 0;
     struct timespec now;
     CephContext* cct = fs->get_context();
-    directory* d = get_directory(); /* already type-checked */
 
     (void) clock_gettime(CLOCK_MONOTONIC_COARSE, &now); /* !LOCKED */
 
@@ -734,8 +747,9 @@ namespace rgw {
 				offset);
       rc = rgwlib.get_fe()->execute_req(&req);
       if (! rc) {
-	set_nlink(2 + d->name_cache.size());
+	lock_guard guard(mtx);
 	state.atime = now;
+	set_nlink(2 + 1);
 	*eof = req.eof();
 	event ev(event::type::READDIR, get_key(), state.atime);
 	fs->state.push_event(ev);
@@ -745,8 +759,9 @@ namespace rgw {
       RGWReaddirRequest req(cct, fs->get_user(), this, rcb, cb_arg, offset);
       rc = rgwlib.get_fe()->execute_req(&req);
       if (! rc) {
+	lock_guard guard(mtx);
 	state.atime = now;
-	set_nlink(2 + d->name_cache.size());
+	set_nlink(2 + 1);
 	*eof = req.eof();
 	event ev(event::type::READDIR, get_key(), state.atime);
 	fs->state.push_event(ev);
@@ -903,6 +918,18 @@ namespace rgw {
     delete write_req;
   }
 
+  void RGWFileHandle::directory::clear_state()
+  {
+    marker_cache.clear();
+  }
+
+  void RGWFileHandle::invalidate() {
+    RGWLibFS *fs = get_fs();
+    if (fs->invalidate_cb) {
+      fs->invalidate_cb(fs->invalidate_arg, get_key().fh_hk);
+    }
+  }
+
   int RGWWriteRequest::exec_start() {
     struct req_state* s = get_state();
 
@@ -1111,6 +1138,17 @@ extern "C" {
 }
 
 /*
+ register invalidate callbacks
+*/
+int rgw_register_invalidate(struct rgw_fs *rgw_fs, rgw_fh_callback_t cb,
+			    void *arg, uint32_t flags)
+
+{
+  RGWLibFS *fs = static_cast<RGWLibFS*>(rgw_fs->fs_private);
+  return fs->register_invalidate(cb, arg, flags);
+}
+
+/*
  detach rgw namespace
 */
 int rgw_umount(struct rgw_fs *rgw_fs, uint32_t flags)
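
The gc() rewrite above replaces the hard-coded limits with the rgw_nfs_namespace_expire_secs and rgw_nfs_max_gc options, returns early when the event queue is empty, and stops looping on shutdown. Below is a compact sketch of the assumed batched-expiry shape, using std::chrono and plain containers rather than the RGW types: pull at most max_ev aged events per pass under the lock, process them unlocked, and stop at the first event that is still fresh.

    #include <chrono>
    #include <cstddef>
    #include <deque>
    #include <mutex>
    #include <vector>

    using Clock = std::chrono::steady_clock;

    struct Event { Clock::time_point ts; int dir_handle; };

    class EventGc {
      std::mutex mtx;
      std::deque<Event> events;
      std::chrono::seconds expire_s;   // e.g. rgw_nfs_namespace_expire_secs
      std::size_t max_ev;              // e.g. rgw_nfs_max_gc
    public:
      EventGc(std::chrono::seconds e, std::size_t m) : expire_s(e), max_ev(m) {}

      void push(Event ev) { std::lock_guard<std::mutex> g(mtx); events.push_back(ev); }

      void gc() {
        const auto now = Clock::now();
        bool stop = false;
        do {
          std::vector<Event> batch;
          {
            std::lock_guard<std::mutex> g(mtx);                    // LOCKED
            if (events.empty()) return;
            for (std::size_t i = 0; i < max_ev && !events.empty(); ++i) {
              const Event& ev = events.front();
              if (now - ev.ts < expire_s) { stop = true; break; }  // still fresh: done for now
              batch.push_back(ev);
              events.pop_front();
            }
          }                                                        // !LOCKED
          for (const Event& ev : batch) {
            (void)ev; // invalidate the cached directory state for ev.dir_handle here
          }
        } while (!stop);
      }
    };

    int main() {
      EventGc gc(std::chrono::seconds(300), 24);
      gc.push({Clock::now() - std::chrono::seconds(600), 1}); // already expired
      gc.push({Clock::now(), 2});                             // still fresh
      gc.gc();                                                // expires the first, keeps the second
    }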
diff --git a/src/rgw/rgw_file.h b/src/rgw/rgw_file.h
index 69ec707..d9fc942 100644
--- a/src/rgw/rgw_file.h
+++ b/src/rgw/rgw_file.h
@@ -169,7 +169,6 @@ namespace rgw {
     using dirent_string = basic_sstring<char, uint16_t, 32>;
 
     using marker_cache_t = flat_map<uint64_t, dirent_string>;
-    using name_cache_t = flat_map<dirent_string, uint8_t>;
 
     struct State {
       uint64_t dev;
@@ -199,19 +198,10 @@ namespace rgw {
 
       uint32_t flags;
       marker_cache_t marker_cache;
-      name_cache_t name_cache;
 
       directory() : flags(FLAG_NONE) {}
 
-      void clear_state() {
-	marker_cache.clear();
-	name_cache.clear();
-      }
-
-      void set_overflow() {
-	clear_state();
-	flags |= FLAG_OVERFLOW;
-      }
+      void clear_state();
     };
 
     boost::variant<file, directory> variant_type;
@@ -463,18 +453,9 @@ namespace rgw {
 	// XXXX check for failure (dup key)
 	d->marker_cache.insert(
 	  marker_cache_t::value_type(off, marker.data()));
-	/* 90% of directories hold <= 32 entries (Yifan Wang, CMU),
-	 * but go big */
-	if (d->name_cache.size() < 128) {
-	  d->name_cache.insert(
-	    name_cache_t::value_type(marker.data(), obj_type));
-	} else {
-	  d->set_overflow(); // too many
-	}
       }
     }
 
-    /* XXX */
     std::string find_marker(uint64_t off) { // XXX copy
       using std::get;
       directory* d = get<directory>(&variant_type);
@@ -601,6 +582,8 @@ namespace rgw {
     void decode_attrs(const ceph::buffer::list* ux_key1,
 		      const ceph::buffer::list* ux_attrs1);
 
+    void invalidate();
+
     virtual bool reclaim();
 
     typedef cohort::lru::LRU<std::mutex> FhLRU;
@@ -698,6 +681,9 @@ namespace rgw {
     CephContext* cct;
     struct rgw_fs fs;
     RGWFileHandle root_fh;
+    rgw_fh_callback_t invalidate_cb;
+    void *invalidate_arg;
+    bool shutdown;
 
     mutable std::atomic<uint64_t> refcnt;
 
@@ -726,6 +712,9 @@ namespace rgw {
 	: t(t), fhk(k), ts(ts) {}
     };
 
+    friend std::ostream& operator<<(std::ostream &os,
+				    RGWLibFS::event const &ev);
+
     using event_vector = /* boost::small_vector<event, 16> */
       std::vector<event>;
 
@@ -753,7 +742,6 @@ namespace rgw {
       State() : flags(0) {}
 
       void push_event(const event& ev) {
-	lock_guard guard(mtx);
 	events.push_back(ev);
       }
     } state;
@@ -768,16 +756,20 @@ namespace rgw {
 
     RGWLibFS(CephContext* _cct, const char *_uid, const char *_user_id,
 	    const char* _key)
-      : cct(_cct), root_fh(this, get_inst()), refcnt(1),
+      : cct(_cct), root_fh(this, get_inst()), invalidate_cb(nullptr),
+	invalidate_arg(nullptr), shutdown(false), refcnt(1),
 	fh_cache(cct->_conf->rgw_nfs_fhcache_partitions,
 		 cct->_conf->rgw_nfs_fhcache_size),
 	fh_lru(cct->_conf->rgw_nfs_lru_lanes,
 	       cct->_conf->rgw_nfs_lru_lane_hiwat),
 	uid(_uid), key(_user_id, _key) {
 
+      /* fixup fs_inst */
+      root_fh.state.dev = ++fs_inst;
+
       /* no bucket may be named rgw_fs_inst-(.*) */
       fsid = RGWFileHandle::root_name + "rgw_fs_inst-" +
-	std::to_string(++(fs_inst));
+	std::to_string(fs_inst);
 
       root_fh.init_rootfs(fsid /* bucket */, RGWFileHandle::root_name);
 
@@ -808,6 +800,8 @@ namespace rgw {
       intrusive_ptr_release(this);
     }
 
+    void stop() { shutdown = true; }
+
     void release_evict(RGWFileHandle* fh) {
       /* remove from cache, releases sentinel ref */
       fh_cache.remove(fh->fh.fh_hk.object, fh,
@@ -851,6 +845,12 @@ namespace rgw {
       return ret;
     } /* authorize */
 
+    int register_invalidate(rgw_fh_callback_t cb, void *arg, uint32_t flags) {
+      invalidate_cb = cb;
+      invalidate_arg = arg;
+      return 0;
+    }
+
     /* find RGWFileHandle by id  */
     LookupFHResult lookup_fh(const fh_key& fhk,
 			     const uint32_t flags = RGWFileHandle::FLAG_NONE) {
@@ -1038,6 +1038,15 @@ namespace rgw {
       fh->mtx.unlock(); /* !LOCKED */
     out:
       lat.lock->unlock(); /* !LATCHED */
+
+      /* special case:  lookup root_fh */
+      if (! fh) {
+	if (unlikely(fh_hk == root_fh.fh.fh_hk)) {
+	  fh = &root_fh;
+	  ref(fh);
+	}
+      }
+
       return fh;
     }
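
rgw_register_invalidate() and RGWLibFS::register_invalidate() above simply stash a C-style callback plus an opaque argument, and RGWFileHandle::invalidate() fires it so an upper layer (an NFS gateway cache, for instance) can drop its copy of the handle. A self-contained sketch of that plumbing, with stand-in types for rgw_fh_callback_t and the handle key:

    #include <cstdint>
    #include <cstdio>

    struct fh_key { uint64_t bucket; uint64_t object; };
    using invalidate_cb_t = void (*)(void *arg, fh_key key);

    class Fs {
      invalidate_cb_t invalidate_cb = nullptr;
      void *invalidate_arg = nullptr;
    public:
      int register_invalidate(invalidate_cb_t cb, void *arg) {
        invalidate_cb = cb;
        invalidate_arg = arg;
        return 0;
      }
      void invalidate(fh_key key) {
        if (invalidate_cb) {
          invalidate_cb(invalidate_arg, key);   // upper layer drops its cached entry
        }
      }
    };

    static void on_invalidate(void *arg, fh_key key) {
      std::printf("invalidate %llu:%llu (ctx=%p)\n",
                  (unsigned long long)key.bucket, (unsigned long long)key.object, arg);
    }

    int main() {
      Fs fs;
      int ctx = 42;
      fs.register_invalidate(on_invalidate, &ctx);
      fs.invalidate({1, 2});
    }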
 
diff --git a/src/rgw/rgw_http_client.cc b/src/rgw/rgw_http_client.cc
index 283886a..c3f58d9 100644
--- a/src/rgw/rgw_http_client.cc
+++ b/src/rgw/rgw_http_client.cc
@@ -1,6 +1,9 @@
 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 
+#include "include/compat.h"
+#include <mutex>
+
 #include <curl/curl.h>
 #include <curl/easy.h>
 #include <curl/multi.h>
@@ -200,6 +203,14 @@ static curl_slist *headers_to_slist(list<pair<string, string> >& headers)
   return h;
 }
 
+static bool is_upload_request(const char *method)
+{
+  if (method == nullptr) {
+    return false;
+  }
+  return strcmp(method, "POST") == 0 || strcmp(method, "PUT") == 0;
+}
+
 /*
  * process a single simple one off request, not going through RGWHTTPManager. Not using
  * req_data.
@@ -234,7 +245,9 @@ int RGWHTTPClient::process(const char *method, const char *url)
   }
   curl_easy_setopt(curl_handle, CURLOPT_READFUNCTION, simple_send_http_data);
   curl_easy_setopt(curl_handle, CURLOPT_READDATA, (void *)this);
-  curl_easy_setopt(curl_handle, CURLOPT_UPLOAD, 1L); 
+  if (is_upload_request(method)) {
+    curl_easy_setopt(curl_handle, CURLOPT_UPLOAD, 1L);
+  }
   if (has_send_len) {
     curl_easy_setopt(curl_handle, CURLOPT_INFILESIZE, (void *)send_len); 
   }
@@ -310,7 +323,9 @@ int RGWHTTPClient::init_request(const char *method, const char *url, rgw_http_re
   }
   curl_easy_setopt(easy_handle, CURLOPT_READFUNCTION, send_http_data);
   curl_easy_setopt(easy_handle, CURLOPT_READDATA, (void *)req_data);
-  curl_easy_setopt(easy_handle, CURLOPT_UPLOAD, 1L); 
+  if (is_upload_request(method)) {
+    curl_easy_setopt(easy_handle, CURLOPT_UPLOAD, 1L);
+  }
   if (has_send_len) {
     curl_easy_setopt(easy_handle, CURLOPT_INFILESIZE, (void *)send_len); 
   }
@@ -344,8 +359,82 @@ RGWHTTPClient::~RGWHTTPClient()
 }
 
 
+static int clear_signal(int fd)
+{
+  // since we're in non-blocking mode, we can try to read a lot more than
+  // one signal from signal_thread() to avoid later wakeups. non-blocking reads
+  // are also required to support the curl_multi_wait bug workaround
+  std::array<char, 256> buf;
+  int ret = ::read(fd, (void *)buf.data(), buf.size());
+  if (ret < 0) {
+    ret = -errno;
+    return ret == -EAGAIN ? 0 : ret; // clear EAGAIN
+  }
+  return 0;
+}
+
 #if HAVE_CURL_MULTI_WAIT
 
+static std::once_flag detect_flag;
+static bool curl_multi_wait_bug_present = false;
+
+static int detect_curl_multi_wait_bug(CephContext *cct, CURLM *handle,
+                                      int write_fd, int read_fd)
+{
+  int ret = 0;
+
+  // write to write_fd so that read_fd becomes readable
+  uint32_t buf = 0;
+  ret = ::write(write_fd, &buf, sizeof(buf));
+  if (ret < 0) {
+    ret = -errno;
+    ldout(cct, 0) << "ERROR: " << __func__ << "(): write() returned " << ret << dendl;
+    return ret;
+  }
+
+  // pass read_fd in extra_fds for curl_multi_wait()
+  int num_fds;
+  struct curl_waitfd wait_fd;
+
+  wait_fd.fd = read_fd;
+  wait_fd.events = CURL_WAIT_POLLIN;
+  wait_fd.revents = 0;
+
+  ret = curl_multi_wait(handle, &wait_fd, 1, 0, &num_fds);
+  if (ret != CURLM_OK) {
+    ldout(cct, 0) << "ERROR: curl_multi_wait() returned " << ret << dendl;
+    return -EIO;
+  }
+
+  // curl_multi_wait should flag revents when extra_fd is readable. if it
+  // doesn't, the bug is present and we can't rely on revents
+  if (wait_fd.revents == 0) {
+    curl_multi_wait_bug_present = true;
+    ldout(cct, 0) << "WARNING: detected a version of libcurl which contains a "
+        "bug in curl_multi_wait(). enabling a workaround that may degrade "
+        "performance slightly." << dendl;
+  }
+
+  return clear_signal(read_fd);
+}
+
+static bool is_signaled(const curl_waitfd& wait_fd)
+{
+  if (wait_fd.fd < 0) {
+    // no fd to signal
+    return false;
+  }
+
+  if (curl_multi_wait_bug_present) {
+    // we can't rely on revents, so we always return true if a wait_fd is given.
+    // this means we'll be trying a non-blocking read on this fd every time that
+    // curl_multi_wait() wakes up
+    return true;
+  }
+
+  return wait_fd.revents > 0;
+}
+
 static int do_curl_wait(CephContext *cct, CURLM *handle, int signal_fd)
 {
   int num_fds;
@@ -357,16 +446,14 @@ static int do_curl_wait(CephContext *cct, CURLM *handle, int signal_fd)
 
   int ret = curl_multi_wait(handle, &wait_fd, 1, cct->_conf->rgw_curl_wait_timeout_ms, &num_fds);
   if (ret) {
-    dout(0) << "ERROR: curl_multi_wait() returned " << ret << dendl;
+    ldout(cct, 0) << "ERROR: curl_multi_wait() returned " << ret << dendl;
     return -EIO;
   }
 
-  if (wait_fd.revents > 0) {
-    uint32_t buf;
-    ret = read(signal_fd, (void *)&buf, sizeof(buf));
+  if (is_signaled(wait_fd)) {
+    ret = clear_signal(signal_fd);
     if (ret < 0) {
-      ret = -errno;
-      dout(0) << "ERROR: " << __func__ << "(): read() returned " << ret << dendl;
+      ldout(cct, 0) << "ERROR: " << __func__ << "(): read() returned " << ret << dendl;
       return ret;
     }
   }
@@ -389,12 +476,15 @@ static int do_curl_wait(CephContext *cct, CURLM *handle, int signal_fd)
   /* get file descriptors from the transfers */ 
   int ret = curl_multi_fdset(handle, &fdread, &fdwrite, &fdexcep, &maxfd);
   if (ret) {
-    generic_dout(0) << "ERROR: curl_multi_fdset returned " << ret << dendl;
+    ldout(cct, 0) << "ERROR: curl_multi_fdset returned " << ret << dendl;
     return -EIO;
   }
 
-  if (signal_fd >= maxfd) {
-    maxfd = signal_fd + 1;
+  if (signal_fd > 0) {
+    FD_SET(signal_fd, &fdread);
+    if (signal_fd >= maxfd) {
+      maxfd = signal_fd + 1;
+    }
   }
 
   /* forcing a strict timeout, as the returned fdsets might not reference all fds we wait on */
@@ -409,16 +499,14 @@ static int do_curl_wait(CephContext *cct, CURLM *handle, int signal_fd)
   ret = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
   if (ret < 0) {
     ret = -errno;
-    dout(0) << "ERROR: select returned " << ret << dendl;
+    ldout(cct, 0) << "ERROR: select returned " << ret << dendl;
     return ret;
   }
 
   if (signal_fd > 0 && FD_ISSET(signal_fd, &fdread)) {
-    uint32_t buf;
-    ret = read(signal_fd, (void *)&buf, sizeof(buf));
+    ret = clear_signal(signal_fd);
     if (ret < 0) {
-      ret = -errno;
-      dout(0) << "ERROR: " << __func__ << "(): read() returned " << ret << dendl;
+      ldout(cct, 0) << "ERROR: " << __func__ << "(): read() returned " << ret << dendl;
       return ret;
     }
   }
@@ -707,16 +795,34 @@ int RGWHTTPManager::complete_requests()
 
 int RGWHTTPManager::set_threaded()
 {
-  is_threaded = true;
-  reqs_thread = new ReqsThread(this);
-  reqs_thread->create("http_manager");
-
   int r = pipe(thread_pipe);
   if (r < 0) {
     r = -errno;
     ldout(cct, 0) << "ERROR: pipe() returned errno=" << r << dendl;
     return r;
   }
+
+  // enable non-blocking reads
+  r = ::fcntl(thread_pipe[0], F_SETFL, O_NONBLOCK);
+  if (r < 0) {
+    r = -errno;
+    ldout(cct, 0) << "ERROR: fcntl() returned errno=" << r << dendl;
+    TEMP_FAILURE_RETRY(::close(thread_pipe[0]));
+    TEMP_FAILURE_RETRY(::close(thread_pipe[1]));
+    return r;
+  }
+
+#ifdef HAVE_CURL_MULTI_WAIT
+  // on first initialization, use this pipe to detect whether we're using a
+  // buggy version of libcurl
+  std::call_once(detect_flag, detect_curl_multi_wait_bug, cct,
+                 static_cast<CURLM*>(multi_handle),
+                 thread_pipe[1], thread_pipe[0]);
+#endif
+
+  is_threaded = true;
+  reqs_thread = new ReqsThread(this);
+  reqs_thread->create("http_manager");
   return 0;
 }
 
@@ -733,6 +839,8 @@ void RGWHTTPManager::stop()
     signal_thread();
     reqs_thread->join();
     delete reqs_thread;
+    TEMP_FAILURE_RETRY(::close(thread_pipe[1]));
+    TEMP_FAILURE_RETRY(::close(thread_pipe[0]));
   }
 }
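
set_threaded() above now creates the wakeup pipe before starting the thread, puts its read end into non-blocking mode, and (when curl_multi_wait() is available) probes once for the libcurl bug that fails to set revents for extra fds; clear_signal() then drains the pipe with a single large read and swallows EAGAIN. A POSIX-only sketch of the non-blocking drain, independent of libcurl:

    #include <array>
    #include <cerrno>
    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    static int clear_signal(int fd) {
      std::array<char, 256> buf;
      ssize_t r = ::read(fd, buf.data(), buf.size());
      if (r < 0) {
        int err = -errno;
        return err == -EAGAIN ? 0 : err;   // EAGAIN just means the pipe was already empty
      }
      return 0;
    }

    int main() {
      int pipefd[2];
      if (::pipe(pipefd) < 0) return 1;
      if (::fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0) return 1;  // enable non-blocking reads

      ::write(pipefd[1], "x", 1);          // one wakeup from the signalling side
      ::write(pipefd[1], "x", 1);          // a second wakeup queued before we woke

      std::printf("first drain:  %d\n", clear_signal(pipefd[0]));  // 0, both bytes consumed
      std::printf("second drain: %d\n", clear_signal(pipefd[0]));  // 0, EAGAIN swallowed

      ::close(pipefd[0]);
      ::close(pipefd[1]);
    }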
 
diff --git a/src/rgw/rgw_http_errors.h b/src/rgw/rgw_http_errors.h
index aebf801..2d998d2 100644
--- a/src/rgw/rgw_http_errors.h
+++ b/src/rgw/rgw_http_errors.h
@@ -67,6 +67,7 @@ const static struct rgw_http_errors RGW_HTTP_ERRORS[] = {
     { ERR_LOCKED, 423, "Locked" },
     { ERR_INTERNAL_ERROR, 500, "InternalError" },
     { ERR_NOT_IMPLEMENTED, 501, "NotImplemented" },
+    { ERR_SERVICE_UNAVAILABLE, 503, "ServiceUnavailable"}
 };
 
 const static struct rgw_http_errors RGW_HTTP_SWIFT_ERRORS[] = {
diff --git a/src/rgw/rgw_json_enc.cc b/src/rgw/rgw_json_enc.cc
index 0062dc5..0593963 100644
--- a/src/rgw/rgw_json_enc.cc
+++ b/src/rgw/rgw_json_enc.cc
@@ -536,8 +536,9 @@ void RGWBucketEntryPoint::dump(Formatter *f) const
 void RGWBucketEntryPoint::decode_json(JSONObj *obj) {
   JSONDecoder::decode_json("bucket", bucket, obj);
   JSONDecoder::decode_json("owner", owner, obj);
-  utime_t ut(creation_time);
+  utime_t ut;
   JSONDecoder::decode_json("creation_time", ut, obj);
+  creation_time = ut.to_real_time();
   JSONDecoder::decode_json("linked", linked, obj);
   JSONDecoder::decode_json("has_bucket_info", has_bucket_info, obj);
   if (has_bucket_info) {
@@ -646,12 +647,14 @@ void RGWBucketInfo::dump(Formatter *f) const
   }
   encode_json("swift_versioning", swift_versioning, f);
   encode_json("swift_ver_location", swift_ver_location, f);
+  encode_json("index_type", (uint32_t)index_type, f);
 }
 
 void RGWBucketInfo::decode_json(JSONObj *obj) {
   JSONDecoder::decode_json("bucket", bucket, obj);
-  utime_t ut(creation_time);
+  utime_t ut;
   JSONDecoder::decode_json("creation_time", ut, obj);
+  creation_time = ut.to_real_time();
   JSONDecoder::decode_json("owner", owner, obj);
   JSONDecoder::decode_json("flags", flags, obj);
   JSONDecoder::decode_json("zonegroup", zonegroup, obj);
@@ -673,6 +676,9 @@ void RGWBucketInfo::decode_json(JSONObj *obj) {
   }
   JSONDecoder::decode_json("swift_versioning", swift_versioning, obj);
   JSONDecoder::decode_json("swift_ver_location", swift_ver_location, obj);
+  uint32_t it;
+  JSONDecoder::decode_json("index_type", it, obj);
+  index_type = (RGWBucketIndexType)it;
 }
 
 void rgw_obj_key::dump(Formatter *f) const
@@ -1037,8 +1043,9 @@ void RGWMetadataLogInfo::dump(Formatter *f) const
 void RGWMetadataLogInfo::decode_json(JSONObj *obj)
 {
   JSONDecoder::decode_json("marker", marker, obj);
-  utime_t ut(last_update);
+  utime_t ut;
   JSONDecoder::decode_json("last_update", ut, obj);
+  last_update = ut.to_real_time();
 }
 
 void RGWDataChangesLogInfo::dump(Formatter *f) const
@@ -1051,8 +1058,9 @@ void RGWDataChangesLogInfo::dump(Formatter *f) const
 void RGWDataChangesLogInfo::decode_json(JSONObj *obj)
 {
   JSONDecoder::decode_json("marker", marker, obj);
-  utime_t ut(last_update);
+  utime_t ut;
   JSONDecoder::decode_json("last_update", ut, obj);
+  last_update = ut.to_real_time();
 }
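
Each decode_json() fix above reads the timestamp into a scratch utime_t and then actually assigns it back through to_real_time(); previously the decoded value was never copied into the member. A stand-alone sketch of that conversion step, with a plain {sec, nsec} struct standing in for utime_t and std::chrono::system_clock for real_time:

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    struct utime_like { uint64_t sec = 0; uint32_t nsec = 0; };   // stands in for utime_t

    using real_time = std::chrono::system_clock::time_point;

    real_time to_real_time(const utime_like& ut) {
      auto d = std::chrono::seconds(ut.sec) + std::chrono::nanoseconds(ut.nsec);
      return real_time(std::chrono::duration_cast<real_time::duration>(d));
    }

    int main() {
      // pretend this came out of JSONDecoder::decode_json("creation_time", ut, obj)
      utime_like ut{1489000000, 500};
      real_time creation_time = to_real_time(ut);   // assign only after a successful decode
      std::cout << std::chrono::duration_cast<std::chrono::seconds>(
                       creation_time.time_since_epoch()).count() << "\n";
    }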
 
 
diff --git a/src/rgw/rgw_ldap.h b/src/rgw/rgw_ldap.h
index 5d33406..ab84d82 100644
--- a/src/rgw/rgw_ldap.h
+++ b/src/rgw/rgw_ldap.h
@@ -79,7 +79,7 @@ namespace rgw {
       int ret = ldap_initialize(&tldap, uri.c_str());
       if (ret == LDAP_SUCCESS) {
 	unsigned long ldap_ver = LDAP_VERSION3;
-	ret = ldap_set_option(ldap, LDAP_OPT_PROTOCOL_VERSION,
+	ret = ldap_set_option(tldap, LDAP_OPT_PROTOCOL_VERSION,
 			      (void*) &ldap_ver);
 	if (ret == LDAP_SUCCESS) {
 	  ret = ldap_simple_bind_s(tldap, dn, pwd.c_str());
diff --git a/src/rgw/rgw_lib_frontend.h b/src/rgw/rgw_lib_frontend.h
index 9f407f1..af99638 100644
--- a/src/rgw/rgw_lib_frontend.h
+++ b/src/rgw/rgw_lib_frontend.h
@@ -33,6 +33,13 @@ namespace rgw {
     void run();
     void checkpoint();
 
+    void stop() {
+      shutdown = true;
+      for (const auto& fs: mounted_fs) {
+	fs.second->stop();
+      }
+    }
+
     void register_fs(RGWLibFS* fs) {
       lock_guard guard(mtx);
       mounted_fs.insert(FSMAP::value_type(fs, fs));
diff --git a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc
index 3bef606..3768fef 100644
--- a/src/rgw/rgw_log.cc
+++ b/src/rgw/rgw_log.cc
@@ -12,6 +12,7 @@
 #include "rgw_acl.h"
 #include "rgw_rados.h"
 #include "rgw_client_io.h"
+#include "rgw_rest.h"
 
 #define dout_subsys ceph_subsys_rgw
 
@@ -245,11 +246,20 @@ void rgw_format_ops_log_entry(struct rgw_log_entry& entry, Formatter *formatter)
   formatter->dump_int("bytes_sent", entry.bytes_sent);
   formatter->dump_int("bytes_received", entry.bytes_received);
   formatter->dump_int("object_size", entry.obj_size);
-  uint64_t total_time =  entry.total_time.sec() * 1000000LL * entry.total_time.usec();
+  uint64_t total_time =  entry.total_time.sec() * 1000000LL + entry.total_time.usec();
 
   formatter->dump_int("total_time", total_time);
   formatter->dump_string("user_agent",  entry.user_agent);
   formatter->dump_string("referrer",  entry.referrer);
+  if (entry.x_headers.size() > 0) {
+    formatter->open_array_section("http_x_headers");
+    for (const auto& iter: entry.x_headers) {
+      formatter->open_object_section(iter.first.c_str());
+      formatter->dump_string(iter.first.c_str(), iter.second);
+      formatter->close_section();
+    }
+    formatter->close_section();
+  }
   formatter->close_section();
 }
 
@@ -290,7 +300,8 @@ void OpsLogSocket::log(struct rgw_log_entry& entry)
   append_output(bl);
 }
 
-int rgw_log_op(RGWRados *store, struct req_state *s, const string& op_name, OpsLogSocket *olog)
+int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s,
+	       const string& op_name, OpsLogSocket *olog)
 {
   struct rgw_log_entry entry;
   string bucket_id;
@@ -339,6 +350,18 @@ int rgw_log_op(RGWRados *store, struct req_state *s, const string& op_name, OpsL
   set_param_str(s, "REQUEST_URI", entry.uri);
   set_param_str(s, "REQUEST_METHOD", entry.op);
 
+  /* custom header logging */
+  if (rest) {
+    if (rest->log_x_headers()) {
+      for (const auto& iter : s->info.env->get_map()) {
+	if (rest->log_x_header(iter.first)) {
+	  entry.x_headers.insert(
+	    rgw_log_entry::headers_map::value_type(iter.first, iter.second));
+	}
+      }
+    }
+  }
+
   entry.user = s->user->user_id.to_str();
   if (s->object_acl)
     entry.object_owner = s->object_acl->get_owner().get_id();
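
Besides correcting the total_time arithmetic (the seconds term was multiplied by the microsecond remainder instead of having it added), rgw_log_op() above now copies the operator-selected request headers into the ops-log entry. A small sketch of that filtering step, using ordinary std containers in place of the RGW ones:

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    using headers_map = std::map<std::string, std::string>;

    headers_map capture_x_headers(const std::set<std::string>& configured,
                                  const std::map<std::string, std::string>& env) {
      headers_map out;
      for (const auto& kv : env) {
        if (configured.count(kv.first)) {      // rest->log_x_header(name) in the patch
          out.insert(kv);
        }
      }
      return out;
    }

    int main() {
      std::set<std::string> configured = {"HTTP_X_FORWARDED_FOR"};
      std::map<std::string, std::string> env = {
        {"HTTP_X_FORWARDED_FOR", "10.0.0.1"},
        {"HTTP_USER_AGENT", "curl/7.47.0"},
      };
      for (const auto& kv : capture_x_headers(configured, env))
        std::cout << kv.first << ": " << kv.second << "\n";   // only the configured header
    }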
diff --git a/src/rgw/rgw_log.h b/src/rgw/rgw_log.h
index 51acfdf..ab800c7 100644
--- a/src/rgw/rgw_log.h
+++ b/src/rgw/rgw_log.h
@@ -4,6 +4,7 @@
 #ifndef CEPH_RGW_LOG_H
 #define CEPH_RGW_LOG_H
 
+#include <boost/container/flat_set.hpp>
 #include "rgw_common.h"
 #include "include/utime.h"
 #include "common/Formatter.h"
@@ -12,6 +13,9 @@
 class RGWRados;
 
 struct rgw_log_entry {
+
+  using headers_map = std::map<std::string, std::string>;
+
   rgw_user object_owner;
   rgw_user bucket_owner;
   string bucket;
@@ -30,9 +34,10 @@ struct rgw_log_entry {
   string user_agent;
   string referrer;
   string bucket_id;
+  headers_map x_headers;
 
   void encode(bufferlist &bl) const {
-    ENCODE_START(8, 5, bl);
+    ENCODE_START(9, 5, bl);
     ::encode(object_owner.id, bl);
     ::encode(bucket_owner.id, bl);
     ::encode(bucket, bl);
@@ -54,6 +59,7 @@ struct rgw_log_entry {
     ::encode(obj, bl);
     ::encode(object_owner, bl);
     ::encode(bucket_owner, bl);
+    ::encode(x_headers, bl);
     ENCODE_FINISH(bl);
   }
   void decode(bufferlist::iterator &p) {
@@ -100,6 +106,9 @@ struct rgw_log_entry {
       ::decode(object_owner, p);
       ::decode(bucket_owner, p);
     }
+    if (struct_v >= 9) {
+      ::decode(x_headers, p);
+    }
     DECODE_FINISH(p);
   }
   void dump(Formatter *f) const;
@@ -123,10 +132,14 @@ public:
   void log(struct rgw_log_entry& entry);
 };
 
-int rgw_log_op(RGWRados *store, struct req_state *s, const string& op_name, OpsLogSocket *olog);
+class RGWREST;
+
+int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s,
+	       const string& op_name, OpsLogSocket *olog);
 void rgw_log_usage_init(CephContext *cct, RGWRados *store);
 void rgw_log_usage_finalize();
-void rgw_format_ops_log_entry(struct rgw_log_entry& entry, Formatter *formatter);
+void rgw_format_ops_log_entry(struct rgw_log_entry& entry,
+			      Formatter *formatter);
 
-#endif
+#endif /* CEPH_RGW_LOG_H */
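
The encoder above bumps rgw_log_entry from version 8 to 9 and appends x_headers, while the decoder reads the new field only when struct_v >= 9, so entries written by older daemons still decode. A generic sketch of that versioned round trip, with a simplified text format standing in for the ENCODE_START/DECODE_START machinery:

    #include <cassert>
    #include <cstddef>
    #include <map>
    #include <sstream>
    #include <string>

    struct LogEntry {
      std::string bucket;
      std::map<std::string, std::string> x_headers;   // added in encoding version 9

      void encode(std::ostream& os) const {
        unsigned struct_v = 9;                         // was 8 before the new field
        os << struct_v << '\n' << bucket << '\n' << x_headers.size() << '\n';
        for (const auto& kv : x_headers) os << kv.first << '\n' << kv.second << '\n';
      }

      void decode(std::istream& is) {
        unsigned struct_v; is >> struct_v; is.ignore();
        std::getline(is, bucket);
        if (struct_v >= 9) {                           // older writers simply omit the field
          std::size_t n; is >> n; is.ignore();
          for (std::size_t i = 0; i < n; ++i) {
            std::string k, v; std::getline(is, k); std::getline(is, v);
            x_headers[k] = v;
          }
        }
      }
    };

    int main() {
      LogEntry in{"mybucket", {{"HTTP_X_FORWARDED_FOR", "10.0.0.1"}}}, out;
      std::stringstream ss;
      in.encode(ss);
      out.decode(ss);
      assert(out.bucket == "mybucket" && out.x_headers.size() == 1);
    }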
 
diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc
index b5adf5e..d03fd25 100644
--- a/src/rgw/rgw_main.cc
+++ b/src/rgw/rgw_main.cc
@@ -255,6 +255,20 @@ int main(int argc, const char **argv)
     }
   }
 
+  // maintain existing region root pool for new multisite objects
+  if (!g_conf->rgw_region_root_pool.empty()) {
+    const char *root_pool = g_conf->rgw_region_root_pool.c_str();
+    if (g_conf->rgw_zonegroup_root_pool.empty()) {
+      g_conf->set_val_or_die("rgw_zonegroup_root_pool", root_pool);
+    }
+    if (g_conf->rgw_period_root_pool.empty()) {
+      g_conf->set_val_or_die("rgw_period_root_pool", root_pool);
+    }
+    if (g_conf->rgw_realm_root_pool.empty()) {
+      g_conf->set_val_or_die("rgw_realm_root_pool", root_pool);
+    }
+  }
+
   check_curl();
 
   if (g_conf->daemonize) {
@@ -325,20 +339,54 @@ int main(int argc, const char **argv)
   }
 
   // S3 website mode is a specialization of S3
-  bool s3website_enabled = apis_map.count("s3website") > 0;
-  if (apis_map.count("s3") > 0 || s3website_enabled)
-    rest.register_default_mgr(set_logging(new RGWRESTMgr_S3(s3website_enabled)));
+  const bool s3website_enabled = apis_map.count("s3website") > 0;
+  // Swift API entrypoint could placed in the root instead of S3
+  const bool swift_at_root = g_conf->rgw_swift_url_prefix == "/";
+  if (apis_map.count("s3") > 0 || s3website_enabled) {
+    if (! swift_at_root) {
+      rest.register_default_mgr(set_logging(new RGWRESTMgr_S3(s3website_enabled)));
+    } else {
+      derr << "Cannot have the S3 or S3 Website enabled together with "
+           << "Swift API placed in the root of hierarchy" << dendl;
+      return EINVAL;
+    }
+  }
 
   if (apis_map.count("swift") > 0) {
     do_swift = true;
     swift_init(g_ceph_context);
-    rest.register_resource(g_conf->rgw_swift_url_prefix,
-			   set_logging(new RGWRESTMgr_SWIFT));
+
+    RGWRESTMgr_SWIFT* const swift_resource = new RGWRESTMgr_SWIFT;
+
+    if (! g_conf->rgw_cross_domain_policy.empty()) {
+      swift_resource->register_resource("crossdomain.xml",
+                          set_logging(new RGWRESTMgr_SWIFT_CrossDomain));
+    }
+
+    swift_resource->register_resource("healthcheck",
+                          set_logging(new RGWRESTMgr_SWIFT_HealthCheck));
+
+    swift_resource->register_resource("info",
+                          set_logging(new RGWRESTMgr_SWIFT_Info));
+
+    if (! swift_at_root) {
+      rest.register_resource(g_conf->rgw_swift_url_prefix,
+                          set_logging(swift_resource));
+    } else {
+      if (store->get_zonegroup().zones.size() > 1) {
+        derr << "Placing Swift API in the root of URL hierarchy while running"
+             << " multi-site configuration requires another instance of RadosGW"
+             << " with S3 API enabled!" << dendl;
+      }
+
+      rest.register_default_mgr(set_logging(swift_resource));
+    }
   }
 
-  if (apis_map.count("swift_auth") > 0)
+  if (apis_map.count("swift_auth") > 0) {
     rest.register_resource(g_conf->rgw_swift_auth_entry,
 			   set_logging(new RGWRESTMgr_SWIFT_Auth));
+  }
 
   if (apis_map.count("admin") > 0) {
     RGWRESTMgr_Admin *admin_resource = new RGWRESTMgr_Admin;
@@ -356,6 +404,9 @@ int main(int argc, const char **argv)
     rest.register_resource(g_conf->rgw_admin_entry, admin_resource);
   }
 
+  /* Header custom behavior */
+  rest.register_x_headers(g_conf->rgw_log_http_headers);
+
   OpsLogSocket *olog = NULL;
 
   if (!g_conf->rgw_ops_log_socket_path.empty()) {
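
The block added near the top of main() above keeps multisite objects in the legacy region root pool: when rgw_region_root_pool is set, it seeds rgw_zonegroup_root_pool, rgw_period_root_pool and rgw_realm_root_pool, but only where the operator left them empty. A sketch of that defaulting rule, modeling the config store as a plain map rather than md_config_t:

    #include <initializer_list>
    #include <iostream>
    #include <map>
    #include <string>

    void apply_region_root_pool_compat(std::map<std::string, std::string>& conf) {
      const std::string& legacy = conf["rgw_region_root_pool"];
      if (legacy.empty()) return;
      for (const char* opt : {"rgw_zonegroup_root_pool",
                              "rgw_period_root_pool",
                              "rgw_realm_root_pool"}) {
        if (conf[opt].empty()) conf[opt] = legacy;      // keep explicit overrides untouched
      }
    }

    int main() {
      std::map<std::string, std::string> conf = {
        {"rgw_region_root_pool", ".us.rgw.root"},
        {"rgw_period_root_pool", ".us.rgw.period"},     // explicitly set, must survive
      };
      apply_region_root_pool_compat(conf);
      for (const auto& kv : conf) std::cout << kv.first << " = " << kv.second << "\n";
    }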
diff --git a/src/rgw/rgw_metadata.cc b/src/rgw/rgw_metadata.cc
index ea6e793..5c7b760 100644
--- a/src/rgw/rgw_metadata.cc
+++ b/src/rgw/rgw_metadata.cc
@@ -3,6 +3,7 @@
 
 #include <boost/intrusive_ptr.hpp>
 #include "common/ceph_json.h"
+#include "common/errno.h"
 #include "rgw_metadata.h"
 #include "rgw_coroutine.h"
 #include "cls/version/cls_version_types.h"
@@ -13,6 +14,8 @@
 #include "rgw_cr_rados.h"
 #include "rgw_boost_asio_yield.h"
 
+#include "include/assert.h"
+
 #define dout_subsys ceph_subsys_rgw
 
 void LogStatusDump::dump(Formatter *f) const {
@@ -176,71 +179,35 @@ int RGWMetadataLog::get_info(int shard_id, RGWMetadataLogInfo *info)
   return 0;
 }
 
-static void _mdlog_info_completion(librados::completion_t cb, void *arg);
-
-class RGWMetadataLogInfoCompletion : public RefCountedObject {
-  RGWMetadataLogInfo *pinfo;
-  RGWCompletionManager *completion_manager;
-  void *user_info;
-  int *pret;
-  cls_log_header header;
-  librados::IoCtx io_ctx;
-  librados::AioCompletion *completion;
-
-public:
-  RGWMetadataLogInfoCompletion(RGWMetadataLogInfo *_pinfo, RGWCompletionManager *_cm, void *_uinfo, int *_pret) :
-                                               pinfo(_pinfo), completion_manager(_cm), user_info(_uinfo), pret(_pret) {
-    completion = librados::Rados::aio_create_completion((void *)this, _mdlog_info_completion, NULL);
-  }
-
-  ~RGWMetadataLogInfoCompletion() {
-    completion->release();
-  }
-
-  void finish(librados::completion_t cb) {
-    *pret = completion->get_return_value();
-    if (*pret >= 0) {
-      pinfo->marker = header.max_marker;
-      pinfo->last_update = header.max_time.to_real_time();
-    }
-    completion_manager->complete(NULL, user_info);
-    put();
-  }
-
-  librados::IoCtx& get_io_ctx() { return io_ctx; }
-
-  cls_log_header *get_header() {
-    return &header;
-  }
-
-  librados::AioCompletion *get_completion() {
-    return completion;
-  }
-};
-
 static void _mdlog_info_completion(librados::completion_t cb, void *arg)
 {
-  RGWMetadataLogInfoCompletion *infoc = (RGWMetadataLogInfoCompletion *)arg;
+  auto infoc = static_cast<RGWMetadataLogInfoCompletion *>(arg);
   infoc->finish(cb);
+  infoc->put(); // drop the ref from get_info_async()
 }
 
-int RGWMetadataLog::get_info_async(int shard_id, RGWMetadataLogInfo *info, RGWCompletionManager *completion_manager, void *user_info, int *pret)
+RGWMetadataLogInfoCompletion::RGWMetadataLogInfoCompletion(info_callback_t cb)
+  : completion(librados::Rados::aio_create_completion((void *)this, nullptr,
+                                                      _mdlog_info_completion)),
+    callback(cb)
 {
-  string oid;
-  get_shard_oid(shard_id, oid);
-
-  RGWMetadataLogInfoCompletion *req_completion = new RGWMetadataLogInfoCompletion(info, completion_manager, user_info, pret);
+}
 
-  req_completion->get();
+RGWMetadataLogInfoCompletion::~RGWMetadataLogInfoCompletion()
+{
+  completion->release();
+}
 
-  int ret = store->time_log_info_async(req_completion->get_io_ctx(), oid, req_completion->get_header(), req_completion->get_completion());
-  if (ret < 0) {
-    return ret;
-  }
+int RGWMetadataLog::get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion)
+{
+  string oid;
+  get_shard_oid(shard_id, oid);
 
-  req_completion->put();
+  completion->get(); // hold a ref until the completion fires
 
-  return 0;
+  return store->time_log_info_async(completion->get_io_ctx(), oid,
+                                    &completion->get_header(),
+                                    completion->get_completion());
 }
 
 int RGWMetadataLog::trim(int shard_id, const real_time& from_time, const real_time& end_time,
@@ -345,6 +312,29 @@ public:
 
 static RGWMetadataTopHandler md_top_handler;
 
+
+static const std::string mdlog_history_oid = "meta.history";
+
+struct RGWMetadataLogHistory {
+  epoch_t oldest_realm_epoch;
+  std::string oldest_period_id;
+
+  void encode(bufferlist& bl) const {
+    ENCODE_START(1, 1, bl);
+    ::encode(oldest_realm_epoch, bl);
+    ::encode(oldest_period_id, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(bufferlist::iterator& p) {
+    DECODE_START(1, p);
+    ::decode(oldest_realm_epoch, p);
+    ::decode(oldest_period_id, p);
+    DECODE_FINISH(p);
+  }
+};
+WRITE_CLASS_ENCODER(RGWMetadataLogHistory)
+
+
 RGWMetadataManager::RGWMetadataManager(CephContext *_cct, RGWRados *_store)
   : cct(_cct), store(_store)
 {
@@ -363,93 +353,55 @@ RGWMetadataManager::~RGWMetadataManager()
 
 namespace {
 
-class FindAnyShardCR : public RGWCoroutine {
-  RGWRados *const store;
-  const RGWMetadataLog& mdlog;
-  const int num_shards;
-  int ret = 0;
- public:
-  FindAnyShardCR(RGWRados *store, const RGWMetadataLog& mdlog, int num_shards)
-    : RGWCoroutine(store->ctx()), store(store), mdlog(mdlog),
-      num_shards(num_shards) {}
-
-  int operate() {
-    reenter(this) {
-      // send stat requests for each shard in parallel
-      yield {
-        auto async_rados = store->get_async_rados();
-        auto& pool = store->get_zone_params().log_pool;
-        auto oid = std::string{};
-
-        for (int i = 0; i < num_shards; i++) {
-          mdlog.get_shard_oid(i, oid);
-          auto obj = rgw_obj{pool, oid};
-          spawn(new RGWStatObjCR(async_rados, store, obj), true);
-        }
-      }
-      drain_all();
-      // if any shards were found, return success
-      while (collect_next(&ret)) {
-        if (ret == 0) {
-          // TODO: cancel instead of waiting for the rest
-          return set_cr_done();
-        }
-        ret = 0; // collect_next() won't modify &ret unless it's a failure
-      }
-      // no shards found
-      set_retcode(-ENOENT);
-      return set_cr_error(-ENOENT);
-    }
-    return 0;
+int read_history(RGWRados *store, RGWMetadataLogHistory *state)
+{
+  RGWObjectCtx ctx{store};
+  auto& pool = store->get_zone_params().log_pool;
+  const auto& oid = mdlog_history_oid;
+  bufferlist bl;
+  int ret = rgw_get_system_obj(store, ctx, pool, oid, bl, nullptr, nullptr);
+  if (ret < 0) {
+    return ret;
   }
-};
+  try {
+    auto p = bl.begin();
+    state->decode(p);
+  } catch (buffer::error& e) {
+    ldout(store->ctx(), 1) << "failed to decode the mdlog history: "
+        << e.what() << dendl;
+    return -EIO;
+  }
+  return 0;
+}
 
-// return true if any log shards exist for the given period
-int find_shards_for_period(RGWRados *store, const std::string& period_id)
+int write_history(RGWRados *store, const RGWMetadataLogHistory& state,
+                  bool exclusive = false)
 {
-  auto cct = store->ctx();
-  RGWMetadataLog mdlog(cct, store, period_id);
-  auto num_shards = cct->_conf->rgw_md_log_max_shards;
+  bufferlist bl;
+  state.encode(bl);
 
-  using FindAnyShardCRRef = boost::intrusive_ptr<FindAnyShardCR>;
-  auto cr = FindAnyShardCRRef{new FindAnyShardCR(store, mdlog, num_shards)};
-
-  RGWCoroutinesManager mgr(cct, nullptr);
-  int r = mgr.run(cr.get());
-  if (r < 0) {
-    return r;
-  }
-  return cr->get_ret_status();
+  auto& pool = store->get_zone_params().log_pool;
+  const auto& oid = mdlog_history_oid;
+  return rgw_put_system_obj(store, pool, oid, bl.c_str(), bl.length(),
+                            exclusive, nullptr, real_time{});
 }
 
-RGWPeriodHistory::Cursor find_oldest_log_period(RGWRados *store)
+using Cursor = RGWPeriodHistory::Cursor;
+
+// traverse all the way back to the beginning of the period history, and
+// return a cursor to the first period in a fully attached history
+Cursor find_oldest_period(RGWRados *store)
 {
-  // search backwards through the period history for the first period with no
-  // log shard objects, and return its successor (some shards may be missing
-  // if they contain no metadata yet, so we need to check all shards)
+  auto cct = store->ctx();
   auto cursor = store->period_history->get_current();
-  auto oldest_log = cursor;
 
   while (cursor) {
-    // search for an existing log shard object for this period
-    int r = find_shards_for_period(store, cursor.get_period().get_id());
-    if (r == -ENOENT) {
-      ldout(store->ctx(), 10) << "find_oldest_log_period found no log shards "
-          "for period " << cursor.get_period().get_id() << "; returning "
-          "period " << oldest_log.get_period().get_id() << dendl;
-      return oldest_log;
-    }
-    if (r < 0) {
-      return RGWPeriodHistory::Cursor{r};
-    }
-    oldest_log = cursor;
-
     // advance to the period's predecessor
     if (!cursor.has_prev()) {
       auto& predecessor = cursor.get_period().get_predecessor();
       if (predecessor.empty()) {
         // this is the first period, so our logs must start here
-        ldout(store->ctx(), 10) << "find_oldest_log_period returning first "
+        ldout(cct, 10) << "find_oldest_period returning first "
             "period " << cursor.get_period().get_id() << dendl;
         return cursor;
       }
@@ -457,30 +409,99 @@ RGWPeriodHistory::Cursor find_oldest_log_period(RGWRados *store)
       RGWPeriod period;
       int r = store->period_puller->pull(predecessor, period);
       if (r < 0) {
-        return RGWPeriodHistory::Cursor{r};
+        return Cursor{r};
       }
       auto prev = store->period_history->insert(std::move(period));
       if (!prev) {
         return prev;
       }
-      ldout(store->ctx(), 10) << "find_oldest_log_period advancing to "
+      ldout(cct, 20) << "find_oldest_period advancing to "
           "predecessor period " << predecessor << dendl;
       assert(cursor.has_prev());
     }
     cursor.prev();
   }
-  ldout(store->ctx(), 10) << "find_oldest_log_period returning empty cursor" << dendl;
+  ldout(cct, 10) << "find_oldest_period returning empty cursor" << dendl;
   return cursor;
 }
 
 } // anonymous namespace
 
-int RGWMetadataManager::init(const std::string& current_period)
+Cursor RGWMetadataManager::init_oldest_log_period()
 {
-  if (store->is_meta_master()) {
-    // find our oldest log so we can tell other zones where to start their sync
-    oldest_log_period = find_oldest_log_period(store);
+  // read the mdlog history
+  RGWMetadataLogHistory state;
+  int ret = read_history(store, &state);
+
+  if (ret == -ENOENT) {
+    // initialize the mdlog history and write it
+    ldout(cct, 10) << "initializing mdlog history" << dendl;
+    auto cursor = find_oldest_period(store);
+    if (!cursor) {
+      return cursor;
+    }
+
+    // write the initial history
+    state.oldest_realm_epoch = cursor.get_epoch();
+    state.oldest_period_id = cursor.get_period().get_id();
+
+    constexpr bool exclusive = true; // don't overwrite
+    int ret = write_history(store, state, exclusive);
+    if (ret < 0 && ret != -EEXIST) {
+      ldout(cct, 1) << "failed to write mdlog history: "
+          << cpp_strerror(ret) << dendl;
+      return Cursor{ret};
+    }
+    return cursor;
+  } else if (ret < 0) {
+    ldout(cct, 1) << "failed to read mdlog history: "
+        << cpp_strerror(ret) << dendl;
+    return Cursor{ret};
   }
+
+  // if it's already in the history, return it
+  auto cursor = store->period_history->lookup(state.oldest_realm_epoch);
+  if (cursor) {
+    return cursor;
+  }
+  // pull the oldest period by id
+  RGWPeriod period;
+  ret = store->period_puller->pull(state.oldest_period_id, period);
+  if (ret < 0) {
+    ldout(cct, 1) << "failed to read period id=" << state.oldest_period_id
+        << " for mdlog history: " << cpp_strerror(ret) << dendl;
+    return Cursor{ret};
+  }
+  // verify its realm_epoch
+  if (period.get_realm_epoch() != state.oldest_realm_epoch) {
+    ldout(cct, 1) << "inconsistent mdlog history: read period id="
+        << period.get_id() << " with realm_epoch=" << period.get_realm_epoch()
+        << ", expected realm_epoch=" << state.oldest_realm_epoch << dendl;
+    return Cursor{-EINVAL};
+  }
+  // attach the period to our history
+  return store->period_history->attach(std::move(period));
+}
+
+Cursor RGWMetadataManager::read_oldest_log_period() const
+{
+  RGWMetadataLogHistory state;
+  int ret = read_history(store, &state);
+  if (ret < 0) {
+    ldout(store->ctx(), 1) << "failed to read mdlog history: "
+        << cpp_strerror(ret) << dendl;
+    return Cursor{ret};
+  }
+
+  ldout(store->ctx(), 10) << "read mdlog history with oldest period id="
+      << state.oldest_period_id << " realm_epoch="
+      << state.oldest_realm_epoch << dendl;
+
+  return store->period_history->lookup(state.oldest_realm_epoch);
+}
+
+int RGWMetadataManager::init(const std::string& current_period)
+{
   // open a log for the current period
   current_log = get_log(current_period);
   return 0;
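
The new read_history()/write_history()/init_oldest_log_period() path above replaces the shard-probing coroutine with a small persistent record ({oldest_realm_epoch, oldest_period_id}) stored at meta.history, created with an exclusive write so racing radosgw instances cannot clobber each other, with -EEXIST treated as success. A condensed sketch of that bootstrap, with the RADOS system object replaced by an in-memory map:

    #include <cerrno>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    struct LogHistory { uint32_t oldest_realm_epoch = 0; std::string oldest_period_id; };

    std::map<std::string, LogHistory> g_store;   // stands in for the log pool

    int read_history(LogHistory* out) {
      auto it = g_store.find("meta.history");
      if (it == g_store.end()) return -ENOENT;
      *out = it->second;
      return 0;
    }

    int write_history(const LogHistory& h, bool exclusive) {
      if (exclusive && g_store.count("meta.history")) return -EEXIST;
      g_store["meta.history"] = h;
      return 0;
    }

    int init_oldest_log_period() {
      LogHistory state;
      int ret = read_history(&state);
      if (ret == -ENOENT) {
        state = {1, "first-period"};               // found by walking period history backwards
        ret = write_history(state, true /* exclusive: don't overwrite */);
        if (ret < 0 && ret != -EEXIST) return ret; // losing the creation race is fine
        return 0;
      }
      return ret;
    }

    int main() {
      std::cout << init_oldest_log_period() << "\n";  // 0: history created
      std::cout << init_oldest_log_period() << "\n";  // 0: history already present
    }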
diff --git a/src/rgw/rgw_metadata.h b/src/rgw/rgw_metadata.h
index 7cd51e0..1b83ae0 100644
--- a/src/rgw/rgw_metadata.h
+++ b/src/rgw/rgw_metadata.h
@@ -5,6 +5,7 @@
 #define CEPH_RGW_METADATA_H
 
 #include <string>
+#include <boost/optional.hpp>
 
 #include "include/types.h"
 #include "rgw_common.h"
@@ -12,6 +13,7 @@
 #include "cls/version/cls_version_types.h"
 #include "cls/log/cls_log_types.h"
 #include "common/RWLock.h"
+#include "common/RefCountedObj.h"
 #include "common/ceph_time.h"
 
 
@@ -140,6 +142,35 @@ struct RGWMetadataLogInfo {
 
 class RGWCompletionManager;
 
+class RGWMetadataLogInfoCompletion : public RefCountedObject {
+ public:
+  using info_callback_t = std::function<void(int, const cls_log_header&)>;
+ private:
+  cls_log_header header;
+  librados::IoCtx io_ctx;
+  librados::AioCompletion *completion;
+  std::mutex mutex; //< protects callback between cancel/complete
+  boost::optional<info_callback_t> callback; //< cleared on cancel
+ public:
+  RGWMetadataLogInfoCompletion(info_callback_t callback);
+  virtual ~RGWMetadataLogInfoCompletion();
+
+  librados::IoCtx& get_io_ctx() { return io_ctx; }
+  cls_log_header& get_header() { return header; }
+  librados::AioCompletion* get_completion() { return completion; }
+
+  void finish(librados::completion_t cb) {
+    std::lock_guard<std::mutex> lock(mutex);
+    if (callback) {
+      (*callback)(completion->get_return_value(), header);
+    }
+  }
+  void cancel() {
+    std::lock_guard<std::mutex> lock(mutex);
+    callback = boost::none;
+  }
+};
+
 class RGWMetadataLog {
   CephContext *cct;
   RGWRados *store;
@@ -193,7 +224,7 @@ public:
 
   int trim(int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker);
   int get_info(int shard_id, RGWMetadataLogInfo *info);
-  int get_info_async(int shard_id, RGWMetadataLogInfo *info, RGWCompletionManager *completion_manager, void *user_info, int *pret);
+  int get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion);
   int lock_exclusive(int shard_id, timespan duration, string&zone_id, string& owner_id);
   int unlock(int shard_id, string& zone_id, string& owner_id);
 
@@ -232,8 +263,6 @@ class RGWMetadataManager {
   std::map<std::string, RGWMetadataLog> md_logs;
   // use the current period's log for mutating operations
   RGWMetadataLog* current_log = nullptr;
-  // oldest log's position in the period history
-  RGWPeriodHistory::Cursor oldest_log_period;
 
   void parse_metadata_key(const string& metadata_key, string& type, string& entry);
 
@@ -255,9 +284,13 @@ public:
 
   int init(const std::string& current_period);
 
-  RGWPeriodHistory::Cursor get_oldest_log_period() const {
-    return oldest_log_period;
-  }
+  /// initialize the oldest log period if it doesn't exist, and attach it to
+  /// our current history
+  RGWPeriodHistory::Cursor init_oldest_log_period();
+
+  /// read the oldest log period, and return a cursor to it in our existing
+  /// period history
+  RGWPeriodHistory::Cursor read_oldest_log_period() const;
 
   /// find or create the metadata log for the given period
   RGWMetadataLog* get_log(const std::string& period);
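
RGWMetadataLogInfoCompletion above keeps its callback in a boost::optional guarded by a mutex: finish() invokes it only if it is still set, and cancel() clears it so a completion that fires after shutdown does nothing. A minimal sketch of that contract, with std::optional standing in for boost::optional and an int return code in place of the cls_log_header payload:

    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <optional>

    class InfoCompletion {
      std::mutex mutex;                                  // protects callback between cancel/finish
      std::optional<std::function<void(int)>> callback;
    public:
      explicit InfoCompletion(std::function<void(int)> cb) : callback(std::move(cb)) {}

      void finish(int ret) {                             // invoked from the AIO callback thread
        std::lock_guard<std::mutex> lock(mutex);
        if (callback) (*callback)(ret);
      }
      void cancel() {                                    // invoked by the owner on shutdown
        std::lock_guard<std::mutex> lock(mutex);
        callback = std::nullopt;
      }
    };

    int main() {
      InfoCompletion c([](int ret) { std::cout << "completed, ret=" << ret << "\n"; });
      c.finish(0);     // prints
      c.cancel();
      c.finish(0);     // no-op after cancel
    }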
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 2da2129..d1431f5 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -3,6 +3,7 @@
 
 #include <errno.h>
 #include <stdlib.h>
+#include <unistd.h>
 
 #include <sstream>
 
@@ -1578,8 +1579,8 @@ void RGWSetBucketVersioning::execute()
     op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
     if (op_ret < 0) {
       ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
+      return;
     }
-    return;
   }
 
   if (enable_versioning) {
@@ -2203,6 +2204,14 @@ void RGWDeleteBucket::execute()
   }
 
   op_ret = store->delete_bucket(s->bucket, ot);
+
+  if (op_ret == -ECANCELED) {
+    // lost a race, either with mdlog sync or another delete bucket operation.
+    // in either case, we've already called rgw_unlink_bucket()
+    op_ret = 0;
+    return;
+  }
+
   if (op_ret == 0) {
     op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
 			       s->bucket.name, false);
@@ -4391,6 +4400,16 @@ void RGWListBucketMultiparts::execute()
   }
 }
 
+void RGWGetHealthCheck::execute()
+{
+  if (! g_conf->rgw_healthcheck_disabling_path.empty() &&
+      ::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK )) {
+    op_ret = -ERR_SERVICE_UNAVAILABLE;
+  } else {
+    op_ret = 0; /* 200 OK */
+  }
+}
+
 int RGWDeleteMultiObj::verify_permission()
 {
   if (!verify_bucket_permission(s, RGW_PERM_WRITE))
@@ -4449,7 +4468,8 @@ void RGWDeleteMultiObj::execute()
   for (iter = multi_delete->objects.begin();
         iter != multi_delete->objects.end() && num_processed < max_to_delete;
         ++iter, num_processed++) {
-    rgw_obj obj(bucket, *iter);
+    rgw_obj obj(bucket, iter->name);
+    obj.set_instance(iter->instance);
 
     obj_ctx->set_atomic(obj);
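
Among the changes above, RGWGetHealthCheck::execute() ties the health probe's status to an access(2) check on rgw_healthcheck_disabling_path. The sketch below mirrors the patched condition as written (503 when a path is configured and the access() check fails, 200 otherwise), using plain HTTP codes in place of the RGW error constants:

    #include <cstdio>
    #include <string>
    #include <unistd.h>

    int healthcheck_status(const std::string& disabling_path) {
      if (!disabling_path.empty() && ::access(disabling_path.c_str(), F_OK) != 0) {
        return 503;            // ERR_SERVICE_UNAVAILABLE in the patch
      }
      return 200;
    }

    int main() {
      std::printf("%d\n", healthcheck_status(""));                    // no path configured: 200
      std::printf("%d\n", healthcheck_status("/nonexistent/flag"));   // access() fails: 503
    }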
 
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index 624139b..b6751b0 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -1302,6 +1302,58 @@ public:
 };
 
 
+class RGWGetCrossDomainPolicy : public RGWOp {
+public:
+  RGWGetCrossDomainPolicy() = default;
+  ~RGWGetCrossDomainPolicy() = default;
+
+  int verify_permission() override {
+    return 0;
+  }
+
+  void execute() override {
+    op_ret = 0;
+  }
+
+  const string name() override {
+    return "get_crossdomain_policy";
+  }
+
+  RGWOpType get_type() override {
+    return RGW_OP_GET_CROSS_DOMAIN_POLICY;
+  }
+
+  uint32_t op_mask() override {
+    return RGW_OP_TYPE_READ;
+  }
+};
+
+
+class RGWGetHealthCheck : public RGWOp {
+public:
+  RGWGetHealthCheck() = default;
+  ~RGWGetHealthCheck() = default;
+
+  int verify_permission() override {
+    return 0;
+  }
+
+  void execute() override;
+
+  const string name() override {
+    return "get_health_check";
+  }
+
+  RGWOpType get_type() override {
+    return RGW_OP_GET_HEALTH_CHECK;
+  }
+
+  uint32_t op_mask() override {
+    return RGW_OP_TYPE_READ;
+  }
+};
+
+
 class RGWDeleteMultiObj : public RGWOp {
 protected:
   int max_to_delete;
@@ -1334,6 +1386,17 @@ public:
   virtual uint32_t op_mask() { return RGW_OP_TYPE_DELETE; }
 };
 
+class RGWInfo: public RGWOp {
+public:
+  RGWInfo() = default;
+  ~RGWInfo() = default;
+
+  int verify_permission() override { return 0; }
+  const string name() override { return "get info"; }
+  RGWOpType get_type() override { return RGW_OP_GET_INFO; }
+  uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
+};
+
 class RGWHandler {
 protected:
   RGWRados *store;
diff --git a/src/rgw/rgw_period_history.cc b/src/rgw/rgw_period_history.cc
index 2992623..eff0e78 100644
--- a/src/rgw/rgw_period_history.cc
+++ b/src/rgw/rgw_period_history.cc
@@ -4,6 +4,8 @@
 #include "rgw_period_history.h"
 #include "rgw_rados.h"
 
+#include "include/assert.h"
+
 #define dout_subsys ceph_subsys_rgw
 
 #undef dout_prefix
diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc
index 6a92c49..97b4d13 100644
--- a/src/rgw/rgw_process.cc
+++ b/src/rgw/rgw_process.cc
@@ -81,6 +81,7 @@ int process_request(RGWRados* store, RGWREST* rest, RGWRequest* req,
     abort_early(s, NULL, -ERR_METHOD_NOT_ALLOWED, handler);
     goto done;
   }
+
   req->op = op;
   dout(10) << "op=" << typeid(*op).name() << dendl;
 
@@ -180,7 +181,7 @@ done:
     dout(0) << "ERROR: client_io->complete_request() returned " << r << dendl;
   }
   if (should_log) {
-    rgw_log_op(store, s, (op ? op->name() : "unknown"), olog);
+    rgw_log_op(store, rest, s, (op ? op->name() : "unknown"), olog);
   }
 
   int http_ret = s->err.http_ret;
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 3f34850..d6284c7 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -1,4 +1,3 @@
-
 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 
@@ -64,8 +63,6 @@ using namespace librados;
 
 #define dout_subsys ceph_subsys_rgw
 
-#define MAX_BUCKET_INDEX_SHARDS_PRIME 7877
-
 using namespace std;
 
 static RGWCache<RGWRados> cached_rados_provider;
@@ -388,8 +385,8 @@ int RGWSystemMetaObj::init(CephContext *_cct, RGWRados *_store, bool setup_obj,
     } else if (!old_format) {
       r = read_id(name, id);
       if (r < 0) {
-	ldout(cct, 0) << "error in read_id for id " << id << " : " << cpp_strerror(-r) << dendl;
-	return r;
+        ldout(cct, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl;
+        return r;
       }
     }
   }
@@ -902,7 +899,12 @@ int RGWPeriod::init(CephContext *_cct, RGWRados *_store, bool setup_obj)
 
 
 int RGWPeriod::get_zonegroup(RGWZoneGroup& zonegroup, const string& zonegroup_id) {
-  map<string, RGWZoneGroup>::const_iterator iter = period_map.zonegroups.find(zonegroup_id);
+  map<string, RGWZoneGroup>::const_iterator iter;
+  if (!zonegroup_id.empty()) {
+    iter = period_map.zonegroups.find(zonegroup_id);
+  } else {
+    iter = period_map.zonegroups.find("default");
+  }
   if (iter != period_map.zonegroups.end()) {
     zonegroup = iter->second;
     return 0;
@@ -3237,9 +3239,9 @@ int RGWRados::convert_regionmap()
 {
   RGWZoneGroupMap zonegroupmap;
 
-  string pool_name = cct->_conf->rgw_zone_root_pool;
+  string pool_name = cct->_conf->rgw_region_root_pool;
   if (pool_name.empty()) {
-    pool_name = RGW_DEFAULT_ZONE_ROOT_POOL;
+    pool_name = RGW_DEFAULT_ZONEGROUP_ROOT_POOL;
   }
   string oid = region_map_oid; 
 
@@ -3568,7 +3570,7 @@ int RGWRados::init_zg_from_period(bool *initialized)
     return 0;
   }
   if (ret < 0) {
-    ldout(cct, 0) << "failed reading zonegroup info: " << " " << cpp_strerror(-ret) << dendl;
+    ldout(cct, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl;
     return ret;
   }
   ldout(cct, 20) << "period zonegroup name " << zonegroup.get_name() << dendl;
@@ -3579,10 +3581,23 @@ int RGWRados::init_zg_from_period(bool *initialized)
   if (iter != current_period.get_map().zonegroups.end()) {
     ldout(cct, 20) << "using current period zonegroup " << zonegroup.get_name() << dendl;
     zonegroup = iter->second;
+    ret = zonegroup.init(cct, this, false);
+    if (ret < 0) {
+      ldout(cct, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl;
+      return ret;
+    }
     ret = zone_params.init(cct, this);
     if (ret < 0 && ret != -ENOENT) {
       ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
       return ret;
+    } if (ret ==-ENOENT && zonegroup.get_name() == default_zonegroup_name) {
+      ldout(cct, 10) << " Using default name "<< default_zone_name << dendl;
+      zone_params.set_name(default_zone_name);
+      ret = zone_params.init(cct, this);
+      if (ret < 0 && ret != -ENOENT) {
+       ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
+       return ret;
+      }
     }
   }
   for (iter = current_period.get_map().zonegroups.begin();
@@ -3591,9 +3606,37 @@ int RGWRados::init_zg_from_period(bool *initialized)
     // use endpoints from the zonegroup's master zone
     auto master = zg.zones.find(zg.master_zone);
     if (master == zg.zones.end()) {
-      ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing zone for "
-          "master_zone=" << zg.master_zone << dendl;
-      return -EINVAL;
+      // fix missing master zone for a single zone zonegroup
+      if (zg.master_zone.empty() && zg.zones.size() == 1) {
+	master = zg.zones.begin();
+	ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " <<
+	  master->second.name << " id:" << master->second.id << " as master" << dendl;
+	if (zonegroup.get_id() == zg.get_id()) {
+	  zonegroup.master_zone = master->second.id;
+	  ret = zonegroup.update();
+	  if (ret < 0) {
+	    ldout(cct, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl;
+	    return ret;
+	  }
+	} else {
+	  RGWZoneGroup fixed_zg(zg.get_id(),zg.get_name());
+	  ret = fixed_zg.init(cct, this);
+	  if (ret < 0) {
+	    ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
+	    return ret;
+	  }
+	  fixed_zg.master_zone = master->second.id;
+	  ret = fixed_zg.update();
+	  if (ret < 0) {
+	    ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
+	    return ret;
+	  }
+	}
+      } else {
+	ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" <<
+	  zg.master_zone << dendl;
+	return -EINVAL;
+      }
     }
     const auto& endpoints = master->second.endpoints;
     add_new_connection_to_map(zonegroup_conn_map, zg, new RGWRESTConn(cct, this, zg.get_id(), endpoints));
@@ -3635,9 +3678,22 @@ int RGWRados::init_zg_from_local(bool *creating_defaults)
     // use endpoints from the zonegroup's master zone
     auto master = zonegroup.zones.find(zonegroup.master_zone);
     if (master == zonegroup.zones.end()) {
-      ldout(cct, 0) << "zonegroup " << zonegroup.get_name() << " missing zone for "
+      // fix missing master zone for a single-zone zonegroup
+      if (zonegroup.master_zone.empty() && zonegroup.zones.size() == 1) {
+	master = zonegroup.zones.begin();
+	ldout(cct, 0) << "zonegroup " << zonegroup.get_name() << " missing master_zone, setting zone " <<
+	  master->second.name << " id:" << master->second.id << " as master" << dendl;
+	zonegroup.master_zone = master->second.id;
+	ret = zonegroup.update();
+	if (ret < 0) {
+	  ldout(cct, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl;
+	  return ret;
+	}
+      } else {
+	ldout(cct, 0) << "zonegroup " << zonegroup.get_name() << " missing zone for "
           "master_zone=" << zonegroup.master_zone << dendl;
-      return -EINVAL;
+	return -EINVAL;
+      }
     }
     const auto& endpoints = master->second.endpoints;
     rest_master_conn = new RGWRESTConn(cct, this, zonegroup.get_id(), endpoints);
@@ -3791,6 +3847,13 @@ int RGWRados::init_complete()
     obj_expirer->start_processor();
   }
 
+  if (run_sync_thread) {
+    // initialize the log period history. we want to do this any time we're not
+    // running under radosgw-admin, so we check run_sync_thread here before
+    // disabling it based on the zone/zonegroup setup
+    meta_mgr->init_oldest_log_period();
+  }
+
   /* no point in running the sync thread if there is a single zone or
      we don't have a master zone configured or there is no rest_master_conn */
   if (get_zonegroup().zones.size() < 2 || get_zonegroup().master_zone.empty() || !rest_master_conn) {
@@ -4881,21 +4944,20 @@ int RGWRados::Bucket::List::list_objects(int max, vector<RGWObjEnt> *result,
 
   result->clear();
 
-  rgw_obj marker_obj, end_marker_obj, prefix_obj;
-  marker_obj.set_instance(params.marker.instance);
-  marker_obj.set_ns(params.ns);
-  marker_obj.set_obj(params.marker.name);
-  rgw_obj_key cur_marker;
-  marker_obj.get_index_key(&cur_marker);
-
-  end_marker_obj.set_instance(params.end_marker.instance);
-  end_marker_obj.set_ns(params.ns);
-  end_marker_obj.set_obj(params.end_marker.name);
+  rgw_bucket b;
+  rgw_obj marker_obj(b, params.marker);
+  rgw_obj end_marker_obj(b, params.end_marker);
+  rgw_obj prefix_obj;
   rgw_obj_key cur_end_marker;
-  if (params.ns.empty()) { /* no support for end marker for namespaced objects */
+  if (!params.ns.empty()) {
+    marker_obj.set_ns(params.ns);
+    end_marker_obj.set_ns(params.ns);
     end_marker_obj.get_index_key(&cur_end_marker);
   }
-  const bool cur_end_marker_valid = !cur_end_marker.empty();
+  rgw_obj_key cur_marker;
+  marker_obj.get_index_key(&cur_marker);
+
+  const bool cur_end_marker_valid = !params.end_marker.empty();
 
   prefix_obj.set_ns(params.ns);
   prefix_obj.set_obj(params.prefix);
@@ -4974,8 +5036,8 @@ int RGWRados::Bucket::List::list_objects(int max, vector<RGWObjEnt> *result,
       }
 
       if (count < max) {
-        params.marker = obj;
-        next_marker = obj;
+        params.marker = key;
+        next_marker = key;
       }
 
       if (params.filter && !params.filter->filter(obj.name, key.name))
@@ -5072,7 +5134,7 @@ int RGWRados::init_bucket_index(rgw_bucket& bucket, int num_shards)
     return r;
 
   string dir_oid =  dir_oid_prefix;
-  dir_oid.append(bucket.marker);
+  dir_oid.append(bucket.bucket_id);
 
   map<int, string> bucket_objs;
   get_bucket_index_objects(dir_oid, num_shards, bucket_objs);
@@ -5080,6 +5142,15 @@ int RGWRados::init_bucket_index(rgw_bucket& bucket, int num_shards)
   return CLSRGWIssueBucketIndexInit(index_ctx, bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
 }
 
+void RGWRados::create_bucket_id(string *bucket_id)
+{
+  uint64_t iid = instance_id();
+  uint64_t bid = next_bucket_id();
+  char buf[get_zone_params().get_id().size() + 48];
+  snprintf(buf, sizeof(buf), "%s.%llu.%llu", get_zone_params().get_id().c_str(), (long long)iid, (long long)bid);
+  *bucket_id = buf;
+}
+
 /**
  * create a bucket with name bucket and the given list of attrs
  * returns 0 on success, -ERR# otherwise.
@@ -5120,11 +5191,7 @@ int RGWRados::create_bucket(RGWUserInfo& owner, rgw_bucket& bucket,
       return r;
 
     if (!pmaster_bucket) {
-      uint64_t iid = instance_id();
-      uint64_t bid = next_bucket_id();
-      char buf[get_zone_params().get_id().size() + 48];
-      snprintf(buf, sizeof(buf), "%s.%llu.%llu", get_zone_params().get_id().c_str(), (long long)iid, (long long)bid);
-      bucket.marker = buf;
+      create_bucket_id(&bucket.marker);
       bucket.bucket_id = bucket.marker;
     } else {
       bucket.marker = pmaster_bucket->marker;
@@ -5154,7 +5221,7 @@ int RGWRados::create_bucket(RGWUserInfo& owner, rgw_bucket& bucket,
     info.bucket_index_shard_hash_type = RGWBucketInfo::MOD;
     info.requester_pays = false;
     if (real_clock::is_zero(creation_time))
-      creation_time = ceph::real_clock::now(cct);
+      info.creation_time = ceph::real_clock::now(cct);
     else
       info.creation_time = creation_time;
     ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true);
@@ -5795,6 +5862,21 @@ int RGWRados::BucketShard::init(rgw_bucket& _bucket, rgw_obj& obj)
   return 0;
 }
 
+int RGWRados::BucketShard::init(rgw_bucket& _bucket, int sid)
+{
+  bucket = _bucket;
+  shard_id = sid;
+
+  int ret = store->open_bucket_index_shard(bucket, index_ctx, shard_id, &bucket_obj);
+  if (ret < 0) {
+    ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
+    return ret;
+  }
+  ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl;
+
+  return 0;
+}
+
 
 /* Execute @handler on last item in bucket listing for bucket specified
  * in @bucket_info. @obj_prefix and @obj_delim narrow down the listing
@@ -6421,7 +6503,7 @@ int RGWRados::aio_put_obj_data(void *ctx, rgw_obj& obj, bufferlist& bl,
 int RGWRados::aio_wait(void *handle)
 {
   AioCompletion *c = (AioCompletion *)handle;
-  c->wait_for_complete();
+  c->wait_for_safe();
   int ret = c->get_return_value();
   c->release();
   return ret;
@@ -6430,7 +6512,7 @@ int RGWRados::aio_wait(void *handle)
 bool RGWRados::aio_completed(void *handle)
 {
   AioCompletion *c = (AioCompletion *)handle;
-  return c->is_complete();
+  return c->is_safe();
 }
 
 class RGWRadosPutObj : public RGWGetDataCB
@@ -7442,6 +7524,10 @@ int RGWRados::Object::complete_atomic_modification()
   cls_rgw_obj_chain chain;
   store->update_gc_chain(obj, state->manifest, &chain);
 
+  if (chain.empty()) {
+    return 0;
+  }
+
   string tag = state->obj_tag.to_str();
   int ret = store->gc->send_chain(chain, tag, false);  // do it async
 
@@ -7474,13 +7560,13 @@ int RGWRados::open_bucket_index(rgw_bucket& bucket, librados::IoCtx& index_ctx,
   if (r < 0)
     return r;
 
-  if (bucket.marker.empty()) {
-    ldout(cct, 0) << "ERROR: empty marker for bucket operation" << dendl;
+  if (bucket.bucket_id.empty()) {
+    ldout(cct, 0) << "ERROR: empty bucket id for bucket operation" << dendl;
     return -EIO;
   }
 
   bucket_oid = dir_oid_prefix;
-  bucket_oid.append(bucket.marker);
+  bucket_oid.append(bucket.bucket_id);
 
   return 0;
 }
@@ -7491,13 +7577,13 @@ int RGWRados::open_bucket_index_base(rgw_bucket& bucket, librados::IoCtx& index_
   if (r < 0)
     return r;
 
-  if (bucket.marker.empty()) {
-    ldout(cct, 0) << "ERROR: empty marker for bucket operation" << dendl;
+  if (bucket.bucket_id.empty()) {
+    ldout(cct, 0) << "ERROR: empty bucket_id for bucket operation" << dendl;
     return -EIO;
   }
 
   bucket_oid_base = dir_oid_prefix;
-  bucket_oid_base.append(bucket.marker);
+  bucket_oid_base.append(bucket.bucket_id);
 
   return 0;
 
@@ -7565,6 +7651,27 @@ int RGWRados::open_bucket_index_shard(rgw_bucket& bucket, librados::IoCtx& index
   return 0;
 }
 
+int RGWRados::open_bucket_index_shard(rgw_bucket& bucket, librados::IoCtx& index_ctx,
+                                      int shard_id, string *bucket_obj)
+{
+  string bucket_oid_base;
+  int ret = open_bucket_index_base(bucket, index_ctx, bucket_oid_base);
+  if (ret < 0)
+    return ret;
+
+  RGWObjectCtx obj_ctx(this);
+
+  // Get the bucket info
+  RGWBucketInfo binfo;
+  ret = get_bucket_instance_info(obj_ctx, bucket, binfo, NULL, NULL);
+  if (ret < 0)
+    return ret;
+
+  get_bucket_index_object(bucket_oid_base, binfo.num_shards,
+                          shard_id, bucket_obj);
+  return 0;
+}
+
 static void accumulate_raw_stats(rgw_bucket_dir_header& header, map<RGWObjCategory, RGWStorageStats>& stats)
 {
   map<uint8_t, struct rgw_bucket_category_stats>::iterator iter = header.stats.begin();
@@ -8241,7 +8348,7 @@ int RGWRados::Object::Stat::wait()
     return state.ret;
   }
 
-  state.completion->wait_for_complete();
+  state.completion->wait_for_safe();
   state.ret = state.completion->get_return_value();
   state.completion->release();
 
@@ -9057,14 +9164,8 @@ int RGWRados::get_system_obj(RGWObjectCtx& obj_ctx, RGWRados::SystemObject::Read
   uint64_t len;
   ObjectReadOperation op;
 
-  RGWObjState *astate = NULL;
-
   get_obj_bucket_and_oid_loc(obj, bucket, oid, key);
 
-  int r = get_system_obj_state(&obj_ctx, obj, &astate, NULL);
-  if (r < 0)
-    return r;
-
   if (end < 0)
     len = 0;
   else
@@ -9082,7 +9183,7 @@ int RGWRados::get_system_obj(RGWObjectCtx& obj_ctx, RGWRados::SystemObject::Read
   }
 
   librados::IoCtx *io_ctx;
-  r = read_state.get_ioctx(this, obj, &io_ctx);
+  int r = read_state.get_ioctx(this, obj, &io_ctx);
   if (r < 0) {
     ldout(cct, 20) << "get_ioctx() on obj=" << obj << " returned " << r << dendl;
     return r;
@@ -9187,7 +9288,7 @@ struct get_obj_data : public RefCountedObject {
     librados::AioCompletion *c = iter->second;
     lock.Unlock();
 
-    c->wait_for_complete_and_cb();
+    c->wait_for_safe_and_cb();
     int r = c->get_return_value();
 
     lock.Lock();
@@ -9206,7 +9307,12 @@ struct get_obj_data : public RefCountedObject {
   void add_io(off_t ofs, off_t len, bufferlist **pbl, AioCompletion **pc) {
     Mutex::Locker l(lock);
 
-    get_obj_io& io = io_map[ofs];
+    const auto& io_iter = io_map.insert(
+      map<off_t, get_obj_io>::value_type(ofs, get_obj_io()));
+
+    assert(io_iter.second); // assert new insertion
+
+    get_obj_io& io = (io_iter.first)->second;
     *pbl = &io.bl;
 
     struct get_obj_aio_data aio;
@@ -9218,7 +9324,7 @@ struct get_obj_data : public RefCountedObject {
 
     struct get_obj_aio_data *paio_data =  &aio_data.back(); /* last element */
 
-    librados::AioCompletion *c = librados::Rados::aio_create_completion((void *)paio_data, _get_obj_aio_completion_cb, NULL);
+    librados::AioCompletion *c = librados::Rados::aio_create_completion((void *)paio_data, NULL, _get_obj_aio_completion_cb);
     completion_map[ofs] = c;
 
     *pc = c;
@@ -9280,7 +9386,7 @@ struct get_obj_data : public RefCountedObject {
 
     for (; aiter != completion_map.end(); ++aiter) {
       completion = aiter->second;
-      if (!completion->is_complete()) {
+      if (!completion->is_safe()) {
         /* reached a request that is not yet complete, stop */
         break;
       }
@@ -9453,9 +9559,10 @@ int RGWRados::get_obj_iterate_cb(RGWObjectCtx *ctx, RGWObjState *astate,
   io_ctx.locator_set_key(key);
 
   r = io_ctx.aio_operate(oid, c, &op, NULL);
-  ldout(cct, 20) << "rados->aio_operate r=" << r << " bl.length=" << pbl->length() << dendl;
-  if (r < 0)
-    goto done_err;
+  if (r < 0) {
+	ldout(cct, 0) << "rados->aio_operate r=" << r << dendl;
+	goto done_err;
+  }
 
   // Flush data to client if there is any
   r = flush_read_list(d);
@@ -10847,11 +10954,33 @@ int RGWRados::omap_get_vals(rgw_obj& obj, bufferlist& header, const string& mark
  
 }
 
-int RGWRados::omap_get_all(rgw_obj& obj, bufferlist& header, std::map<string, bufferlist>& m)
+int RGWRados::omap_get_all(rgw_obj& obj, bufferlist& header,
+			   std::map<string, bufferlist>& m)
 {
+  rgw_rados_ref ref;
+  rgw_bucket bucket;
+  int r = get_obj_ref(obj, &ref, &bucket);
+  if (r < 0) {
+    return r;
+  }
+
+#define MAX_OMAP_GET_ENTRIES 1024
+  const int count = MAX_OMAP_GET_ENTRIES;
   string start_after;
 
-  return omap_get_vals(obj, header, start_after, (uint64_t)-1, m);
+  while (true) {
+    std::map<string, bufferlist> t;
+    r = ref.ioctx.omap_get_vals(ref.oid, start_after, count, &t);
+    if (r < 0) {
+      return r;
+    }
+    if (t.empty()) {
+      break;
+    }
+    start_after = t.rbegin()->first;
+    m.insert(t.begin(), t.end());
+  }
+  return 0;
 }
 
 int RGWRados::omap_set(rgw_obj& obj, std::string& key, bufferlist& bl)
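
The rewritten omap_get_all() above pages through the object map in chunks of MAX_OMAP_GET_ENTRIES instead of asking for everything in one call. A minimal standalone sketch of the same pagination pattern against a plain librados::IoCtx follows; `ioctx` is assumed to be already attached to a pool, and error handling is reduced to propagating the rados return code:

    // Sketch only: paginate over an object's omap with librados.
    #include <cstdint>
    #include <map>
    #include <string>
    #include <rados/librados.hpp>

    int read_all_omap(librados::IoCtx& ioctx, const std::string& oid,
                      std::map<std::string, librados::bufferlist>* out)
    {
      const uint64_t chunk = 1024;           // mirrors MAX_OMAP_GET_ENTRIES
      std::string start_after;               // exclusive lower bound per page
      while (true) {
        std::map<std::string, librados::bufferlist> page;
        int r = ioctx.omap_get_vals(oid, start_after, chunk, &page);
        if (r < 0)
          return r;                          // propagate the rados error
        if (page.empty())
          break;                             // no more entries
        start_after = page.rbegin()->first;  // resume after the last key seen
        out->insert(page.begin(), page.end());
      }
      return 0;
    }
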
@@ -11216,6 +11345,20 @@ int RGWRados::bi_get(rgw_bucket& bucket, rgw_obj& obj, BIIndexType index_type, r
   return 0;
 }
 
+void RGWRados::bi_put(ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry)
+{
+  cls_rgw_bi_put(op, bs.bucket_obj, entry);
+}
+
+int RGWRados::bi_put(BucketShard& bs, rgw_cls_bi_entry& entry)
+{
+  int ret = cls_rgw_bi_put(bs.index_ctx, bs.bucket_obj, entry);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
 int RGWRados::bi_put(rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry)
 {
   BucketShard bs(this);
@@ -11225,11 +11368,7 @@ int RGWRados::bi_put(rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry)
     return ret;
   }
 
-  ret = cls_rgw_bi_put(bs.index_ctx, bs.bucket_obj, entry);
-  if (ret < 0)
-    return ret;
-
-  return 0;
+  return bi_put(bs, entry);
 }
 
 int RGWRados::bi_list(rgw_bucket& bucket, const string& obj_name, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated)
@@ -11249,6 +11388,41 @@ int RGWRados::bi_list(rgw_bucket& bucket, const string& obj_name, const string&
   return 0;
 }
 
+int RGWRados::bi_list(BucketShard& bs, const string& filter_obj, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+{
+  int ret = cls_rgw_bi_list(bs.index_ctx, bs.bucket_obj, filter_obj, marker, max, entries, is_truncated);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+int RGWRados::bi_remove(BucketShard& bs)
+{
+  int ret = bs.index_ctx.remove(bs.bucket_obj);
+  if (ret == -ENOENT) {
+    ret = 0;
+  }
+  if (ret < 0) {
+    ldout(cct, 5) << "bs.index_ctx.remove(" << bs.bucket_obj << ") returned ret=" << ret << dendl;
+    return ret;
+  }
+
+  return 0;
+}
+
+int RGWRados::bi_list(rgw_bucket& bucket, int shard_id, const string& filter_obj, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+{
+  BucketShard bs(this);
+  int ret = bs.init(bucket, shard_id);
+  if (ret < 0) {
+    ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+    return ret;
+  }
+
+  return bi_list(bs, filter_obj, marker, max, entries, is_truncated);
+}
+
 int RGWRados::gc_operate(string& oid, librados::ObjectWriteOperation *op)
 {
   return gc_pool_ctx.operate(oid, op);
@@ -11996,6 +12170,44 @@ void RGWRados::get_bucket_instance_ids(RGWBucketInfo& bucket_info, int shard_id,
   }
 }
 
+int RGWRados::get_target_shard_id(const RGWBucketInfo& bucket_info, const string& obj_key,
+                                  int *shard_id)
+{
+  int r = 0;
+  switch (bucket_info.bucket_index_shard_hash_type) {
+    case RGWBucketInfo::MOD:
+      if (!bucket_info.num_shards) {
+        if (shard_id) {
+          *shard_id = -1;
+        }
+      } else {
+        uint32_t sid = ceph_str_hash_linux(obj_key.c_str(), obj_key.size());
+        uint32_t sid2 = sid ^ ((sid & 0xFF) << 24);
+        sid = sid2 % MAX_BUCKET_INDEX_SHARDS_PRIME % bucket_info.num_shards;
+        if (shard_id) {
+          *shard_id = (int)sid;
+        }
+      }
+      break;
+    default:
+      r = -ENOTSUP;
+  }
+  return r;
+}
+
+void RGWRados::get_bucket_index_object(const string& bucket_oid_base, uint32_t num_shards,
+                                      int shard_id, string *bucket_obj)
+{
+  if (!num_shards) {
+    // By default with no sharding, we use the bucket oid itself
+    (*bucket_obj) = bucket_oid_base;
+  } else {
+    char buf[bucket_oid_base.size() + 32];
+    snprintf(buf, sizeof(buf), "%s.%d", bucket_oid_base.c_str(), shard_id);
+    (*bucket_obj) = buf;
+  }
+}
+
 int RGWRados::get_bucket_index_object(const string& bucket_oid_base, const string& obj_key,
     uint32_t num_shards, RGWBucketInfo::BIShardsHashType hash_type, string *bucket_obj, int *shard_id)
 {
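
For illustration, here is a compact standalone sketch of the shard selection and index-object naming implemented above. std::hash stands in for ceph_str_hash_linux and the constant 7877 mirrors MAX_BUCKET_INDEX_SHARDS_PRIME, so the shard numbers it produces are not the ones RGW computes; only the structure of the calculation is the same:

    // Sketch only: map an object key to a bucket index shard object name.
    #include <cstdint>
    #include <functional>
    #include <string>

    std::string index_shard_oid(const std::string& oid_base,
                                const std::string& obj_key,
                                uint32_t num_shards)
    {
      if (num_shards == 0)
        return oid_base;                    // unsharded: one index object
      uint32_t sid = static_cast<uint32_t>(std::hash<std::string>{}(obj_key));
      uint32_t sid2 = sid ^ ((sid & 0xFF) << 24);   // same bit-mixing step
      uint32_t shard = sid2 % 7877 % num_shards;
      return oid_base + "." + std::to_string(shard);
    }
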
@@ -12370,3 +12582,48 @@ librados::Rados* RGWRados::get_rados_handle()
   }
 }
 
+int RGWRados::delete_obj_aio(rgw_obj& obj, rgw_bucket& bucket,
+                             RGWBucketInfo& bucket_info, RGWObjState *astate,
+                             list<librados::AioCompletion *>& handles, bool keep_index_consistent)
+{
+  rgw_rados_ref ref;
+  int ret = get_obj_ref(obj, &ref, &bucket);
+  if (ret < 0) {
+    lderr(cct) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
+    return ret;
+  }
+
+  if (keep_index_consistent) {
+    RGWRados::Bucket bop(this, bucket_info);
+    RGWRados::Bucket::UpdateIndex index_op(&bop, obj, astate);
+
+    ret = index_op.prepare(CLS_RGW_OP_DEL);
+    if (ret < 0) {
+      lderr(cct) << "ERROR: failed to prepare index op with ret=" << ret << dendl;
+      return ret;
+    }
+  }
+
+  ObjectWriteOperation op;
+  list<string> prefixes;
+  cls_rgw_remove_obj(op, prefixes);
+
+  AioCompletion *c = librados::Rados::aio_create_completion(NULL, NULL, NULL);
+  ret = ref.ioctx.aio_operate(ref.oid, c, &op);
+  if (ret < 0) {
+    lderr(cct) << "ERROR: AioOperate failed with ret=" << ret << dendl;
+    return ret;
+  }
+
+  handles.push_back(c);
+
+  if (keep_index_consistent) {
+    ret = delete_obj_index(obj);
+    if (ret < 0) {
+      lderr(cct) << "ERROR: failed to delete obj index with ret=" << ret << dendl;
+      return ret;
+    }
+  }
+  return ret;
+}
+
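
delete_obj_aio() hands the librados completions back to the caller through `handles`. A hedged sketch of how a caller might drain that list once all deletes have been issued (the actual call site is not part of this hunk; this only shows the wait / get_return_value / release pattern used elsewhere in this file):

    // Sketch only: wait for a batch of async deletes and collect the
    // first error, releasing each completion afterwards.
    #include <list>
    #include <rados/librados.hpp>

    int drain_handles(std::list<librados::AioCompletion*>& handles)
    {
      int ret = 0;
      for (auto* c : handles) {
        c->wait_for_safe();              // block until the op is durable
        int r = c->get_return_value();
        if (r < 0 && ret == 0)
          ret = r;                       // remember the first failure
        c->release();
      }
      handles.clear();
      return ret;
    }
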
diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h
index 0d3db37..7a9ea52 100644
--- a/src/rgw/rgw_rados.h
+++ b/src/rgw/rgw_rados.h
@@ -46,6 +46,8 @@ class RGWRESTConn;
 
 #define RGW_NO_SHARD -1
 
+#define MAX_BUCKET_INDEX_SHARDS_PRIME 7877
+
 static inline void prepend_bucket_marker(rgw_bucket& bucket, const string& orig_oid, string& oid)
 {
   if (bucket.marker.empty() || orig_oid.empty()) {
@@ -1066,18 +1068,15 @@ WRITE_CLASS_ENCODER(RGWDefaultZoneGroupInfo)
 
 struct RGWZoneGroupPlacementTarget {
   string name;
-  list<string> tags;
+  set<string> tags;
 
   bool user_permitted(list<string>& user_tags) {
     if (tags.empty()) {
       return true;
     }
-    for (list<string>::iterator uiter = user_tags.begin(); uiter != user_tags.end(); ++uiter) { /* we don't expect many of either, so we can handle this kind of lookup */
-      string& rule = *uiter;
-      for (list<string>::iterator iter = tags.begin(); iter != tags.end(); ++iter) {
-        if (rule == *iter) {
-          return true;
-        }
+    for (auto& rule : user_tags) {
+      if (tags.find(rule) != tags.end()) {
+        return true;
       }
     }
     return false;
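
With tags now held in a set, the placement-target check above reduces to a membership test: an empty tag set means the target is open to everyone, otherwise any overlap between the user's tags and the target's tags grants access. A self-contained equivalent with standard containers:

    // Sketch only: same overlap test with plain standard containers.
    #include <list>
    #include <set>
    #include <string>

    bool permitted(const std::set<std::string>& target_tags,
                   const std::list<std::string>& user_tags)
    {
      if (target_tags.empty())
        return true;                      // untagged target: open to all
      for (const auto& tag : user_tags) {
        if (target_tags.count(tag))
          return true;                    // any shared tag is enough
      }
      return false;
    }
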
@@ -1230,6 +1229,7 @@ struct RGWPeriodMap
   void reset() {
     zonegroups.clear();
     zonegroups_by_api.clear();
+    master_zonegroup.clear();
   }
 
   uint32_t get_zone_short_id(const string& zone_id) const;
@@ -1383,7 +1383,10 @@ public:
     return current_period;
   }
   int set_current_period(RGWPeriod& period);
-
+  void clear_current_period_and_epoch() {
+    current_period.clear();
+    epoch = 0;
+  }
   epoch_t get_epoch() const { return epoch; }
 
   string get_control_oid();
@@ -1778,6 +1781,8 @@ class RGWRados
       string& bucket_oid_base);
   int open_bucket_index_shard(rgw_bucket& bucket, librados::IoCtx& index_ctx,
       const string& obj_key, string *bucket_obj, int *shard_id);
+  int open_bucket_index_shard(rgw_bucket& bucket, librados::IoCtx& index_ctx,
+                              int shard_id, string *bucket_obj);
   int open_bucket_index(rgw_bucket& bucket, librados::IoCtx& index_ctx,
       map<int, string>& bucket_objs, int shard_id = -1, map<int, string> *bucket_instance_ids = NULL);
   template<typename T>
@@ -2021,6 +2026,10 @@ public:
   int get_required_alignment(rgw_bucket& bucket, uint64_t *alignment);
   int get_max_chunk_size(rgw_bucket& bucket, uint64_t *max_chunk_size);
 
+  uint32_t get_max_bucket_shards() {
+    return MAX_BUCKET_INDEX_SHARDS_PRIME;
+  }
+
   int list_raw_objects(rgw_bucket& pool, const string& prefix_filter, int max,
                        RGWListRawObjsCtx& ctx, list<string>& oids,
                        bool *is_truncated);
@@ -2097,6 +2106,7 @@ public:
                                  RGWZonePlacementInfo *rule_info);
   int set_bucket_location_by_rule(const string& location_rule, const string& tenant_name, const string& bucket_name, rgw_bucket& bucket,
                                   RGWZonePlacementInfo *rule_info);
+  void create_bucket_id(string *bucket_id);
   virtual int create_bucket(RGWUserInfo& owner, rgw_bucket& bucket,
                             const string& zonegroup_id,
                             const string& placement_rule,
@@ -2178,6 +2188,7 @@ public:
 
     explicit BucketShard(RGWRados *_store) : store(_store), shard_id(-1) {}
     int init(rgw_bucket& _bucket, rgw_obj& obj);
+    int init(rgw_bucket& _bucket, int sid);
   };
 
   class Object {
@@ -2836,9 +2847,14 @@ public:
 
   int bi_get_instance(rgw_obj& obj, rgw_bucket_dir_entry *dirent);
   int bi_get(rgw_bucket& bucket, rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry);
+  void bi_put(librados::ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry);
+  int bi_put(BucketShard& bs, rgw_cls_bi_entry& entry);
   int bi_put(rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry);
+  int bi_list(rgw_bucket& bucket, int shard_id, const string& filter_obj, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+  int bi_list(BucketShard& bs, const string& filter_obj, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated);
   int bi_list(rgw_bucket& bucket, const string& obj_name, const string& marker, uint32_t max,
               list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+  int bi_remove(BucketShard& bs);
 
   int cls_obj_usage_log_add(const string& oid, rgw_usage_log_info& info);
   int cls_obj_usage_log_read(string& oid, string& user, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
@@ -2849,6 +2865,7 @@ public:
   void shard_name(const string& prefix, unsigned max_shards, const string& key, string& name, int *shard_id);
   void shard_name(const string& prefix, unsigned max_shards, const string& section, const string& key, string& name);
   void shard_name(const string& prefix, unsigned shard_id, string& name);
+  int get_target_shard_id(const RGWBucketInfo& bucket_info, const string& obj_key, int *shard_id);
   void time_log_prepare_entry(cls_log_entry& entry, const ceph::real_time& ut, const string& section, const string& key, bufferlist& bl);
   int time_log_add_init(librados::IoCtx& io_ctx);
   int time_log_add(const string& oid, list<cls_log_entry>& entries,
@@ -2988,6 +3005,8 @@ public:
 
   librados::Rados* get_rados_handle();
 
+  int delete_obj_aio(rgw_obj& obj, rgw_bucket& bucket, RGWBucketInfo& info, RGWObjState *astate,
+                     list<librados::AioCompletion *>& handles, bool keep_index_consistent);
  private:
   /**
    * This is a helper method, it generates a list of bucket index objects with the given
@@ -3015,6 +3034,9 @@ public:
   int get_bucket_index_object(const string& bucket_oid_base, const string& obj_key,
       uint32_t num_shards, RGWBucketInfo::BIShardsHashType hash_type, string *bucket_obj, int *shard);
 
+  void get_bucket_index_object(const string& bucket_oid_base, uint32_t num_shards,
+                               int shard_id, string *bucket_obj);
+
   /**
    * Check the actual on-disk state of the object specified
    * by list_state, and fill in the time and size of object.
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index e485d99..c8e3904 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -4,6 +4,7 @@
 #include <errno.h>
 #include <limits.h>
 
+#include <boost/algorithm/string.hpp>
 #include "common/Formatter.h"
 #include "common/HTMLFormatter.h"
 #include "common/utf8.h"
@@ -205,12 +206,10 @@ void rgw_rest_init(CephContext *cct, RGWRados *store, RGWZoneGroup& zone_group)
     http_status_names[h->code] = h->name;
   }
 
-  if (!cct->_conf->rgw_dns_name.empty()) {
-    hostnames_set.insert(cct->_conf->rgw_dns_name);
-  }
-  hostnames_set.insert(zone_group.hostnames.begin(),  zone_group.hostnames.end());
-  string s;
-  ldout(cct, 20) << "RGW hostnames: " << std::accumulate(hostnames_set.begin(), hostnames_set.end(), s) << dendl;
+  hostnames_set.insert(cct->_conf->rgw_dns_name);
+  hostnames_set.insert(zone_group.hostnames.begin(), zone_group.hostnames.end());
+  hostnames_set.erase(""); // filter out empty hostnames
+  ldout(cct, 20) << "RGW hostnames: " << hostnames_set << dendl;
   /* TODO: We should have a sanity check that no hostname matches the end of
    * any other hostname, otherwise we will get ambiguous results from
    * rgw_find_host_in_domains.
@@ -221,12 +220,10 @@ void rgw_rest_init(CephContext *cct, RGWRados *store, RGWZoneGroup& zone_group)
    * X.B.A ambiguously splits to both {X, B.A} and {X.B, A}
    */
 
-  if (!cct->_conf->rgw_dns_s3website_name.empty()) {
-    hostnames_s3website_set.insert(cct->_conf->rgw_dns_s3website_name);
-  }
+  hostnames_s3website_set.insert(cct->_conf->rgw_dns_s3website_name);
   hostnames_s3website_set.insert(zone_group.hostnames_s3website.begin(), zone_group.hostnames_s3website.end());
-  s.clear();
-  ldout(cct, 20) << "RGW S3website hostnames: " << std::accumulate(hostnames_s3website_set.begin(), hostnames_s3website_set.end(), s) << dendl;
+  hostnames_s3website_set.erase(""); // filter out empty hostnames
+  ldout(cct, 20) << "RGW S3website hostnames: " << hostnames_s3website_set << dendl;
   /* TODO: we should repeat the hostnames_set sanity check here
    * and ALSO decide about overlap, if any
    */
@@ -1478,7 +1475,7 @@ int RGWHandler_REST::validate_bucket_name(const string& bucket)
     // Name too short
     return -ERR_INVALID_BUCKET_NAME;
   }
-  else if (len > 255) {
+  else if (len > MAX_BUCKET_NAME_LEN) {
     // Name too long
     return -ERR_INVALID_BUCKET_NAME;
   }
@@ -1493,7 +1490,7 @@ int RGWHandler_REST::validate_bucket_name(const string& bucket)
 int RGWHandler_REST::validate_object_name(const string& object)
 {
   int len = object.size();
-  if (len > 1024) {
+  if (len > MAX_OBJ_NAME_LEN) {
     // Name too long
     return -ERR_INVALID_OBJECT_NAME;
   }
@@ -1629,12 +1626,22 @@ RGWRESTMgr *RGWRESTMgr::get_resource_mgr(struct req_state *s, const string& uri,
     }
   }
 
-  if (default_mgr)
-    return default_mgr;
+  if (default_mgr) {
+    return default_mgr->get_resource_mgr_as_default(s, uri, out_uri);
+  }
 
   return this;
 }
 
+void RGWREST::register_x_headers(const string& s_headers)
+{
+  std::vector<std::string> hdrs = get_str_vec(s_headers);
+  for (auto& hdr : hdrs) {
+    boost::algorithm::to_upper(hdr); // normalize to uppercase for matching
+    (void) x_headers.insert(hdr);
+  }
+}
+
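
register_x_headers() splits the configured header list, upper-cases each name, and stores it so log_x_header() can do a cheap lookup later. A hedged sketch of the same normalize-then-lookup idea, with std::set<std::string> standing in for the sstring-based set and a simplified splitter standing in for get_str_vec():

    // Sketch only: normalize configured header names and query them.
    #include <boost/algorithm/string.hpp>
    #include <set>
    #include <string>
    #include <vector>

    std::set<std::string> parse_x_headers(const std::string& conf)
    {
      std::vector<std::string> hdrs;
      boost::split(hdrs, conf, boost::is_any_of(", "),
                   boost::token_compress_on);
      std::set<std::string> out;
      for (auto& h : hdrs) {
        if (h.empty())
          continue;
        boost::to_upper(h);               // matching is done in uppercase
        out.insert(h);
      }
      return out;
    }

    // usage: auto hs = parse_x_headers("x-forwarded-for, x-request-id");
    //        bool log_it = hs.count("X-FORWARDED-FOR") > 0;
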
 RGWRESTMgr::~RGWRESTMgr()
 {
   map<string, RGWRESTMgr *>::iterator iter;
@@ -1767,10 +1774,12 @@ int RGWREST::preprocess(struct req_state *s, RGWClientIO* cio)
     // As additional checks:
     // - if the Host header is an IP, we're using path-style access without DNS
     // - Also check that the Host header is a valid bucket name before using it.
+    // - Don't enable virtual hosting if no hostnames are configured
     if (subdomain.empty()
         && (domain.empty() || domain != info.host)
         && !looks_like_ip_address(info.host.c_str())
-        && RGWHandler_REST::validate_bucket_name(info.host)) {
+        && RGWHandler_REST::validate_bucket_name(info.host) == 0
+        && !(hostnames_set.empty() && hostnames_s3website_set.empty())) {
       subdomain.append(info.host);
       in_hosted_domain = 1;
     }
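
The amended condition above only treats the Host header as a bucket name when the header is not an IP address, validates as a bucket name (validate_bucket_name() returns 0 on success), and at least one hostname set is configured. Distilled into a standalone predicate; the boolean inputs stand in for the helper calls made in preprocess():

    // Sketch only: the virtual-hosted-style decision as a pure predicate.
    #include <set>
    #include <string>

    bool use_vhost_bucket(const std::string& host,
                          bool looks_like_ip,
                          bool valid_bucket_name,
                          const std::set<std::string>& hostnames,
                          const std::set<std::string>& s3website_hostnames)
    {
      if (host.empty() || looks_like_ip || !valid_bucket_name)
        return false;
      // no hostnames configured at all: never enable virtual hosting
      return !(hostnames.empty() && s3website_hostnames.empty());
    }
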
diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h
index 96947b3..b27639b 100644
--- a/src/rgw/rgw_rest.h
+++ b/src/rgw/rgw_rest.h
@@ -6,6 +6,7 @@
 
 #define TIME_BUF_SIZE 128
 
+#include "common/sstring.hh"
 #include "common/ceph_json.h"
 #include "include/assert.h" /* needed because of common/ceph_json.h */
 #include "rgw_op.h"
@@ -257,6 +258,18 @@ public:
   ~RGWDeleteObj_ObjStore() {}
 };
 
+class  RGWGetCrossDomainPolicy_ObjStore : public RGWGetCrossDomainPolicy {
+public:
+  RGWGetCrossDomainPolicy_ObjStore() = default;
+  ~RGWGetCrossDomainPolicy_ObjStore() = default;
+};
+
+class  RGWGetHealthCheck_ObjStore : public RGWGetHealthCheck {
+public:
+  RGWGetHealthCheck_ObjStore() = default;
+  ~RGWGetHealthCheck_ObjStore() = default;
+};
+
 class RGWCopyObj_ObjStore : public RGWCopyObj {
 public:
   RGWCopyObj_ObjStore() {}
@@ -351,6 +364,12 @@ public:
   virtual int get_params();
 };
 
+class RGWInfo_ObjStore : public RGWInfo {
+public:
+    RGWInfo_ObjStore() = default;
+    ~RGWInfo_ObjStore() = default;
+};
+
 class RGWRESTOp : public RGWOp {
 protected:
   int http_ret;
@@ -370,6 +389,7 @@ public:
 
 class RGWHandler_REST : public RGWHandler {
 protected:
+
   virtual bool is_obj_update_op() { return false; }
   virtual RGWOp *op_get() { return NULL; }
   virtual RGWOp *op_put() { return NULL; }
@@ -382,6 +402,9 @@ protected:
   static int allocate_formatter(struct req_state *s, int default_formatter,
 				bool configurable);
 public:
+  static constexpr int MAX_BUCKET_NAME_LEN = 255;
+  static constexpr int MAX_OBJ_NAME_LEN = 1024;
+
   RGWHandler_REST() {}
   virtual ~RGWHandler_REST() {}
 
@@ -411,6 +434,7 @@ class RGWHandler_REST_S3;
 
 class RGWRESTMgr {
   bool should_log;
+
 protected:
   map<string, RGWRESTMgr *> resource_mgrs;
   multimap<size_t, string> resources_by_size;
@@ -425,6 +449,13 @@ public:
 
   virtual RGWRESTMgr *get_resource_mgr(struct req_state *s, const string& uri,
 				       string *out_uri);
+
+  virtual RGWRESTMgr* get_resource_mgr_as_default(struct req_state* s,
+                                                  const std::string& uri,
+                                                  std::string* out_uri) {
+    return this;
+  }
+
   virtual RGWHandler_REST *get_handler(struct req_state *s) { return NULL; }
   virtual void put_handler(RGWHandler_REST *handler) { delete handler; }
 
@@ -435,6 +466,8 @@ public:
 class RGWLibIO;
 
 class RGWREST {
+  using x_header = basic_sstring<char, uint16_t, 32>;
+  std::set<x_header> x_headers;
   RGWRESTMgr mgr;
 
   static int preprocess(struct req_state *s, RGWClientIO *sio);
@@ -448,6 +481,7 @@ public:
 			  RGWLibIO *io, RGWRESTMgr **pmgr,
 			  int *init_error);
 #endif
+
   void put_handler(RGWHandler_REST *handler) {
     mgr.put_handler(handler);
   }
@@ -459,9 +493,20 @@ public:
 
     mgr.register_resource(resource, m);
   }
+
   void register_default_mgr(RGWRESTMgr *m) {
     mgr.register_default_mgr(m);
   }
+
+  void register_x_headers(const std::string& headers);
+
+  bool log_x_headers(void) {
+    return (x_headers.size() > 0);
+  }
+
+  bool log_x_header(const std::string& header) {
+    return (x_headers.find(header) != x_headers.end());
+  }
 };
 
 static constexpr int64_t NO_CONTENT_LENGTH = -1;
diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc
index 6f8bc64..aded109 100644
--- a/src/rgw/rgw_rest_log.cc
+++ b/src/rgw/rgw_rest_log.cc
@@ -131,7 +131,7 @@ void RGWOp_MDLog_List::send_response() {
 
 void RGWOp_MDLog_Info::execute() {
   num_objects = s->cct->_conf->rgw_md_log_max_shards;
-  period = store->meta_mgr->get_oldest_log_period();
+  period = store->meta_mgr->read_oldest_log_period();
   http_ret = period.get_error();
 }
 
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index da93a95..8e237f0 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -10,6 +10,7 @@
 #include "common/ceph_json.h"
 #include "common/safe_io.h"
 #include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
 
 #include "rgw_rest.h"
 #include "rgw_rest_s3.h"
@@ -3580,6 +3581,8 @@ int RGW_Auth_S3::authorize_v4(RGWRados *store, struct req_state *s)
 
   if (s->aws4_auth->canonical_uri.empty()) {
     s->aws4_auth->canonical_uri = "/";
+  } else {
+    boost::replace_all(s->aws4_auth->canonical_uri, "+", "%20");
   }
 
   /* craft canonical query string */
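
The added boost::replace_all() call normalizes '+' back to the '%20' form expected in an AWS Signature Version 4 canonical URI. A tiny runnable illustration of the same transformation:

    // Sketch only: '+' to '%20' normalization for a canonical URI.
    #include <boost/algorithm/string/replace.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
      std::string canonical_uri = "/bucket/my+file+name.txt";
      boost::replace_all(canonical_uri, "+", "%20");
      std::cout << canonical_uri << std::endl;  // /bucket/my%20file%20name.txt
      return 0;
    }
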
@@ -3953,7 +3956,13 @@ int RGW_Auth_S3::authorize_v2(RGWRados *store, struct req_state *s)
       << store->ctx()->_conf->rgw_ldap_uri
       << dendl;
 
-    RGWToken token{from_base64(auth_id)};
+    RGWToken token;
+    /* boost filters and/or string_ref may throw on invalid input */
+    try {
+      token = rgw::from_base64(auth_id);
+    } catch(...) {
+      token = std::string("");
+    }
 
     if (! token.valid())
       external_auth_result = -EACCES;
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index a6d3623..7b4a3e1 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -5,6 +5,7 @@
 #include <boost/utility/in_place_factory.hpp>
 
 #include "include/assert.h"
+#include "ceph_ver.h"
 
 #include "common/Formatter.h"
 #include "common/utf8.h"
@@ -23,6 +24,7 @@
 
 int RGWListBuckets_ObjStore_SWIFT::get_params()
 {
+  prefix = s->info.args.get("prefix");
   marker = s->info.args.get("marker");
   end_marker = s->info.args.get("end_marker");
 
@@ -144,14 +146,20 @@ void RGWListBuckets_ObjStore_SWIFT::send_response_begin(bool has_buckets)
 
 void RGWListBuckets_ObjStore_SWIFT::send_response_data(RGWUserBuckets& buckets)
 {
-  map<string, RGWBucketEnt>& m = buckets.get_buckets();
-  map<string, RGWBucketEnt>::iterator iter;
-
-  if (!sent_data)
+  if (! sent_data) {
     return;
+  }
+
+  /* Take care of the Swift API's prefix parameter. There is no point in
+   * applying the filter earlier, as we need to go through all entries
+   * regardless (headers like X-Account-Container-Count aren't affected
+   * by the prefix). */
+  const std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
+  for (auto iter = m.lower_bound(prefix);
+       iter != m.end() && boost::algorithm::starts_with(iter->first, prefix);
+       ++iter) {
+    const RGWBucketEnt& obj = iter->second;
 
-  for (iter = m.begin(); iter != m.end(); ++iter) {
-    RGWBucketEnt obj = iter->second;
     s->formatter->open_object_section("container");
     s->formatter->dump_string("name", obj.bucket.name);
     if (need_stats) {
@@ -159,7 +167,7 @@ void RGWListBuckets_ObjStore_SWIFT::send_response_data(RGWUserBuckets& buckets)
       s->formatter->dump_int("bytes", obj.size);
     }
     s->formatter->close_section();
-    if (!g_conf->rgw_swift_enforce_content_length) {
+    if (! g_conf->rgw_swift_enforce_content_length) {
       rgw_flush_formatter(s, s->formatter);
     }
   }
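
The new container listing walks only the keys sharing the requested prefix: lower_bound() jumps to the first candidate, and the loop stops at the first key that no longer starts with the prefix. The same pattern on a plain std::map:

    // Sketch only: visit exactly the keys sharing a prefix in a sorted map.
    #include <boost/algorithm/string/predicate.hpp>
    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
      std::map<std::string, int> buckets = {
        {"alpha", 1}, {"photos", 2}, {"photos-2016", 3}, {"zebra", 4}
      };
      const std::string prefix = "photos";
      for (auto it = buckets.lower_bound(prefix);
           it != buckets.end() &&
             boost::algorithm::starts_with(it->first, prefix);
           ++it) {
        std::cout << it->first << "\n";   // prints: photos, photos-2016
      }
      return 0;
    }
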
@@ -1249,6 +1257,162 @@ void RGWBulkDelete_ObjStore_SWIFT::send_response()
   rgw_flush_formatter_and_reset(s, s->formatter);
 }
 
+void RGWGetCrossDomainPolicy_ObjStore_SWIFT::send_response()
+{
+  set_req_state_err(s, op_ret);
+  dump_errno(s);
+  end_header(s, this, "application/xml");
+
+  std::stringstream ss;
+
+  ss << R"(<?xml version="1.0"?>)" << "\n"
+     << R"(<!DOCTYPE cross-domain-policy SYSTEM )"
+     << R"("http://www.adobe.com/xml/dtds/cross-domain-policy.dtd" >)" << "\n"
+     << R"(<cross-domain-policy>)" << "\n"
+     << g_conf->rgw_cross_domain_policy << "\n"
+     << R"(</cross-domain-policy>)";
+
+  STREAM_IO(s)->write(ss.str().c_str(), ss.str().length());
+}
+
+void RGWGetHealthCheck_ObjStore_SWIFT::send_response()
+{
+  set_req_state_err(s, op_ret);
+  dump_errno(s);
+  end_header(s, this, "application/xml");
+
+  if (op_ret) {
+    STREAM_IO(s)->print("DISABLED BY FILE");
+  }
+}
+
+const vector<pair<string, RGWInfo_ObjStore_SWIFT::info>> RGWInfo_ObjStore_SWIFT::swift_info =
+{
+    {"bulk_delete", {false, nullptr}},
+    {"container_quotas", {false, nullptr}},
+    {"swift", {false, RGWInfo_ObjStore_SWIFT::list_swift_data}},
+    {"tempurl", { false, RGWInfo_ObjStore_SWIFT::list_tempurl_data}},
+    {"slo", {false, RGWInfo_ObjStore_SWIFT::list_slo_data}},
+    {"account_quotas", {false, nullptr}},
+    {"staticweb", {false, nullptr}},
+    {"tempauth", {false, nullptr}},
+};
+
+void RGWInfo_ObjStore_SWIFT::execute()
+{
+  bool is_admin_info_enabled = false;
+
+  const string& swiftinfo_sig = s->info.args.get("swiftinfo_sig");
+  const string& swiftinfo_expires = s->info.args.get("swiftinfo_expires");
+
+  if (!swiftinfo_sig.empty() &&
+      !swiftinfo_expires.empty() &&
+      !is_expired(swiftinfo_expires, s->cct)) {
+    is_admin_info_enabled = true;
+  }
+
+  s->formatter->open_object_section("info");
+
+  for (const auto& pair : swift_info) {
+    if(!is_admin_info_enabled && pair.second.is_admin_info)
+      continue;
+
+    if (!pair.second.list_data) {
+      s->formatter->open_object_section((pair.first).c_str());
+      s->formatter->close_section();
+    }
+    else {
+      pair.second.list_data(*(s->formatter), *(s->cct->_conf), *store);
+    }
+  }
+
+  s->formatter->close_section();
+}
+
+void RGWInfo_ObjStore_SWIFT::send_response()
+{
+  if (op_ret <  0) {
+    op_ret = STATUS_NO_CONTENT;
+  }
+  set_req_state_err(s, op_ret);
+  dump_errno(s);
+  end_header(s, this);
+  rgw_flush_formatter_and_reset(s, s->formatter);
+}
+
+void RGWInfo_ObjStore_SWIFT::list_swift_data(Formatter& formatter,
+                                              const md_config_t& config,
+                                              RGWRados& store)
+{
+  formatter.open_object_section("swift");
+  formatter.dump_int("max_file_size", config.rgw_max_put_size);
+  formatter.dump_int("container_listing_limit", RGW_LIST_BUCKETS_LIMIT_MAX);
+
+  string ceph_version(CEPH_GIT_NICE_VER);
+  formatter.dump_string("version", ceph_version);
+  formatter.dump_int("max_meta_name_length", 81);
+
+  formatter.open_array_section("policies");
+  RGWZoneGroup& zonegroup = store.get_zonegroup();
+
+  for (const auto& placement_targets : zonegroup.placement_targets) {
+    formatter.open_object_section("policy");
+    if (placement_targets.second.name.compare(zonegroup.default_placement) == 0)
+      formatter.dump_bool("default", true);
+    formatter.dump_string("name", placement_targets.second.name.c_str());
+    formatter.close_section();
+  }
+  formatter.close_section();
+
+  formatter.dump_int("max_object_name_size", RGWHandler_REST::MAX_OBJ_NAME_LEN);
+  formatter.dump_bool("strict_cors_mode", true);
+  formatter.dump_int("max_container_name_length", RGWHandler_REST::MAX_BUCKET_NAME_LEN);
+  formatter.close_section();
+}
+
+void RGWInfo_ObjStore_SWIFT::list_tempurl_data(Formatter& formatter,
+                                                const md_config_t& config,
+                                                RGWRados& store)
+{
+  formatter.open_object_section("tempurl");
+  formatter.open_array_section("methods");
+  formatter.dump_string("methodname", "GET");
+  formatter.dump_string("methodname", "HEAD");
+  formatter.dump_string("methodname", "PUT");
+  formatter.dump_string("methodname", "POST");
+  formatter.dump_string("methodname", "DELETE");
+  formatter.close_section();
+  formatter.close_section();
+}
+
+void RGWInfo_ObjStore_SWIFT::list_slo_data(Formatter& formatter,
+                                            const md_config_t& config,
+                                            RGWRados& store)
+{
+  formatter.open_object_section("slo");
+  formatter.dump_int("max_manifest_segments", config.rgw_max_slo_entries);
+  formatter.close_section();
+}
+
+bool RGWInfo_ObjStore_SWIFT::is_expired(const std::string& expires, CephContext* cct)
+{
+  string err;
+  const utime_t now = ceph_clock_now(cct);
+  const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(),
+                                                       10, &err);
+  if (!err.empty()) {
+    ldout(cct, 5) << "failed to parse swiftinfo_expires: " << err << dendl;
+    return true;
+  }
+
+  if (expiration <= (uint64_t)now.sec()) {
+    ldout(cct, 5) << "swiftinfo expired: " << expiration << " <= " << now.sec() << dendl;
+    return true;
+  }
+
+  return false;
+}
+
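
is_expired() above parses swiftinfo_expires as a decimal UNIX timestamp and treats both parse failures and past timestamps as expired. A hedged standalone equivalent, with std::strtoll standing in for ceph's strict_strtoll() and std::time() standing in for ceph_clock_now():

    // Sketch only: expiry check for a decimal epoch-seconds string.
    #include <cerrno>
    #include <cstdlib>
    #include <ctime>
    #include <string>

    bool is_expired_sketch(const std::string& expires)
    {
      errno = 0;
      char* end = nullptr;
      const long long expiration = std::strtoll(expires.c_str(), &end, 10);
      if (errno != 0 || end == expires.c_str() || *end != '\0' ||
          expiration < 0) {
        return true;                      // unparsable: treat as expired
      }
      return expiration <= static_cast<long long>(std::time(nullptr));
    }
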
 RGWOp *RGWHandler_REST_Service_SWIFT::op_get()
 {
   return new RGWListBuckets_ObjStore_SWIFT;
@@ -1528,43 +1692,53 @@ int RGWHandler_REST_SWIFT::init_from_header(struct req_state *s)
   s->info.args.set(p);
   s->info.args.parse();
 
-  if (*req_name != '/')
+  /* Skip the leading slash of URL hierarchy. */
+  if (req_name[0] != '/') {
     return 0;
+  } else {
+    req_name++;
+  }
 
-  req_name++;
-
-  if (!*req_name)
-    return 0;
+  if ('\0' == req_name[0]) {
+    return g_conf->rgw_swift_url_prefix == "/" ? -ERR_BAD_URL : 0;
+  }
 
   req = req_name;
 
-  int pos = req.find('/');
-  if (pos >= 0) {
+  size_t pos = req.find('/');
+  if (std::string::npos != pos && g_conf->rgw_swift_url_prefix != "/") {
     bool cut_url = g_conf->rgw_swift_url_prefix.length();
     first = req.substr(0, pos);
+
     if (first.compare(g_conf->rgw_swift_url_prefix) == 0) {
       if (cut_url) {
+        /* Rewind to the "v1/..." part. */
         next_tok(req, first, '/');
       }
     }
+  } else if (req.compare(g_conf->rgw_swift_url_prefix) == 0) {
+    s->formatter = new RGWFormatter_Plain;
+    return -ERR_BAD_URL;
   } else {
-    if (req.compare(g_conf->rgw_swift_url_prefix) == 0) {
-      s->formatter = new RGWFormatter_Plain;
-      return -ERR_BAD_URL;
-    }
     first = req;
   }
 
-  string tenant_path;
-  if (!g_conf->rgw_swift_tenant_name.empty()) {
+  std::string tenant_path;
+  if (! g_conf->rgw_swift_tenant_name.empty()) {
     tenant_path = "/AUTH_";
     tenant_path.append(g_conf->rgw_swift_tenant_name);
   }
 
   /* verify that the request_uri conforms with what's expected */
   char buf[g_conf->rgw_swift_url_prefix.length() + 16 + tenant_path.length()];
-  int blen = sprintf(buf, "/%s/v1%s",
-		    g_conf->rgw_swift_url_prefix.c_str(), tenant_path.c_str());
+  int blen;
+  if (g_conf->rgw_swift_url_prefix == "/") {
+    blen = sprintf(buf, "/v1%s", tenant_path.c_str());
+  } else {
+    blen = sprintf(buf, "/%s/v1%s",
+                   g_conf->rgw_swift_url_prefix.c_str(), tenant_path.c_str());
+  }
+
   if (s->decoded_uri[0] != '/' ||
     s->decoded_uri.compare(0, blen, buf) !=  0) {
     return -ENOENT;
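
The expected URI root built at the end of init_from_header() depends on rgw_swift_url_prefix: the stock default ("swift") keeps the familiar /swift/v1 root, "/" selects a bare /v1 root, and any other value is used verbatim. A worked sketch of that mapping; expected_uri_root() is an illustrative helper, not an RGW function:

    // Sketch only: how rgw_swift_url_prefix maps to the expected URI root.
    #include <iostream>
    #include <string>

    std::string expected_uri_root(const std::string& prefix,
                                  const std::string& tenant_path)
    {
      if (prefix == "/")
        return "/v1" + tenant_path;       // "/" means "no prefix at all"
      return "/" + prefix + "/v1" + tenant_path;
    }

    int main()
    {
      std::cout << expected_uri_root("swift", "") << "\n";            // /swift/v1
      std::cout << expected_uri_root("/", "") << "\n";                // /v1
      std::cout << expected_uri_root("files", "/AUTH_test") << "\n";  // /files/v1/AUTH_test
      return 0;
    }
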
@@ -1677,3 +1851,9 @@ RGWHandler_REST* RGWRESTMgr_SWIFT::get_handler(struct req_state *s)
 
   return new RGWHandler_REST_Obj_SWIFT;
 }
+
+RGWHandler_REST* RGWRESTMgr_SWIFT_Info::get_handler(struct req_state *s)
+{
+  s->prot_flags |= RGW_REST_SWIFT;
+  return new RGWHandler_REST_SWIFT_Info;
+}
diff --git a/src/rgw/rgw_rest_swift.h b/src/rgw/rgw_rest_swift.h
index 0c4b1e2..b3b2d09 100644
--- a/src/rgw/rgw_rest_swift.h
+++ b/src/rgw/rgw_rest_swift.h
@@ -21,6 +21,7 @@ public:
 
 class RGWListBuckets_ObjStore_SWIFT : public RGWListBuckets_ObjStore {
   bool need_stats;
+  std::string prefix;
 
   uint64_t get_default_max() const override {
     return 0;
@@ -185,8 +186,30 @@ public:
   void send_response();
 };
 
+class RGWInfo_ObjStore_SWIFT : public RGWInfo_ObjStore {
+protected:
+  struct info
+  {
+    bool is_admin_info;
+    function<void (Formatter&, const md_config_t&, RGWRados&)> list_data;
+  };
+
+  static const vector<pair<string, struct info>> swift_info;
+public:
+  RGWInfo_ObjStore_SWIFT() {}
+  ~RGWInfo_ObjStore_SWIFT() {}
+
+  void execute() override;
+  void send_response() override;
+  static void list_swift_data(Formatter& formatter, const md_config_t& config, RGWRados& store);
+  static void list_tempurl_data(Formatter& formatter, const md_config_t& config, RGWRados& store);
+  static void list_slo_data(Formatter& formatter, const md_config_t& config, RGWRados& store);
+  static bool is_expired(const std::string& expires, CephContext* cct);
+};
+
 class RGWHandler_REST_SWIFT : public RGWHandler_REST {
   friend class RGWRESTMgr_SWIFT;
+  friend class RGWRESTMgr_SWIFT_Info;
 protected:
   virtual bool is_acl_op() {
     return false;
@@ -262,6 +285,176 @@ public:
   virtual ~RGWRESTMgr_SWIFT() {}
 
   virtual RGWHandler_REST *get_handler(struct req_state *s);
+
+  RGWRESTMgr* get_resource_mgr_as_default(struct req_state* s,
+                                          const std::string& uri,
+                                          std::string* out_uri) override {
+    return this->get_resource_mgr(s, uri, out_uri);
+  }
+};
+
+
+class  RGWGetCrossDomainPolicy_ObjStore_SWIFT
+  : public RGWGetCrossDomainPolicy_ObjStore {
+public:
+  RGWGetCrossDomainPolicy_ObjStore_SWIFT() = default;
+  ~RGWGetCrossDomainPolicy_ObjStore_SWIFT() = default;
+
+  void send_response() override;
+};
+
+class  RGWGetHealthCheck_ObjStore_SWIFT
+  : public RGWGetHealthCheck_ObjStore {
+public:
+  RGWGetHealthCheck_ObjStore_SWIFT() = default;
+  ~RGWGetHealthCheck_ObjStore_SWIFT() = default;
+
+  void send_response() override;
+};
+
+class RGWHandler_SWIFT_CrossDomain : public RGWHandler_REST {
+public:
+  RGWHandler_SWIFT_CrossDomain() = default;
+  ~RGWHandler_SWIFT_CrossDomain() = default;
+
+  RGWOp *op_get() override {
+    return new RGWGetCrossDomainPolicy_ObjStore_SWIFT();
+  }
+
+  int init(RGWRados* const store,
+           struct req_state* const state,
+           RGWClientIO* const cio) override {
+    state->dialect = "swift";
+    state->formatter = new JSONFormatter;
+    state->format = RGW_FORMAT_JSON;
+
+    return RGWHandler::init(store, state, cio);
+  }
+
+  int authorize() override {
+    return 0;
+  }
+
+  int postauth_init() override {
+    return 0;
+  }
+
+  int read_permissions(RGWOp *) override {
+    return 0;
+  }
+
+  virtual RGWAccessControlPolicy *alloc_policy() { return nullptr; }
+  virtual void free_policy(RGWAccessControlPolicy *policy) {}
+};
+
+class RGWRESTMgr_SWIFT_CrossDomain : public RGWRESTMgr {
+public:
+  RGWRESTMgr_SWIFT_CrossDomain() = default;
+  ~RGWRESTMgr_SWIFT_CrossDomain() = default;
+
+  RGWRESTMgr *get_resource_mgr(struct req_state* const s,
+                               const std::string& uri,
+                               std::string* const out_uri) override {
+    return this;
+  }
+
+  RGWHandler_REST* get_handler(struct req_state* const s) override {
+    s->prot_flags |= RGW_REST_SWIFT;
+    return new RGWHandler_SWIFT_CrossDomain;
+  }
+};
+
+
+class RGWHandler_SWIFT_HealthCheck : public RGWHandler_REST {
+public:
+  RGWHandler_SWIFT_HealthCheck() = default;
+  ~RGWHandler_SWIFT_HealthCheck() = default;
+
+  RGWOp *op_get() override {
+    return new RGWGetHealthCheck_ObjStore_SWIFT();
+  }
+
+  int init(RGWRados* const store,
+           struct req_state* const state,
+           RGWClientIO* const cio) override {
+    state->dialect = "swift";
+    state->formatter = new JSONFormatter;
+    state->format = RGW_FORMAT_JSON;
+
+    return RGWHandler::init(store, state, cio);
+  }
+
+  int authorize() override {
+    return 0;
+  }
+
+  int postauth_init() override {
+    return 0;
+  }
+
+  int read_permissions(RGWOp *) override {
+    return 0;
+  }
+
+  virtual RGWAccessControlPolicy *alloc_policy() { return nullptr; }
+  virtual void free_policy(RGWAccessControlPolicy *policy) {}
+};
+
+class RGWRESTMgr_SWIFT_HealthCheck : public RGWRESTMgr {
+public:
+  RGWRESTMgr_SWIFT_HealthCheck() = default;
+  ~RGWRESTMgr_SWIFT_HealthCheck() = default;
+
+  RGWRESTMgr *get_resource_mgr(struct req_state* const s,
+                               const std::string& uri,
+                               std::string* const out_uri) override {
+    return this;
+  }
+
+  RGWHandler_REST* get_handler(struct req_state* const s) override {
+    s->prot_flags |= RGW_REST_SWIFT;
+    return new RGWHandler_SWIFT_HealthCheck;
+  }
+};
+
+
+class RGWHandler_REST_SWIFT_Info : public RGWHandler_REST_SWIFT {
+public:
+  RGWHandler_REST_SWIFT_Info() = default;
+  ~RGWHandler_REST_SWIFT_Info() = default;
+
+  RGWOp *op_get() override {
+    return new RGWInfo_ObjStore_SWIFT();
+  }
+
+  int init(RGWRados* const store,
+           struct req_state* const state,
+           RGWClientIO* const cio) override {
+    state->dialect = "swift";
+    state->formatter = new JSONFormatter;
+    state->format = RGW_FORMAT_JSON;
+
+    return RGWHandler::init(store, state, cio);
+  }
+
+  int authorize() override {
+    return 0;
+  }
+
+  int postauth_init() override {
+    return 0;
+  }
+
+  int read_permissions(RGWOp *) override {
+    return 0;
+  }
+};
+
+class RGWRESTMgr_SWIFT_Info : public RGWRESTMgr {
+public:
+  RGWRESTMgr_SWIFT_Info() = default;
+  virtual ~RGWRESTMgr_SWIFT_Info() = default;
+  virtual RGWHandler_REST *get_handler(struct req_state *s) override;
 };
 
 #endif
diff --git a/src/rgw/rgw_swift.cc b/src/rgw/rgw_swift.cc
index 0fdb310..42f5c07 100644
--- a/src/rgw/rgw_swift.cc
+++ b/src/rgw/rgw_swift.cc
@@ -617,10 +617,28 @@ int authenticate_temp_url(RGWRados * const store, req_state * const s)
    * about the account is necessary to obtain its bucket tenant. Without that,
    * the access would be limited to accounts with an empty tenant. */
   string bucket_tenant;
-  if (!s->account_name.empty()) {
+  if (! s->account_name.empty()) {
     RGWUserInfo uinfo;
-
-    if (rgw_get_user_info_by_uid(store, s->account_name, uinfo) < 0) {
+    const rgw_user acct_user(s->account_name);
+
+    ldout(s->cct, 20) << "temp url: loading RGWUserInfo for rgw_user="
+                      << acct_user << dendl;
+
+    if (acct_user.tenant.empty()) {
+      rgw_user tenanted_acct_user(acct_user);
+      tenanted_acct_user.tenant = acct_user.id;
+
+      /* The account name specified in the URL doesn't have the tenant part.
+       * This means we have to handle the special case for Keystone-created
+       * accounts when "rgw_keystone_implicit_tenants" is turned on.
+       * For more details about this mechanism please refer to the comment
+       * in RGWSwift::update_user_info(). */
+      if (rgw_get_user_info_by_uid(store, tenanted_acct_user, uinfo) < 0) {
+        if (rgw_get_user_info_by_uid(store, acct_user, uinfo) < 0) {
+          return -EPERM;
+        }
+      }
+    } else if (rgw_get_user_info_by_uid(store, acct_user, uinfo) < 0) {
       return -EPERM;
     }
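
The fallback added above first looks the account up under a tenant equal to the user id (the layout produced when rgw_keystone_implicit_tenants is enabled) and only then falls back to the bare, empty-tenant user. A hedged sketch of that two-step lookup; lookup_user() stands in for rgw_get_user_info_by_uid(), and the "tenant$user" string form is shown only for illustration:

    // Sketch only: tenanted-then-bare account lookup.
    #include <functional>
    #include <string>

    struct UserInfo { std::string display_name; };

    int load_account(const std::string& account_name,
                     const std::function<int(const std::string&, UserInfo&)>& lookup_user,
                     UserInfo& out)
    {
      // Keystone-created accounts live under tenant == user id,
      // conventionally written as "<name>$<name>".
      const std::string tenanted = account_name + "$" + account_name;
      if (lookup_user(tenanted, out) == 0)
        return 0;
      return lookup_user(account_name, out);  // plain, empty-tenant account
    }
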
 
diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc
index d4e07d0..eb90f05 100644
--- a/src/rgw/rgw_swift_auth.cc
+++ b/src/rgw/rgw_swift_auth.cc
@@ -13,7 +13,7 @@
 
 #define dout_subsys ceph_subsys_rgw
 
-#define DEFAULT_SWIFT_PREFIX "swift"
+#define DEFAULT_SWIFT_PREFIX "/swift"
 
 using namespace ceph::crypto;
 
@@ -141,6 +141,7 @@ int rgw_swift_verify_signed_token(CephContext *cct, RGWRados *store,
 void RGW_SWIFT_Auth_Get::execute()
 {
   int ret = -EPERM;
+  const char *token_tag = "rgwtk";
 
   const char *key = s->info.env->get("HTTP_X_AUTH_KEY");
   const char *user = s->info.env->get("HTTP_X_AUTH_USER");
@@ -157,8 +158,20 @@ void RGW_SWIFT_Auth_Get::execute()
   string swift_prefix = g_conf->rgw_swift_url_prefix;
   string tenant_path;
 
+  /*
+   * An empty Swift prefix was not allowed before, but we want to support
+   * it now: setting rgw_swift_url_prefix = "/" yields the empty prefix,
+   * while the default rgw_swift_url_prefix = "" still yields "/swift"
+   * for backwards compatibility.
+   */
   if (swift_prefix.size() == 0) {
     swift_prefix = DEFAULT_SWIFT_PREFIX;
+  } else if (swift_prefix == "/") {
+    swift_prefix.clear();
+  } else {
+    if (swift_prefix[0] != '/') {
+      swift_prefix.insert(0, "/");
+    }
   }
 
   if (swift_url.size() == 0) {
@@ -220,8 +233,8 @@ void RGW_SWIFT_Auth_Get::execute()
     tenant_path.append(info.user_id.to_str());
   }
 
-  STREAM_IO(s)->print("X-Storage-Url: %s/%s/v1%s\r\n", swift_url.c_str(),
-		swift_prefix.c_str(), tenant_path.c_str());
+  STREAM_IO(s)->print("X-Storage-Url: %s%s/v1%s\r\n", swift_url.c_str(),
+	        swift_prefix.c_str(), tenant_path.c_str());
 
   if ((ret = encode_token(s->cct, swift_key->id, swift_key->key, bl)) < 0)
     goto done;
diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc
index 3d494ef..3128a18 100644
--- a/src/rgw/rgw_sync.cc
+++ b/src/rgw/rgw_sync.cc
@@ -244,7 +244,7 @@ int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info)
   return 0;
 }
 
-int RGWRemoteMetaLog::read_master_log_shards_info(string *master_period, map<int, RGWMetadataLogInfo> *shards_info)
+int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
 {
   if (store->is_meta_master()) {
     return 0;
@@ -256,9 +256,7 @@ int RGWRemoteMetaLog::read_master_log_shards_info(string *master_period, map<int
     return ret;
   }
 
-  *master_period = log_info.period;
-
-  return run(new RGWReadRemoteMDLogInfoCR(&sync_env, log_info.period, log_info.num_shards, shards_info));
+  return run(new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info));
 }
 
 int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
@@ -733,7 +731,7 @@ class RGWFetchAllMetaCR : public RGWCoroutine {
   list<string> result;
   list<string>::iterator iter;
 
-  RGWShardedOmapCRManager *entries_index;
+  std::unique_ptr<RGWShardedOmapCRManager> entries_index;
 
   RGWContinuousLeaseCR *lease_cr;
   RGWCoroutinesStack *lease_stack;
@@ -746,7 +744,7 @@ public:
   RGWFetchAllMetaCR(RGWMetaSyncEnv *_sync_env, int _num_shards,
                     map<uint32_t, rgw_meta_sync_marker>& _markers) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
 						      num_shards(_num_shards),
-						      ret_status(0), entries_index(NULL), lease_cr(nullptr), lease_stack(nullptr),
+						      ret_status(0), lease_cr(nullptr), lease_stack(nullptr),
                                                       lost_lock(false), failed(false), markers(_markers) {
   }
 
@@ -803,15 +801,16 @@ public:
         set_sleeping(true);
         yield;
       }
-      entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
-						  sync_env->store->get_zone_params().log_pool,
-                                                  mdlog_sync_full_sync_index_prefix);
+      entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
+                                                      sync_env->store->get_zone_params().log_pool,
+                                                      mdlog_sync_full_sync_index_prefix));
       yield {
 	call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
 				       "/admin/metadata", NULL, &sections));
       }
       if (get_ret_status() < 0) {
         ldout(cct, 0) << "ERROR: failed to fetch metadata sections" << dendl;
+        yield entries_index->finish();
         yield lease_cr->go_down();
         drain_all();
 	return set_cr_error(get_ret_status());
@@ -827,6 +826,7 @@ public:
 	}
         if (get_ret_status() < 0) {
           ldout(cct, 0) << "ERROR: failed to fetch metadata section: " << *sections_iter << dendl;
+          yield entries_index->finish();
           yield lease_cr->go_down();
           drain_all();
           return set_cr_error(get_ret_status());
@@ -1187,8 +1187,8 @@ class RGWCloneMetaLogCoroutine : public RGWCoroutine {
   int max_entries = CLONE_MAX_ENTRIES;
 
   RGWRESTReadResource *http_op = nullptr;
+  boost::intrusive_ptr<RGWMetadataLogInfoCompletion> completion;
 
-  int req_ret = 0;
   RGWMetadataLogInfo shard_info;
   rgw_mdlog_shard_data data;
 
@@ -1206,6 +1206,9 @@ public:
     if (http_op) {
       http_op->put();
     }
+    if (completion) {
+      completion->cancel();
+    }
   }
 
   int operate();
@@ -1258,7 +1261,9 @@ class RGWMetaSyncShardCR : public RGWCoroutine {
 
   bool *reset_backoff;
 
-  map<RGWCoroutinesStack *, string> stack_to_pos;
+  // hold a reference to the cr stack while it's in the map
+  using StackRef = boost::intrusive_ptr<RGWCoroutinesStack>;
+  map<StackRef, string> stack_to_pos;
   map<string, string> pos_to_prev;
 
   bool can_adjust_marker = false;
@@ -1320,7 +1325,7 @@ public:
     int child_ret;
     RGWCoroutinesStack *child;
     while (collect_next(&child_ret, &child)) {
-      map<RGWCoroutinesStack *, string>::iterator iter = stack_to_pos.find(child);
+      auto iter = stack_to_pos.find(child);
       if (iter == stack_to_pos.end()) {
         /* some other stack that we don't care about */
         continue;
@@ -1360,8 +1365,6 @@ public:
 
       ldout(sync_env->cct, 0) << *this << ": adjusting marker pos=" << sync_marker.marker << dendl;
       stack_to_pos.erase(iter);
-
-      child->put();
     }
   }
 
@@ -1432,8 +1435,7 @@ public:
             // fetch remote and write locally
             yield {
               RGWCoroutinesStack *stack = spawn(new RGWMetaSyncSingleEntryCR(sync_env, iter->first, iter->first, MDLOG_STATUS_COMPLETE, marker_tracker), false);
-              stack->get();
-
+              // stack_to_pos holds a reference to the stack
               stack_to_pos[stack] = iter->first;
               pos_to_prev[iter->first] = marker;
             }
@@ -1581,8 +1583,7 @@ public:
               yield {
                 RGWCoroutinesStack *stack = spawn(new RGWMetaSyncSingleEntryCR(sync_env, raw_key, log_iter->id, mdlog_entry.log_data.status, marker_tracker), false);
                 assert(stack);
-                stack->get();
-
+                // stack_to_pos holds a reference to the stack
                 stack_to_pos[stack] = log_iter->id;
                 pos_to_prev[log_iter->id] = marker;
               }
@@ -1672,6 +1673,7 @@ class RGWMetaSyncCR : public RGWCoroutine {
   using StackRef = boost::intrusive_ptr<RGWCoroutinesStack>;
   using RefPair = std::pair<ControlCRRef, StackRef>;
   map<int, RefPair> shard_crs;
+  int ret{0};
 
 public:
   RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, RGWPeriodHistory::Cursor cursor,
@@ -1681,7 +1683,6 @@ public:
       cursor(cursor), sync_status(_sync_status) {}
 
   int operate() {
-    int ret = 0;
     reenter(this) {
       // loop through one period at a time
       for (;;) {
@@ -1734,7 +1735,10 @@ public:
           }
         }
         // wait for each shard to complete
-        collect(&ret, NULL);
+        while (ret == 0 && num_spawned() > 0) {
+          yield wait_for_child();
+          collect(&ret, nullptr);
+        }
         drain_all();
         {
           // drop shard cr refs under lock
@@ -1876,8 +1880,8 @@ int RGWRemoteMetaLog::run_sync()
       return 0;
     }
     r = read_log_info(&mdlog_info);
-    if (r == -EIO) {
-      // keep retrying if master isn't alive
+    if (r == -EIO || r == -ENOENT) {
+      // keep retrying if master isn't alive or hasn't initialized the log
       ldout(store->ctx(), 10) << __func__ << "(): waiting for master.." << dendl;
       backoff.backoff_sleep();
       continue;
@@ -1908,6 +1912,9 @@ int RGWRemoteMetaLog::run_sync()
       if (sync_status.sync_info.period.empty() ||
           sync_status.sync_info.realm_epoch < mdlog_info.realm_epoch) {
         sync_status.sync_info.state = rgw_meta_sync_info::StateInit;
+        ldout(store->ctx(), 1) << "epoch=" << sync_status.sync_info.realm_epoch
+           << " in sync status comes before remote's oldest mdlog epoch="
+           << mdlog_info.realm_epoch << ", restarting sync" << dendl;
       }
     }
 
@@ -2047,7 +2054,23 @@ int RGWCloneMetaLogCoroutine::state_init()
 
 int RGWCloneMetaLogCoroutine::state_read_shard_status()
 {
-  int ret = mdlog->get_info_async(shard_id, &shard_info, stack->get_completion_mgr(), (void *)stack, &req_ret);
+  const bool add_ref = false; // default constructs with refs=1
+
+  using CompletionRef = boost::intrusive_ptr<RGWMetadataLogInfoCompletion>;
+  completion = CompletionRef(new RGWMetadataLogInfoCompletion(
+    [this](int ret, const cls_log_header& header) {
+      if (ret < 0) {
+        ldout(cct, 1) << "ERROR: failed to read mdlog info with "
+            << cpp_strerror(ret) << dendl;
+      } else {
+        shard_info.marker = header.max_marker;
+        shard_info.last_update = header.max_time.to_real_time();
+      }
+      // wake up parent stack
+      stack->get_completion_mgr()->complete(nullptr, stack);
+    }), add_ref);
+
+  int ret = mdlog->get_info_async(shard_id, completion.get());
   if (ret < 0) {
     ldout(cct, 0) << "ERROR: mdlog->get_info_async() returned ret=" << ret << dendl;
     return set_cr_error(ret);
@@ -2058,6 +2081,8 @@ int RGWCloneMetaLogCoroutine::state_read_shard_status()
 
 int RGWCloneMetaLogCoroutine::state_read_shard_status_complete()
 {
+  completion.reset();
+
   ldout(cct, 20) << "shard_id=" << shard_id << " marker=" << shard_info.marker << " last_update=" << shard_info.last_update << dendl;
 
   marker = shard_info.marker;
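
The rgw_sync.cc hunks above replace manual get()/put() reference management on coroutine stacks and completions with boost::intrusive_ptr (constructed with add_ref=false, since both RGWCoroutinesStack and RGWMetadataLogInfoCompletion start life holding one reference), and swap the raw entries_index pointer for std::unique_ptr. The following is a minimal standalone sketch of that intrusive_ptr pattern only; RefCounted is a hypothetical stand-in for the real Ceph classes, not their actual API.

// Sketch: an intrusive_ptr built with add_ref=false adopts an object that
// already carries one reference, and a map keyed by intrusive_ptr keeps each
// entry alive until it is erased (the invariant the "stack_to_pos holds a
// reference" comments above rely on).
#include <boost/intrusive_ptr.hpp>
#include <atomic>
#include <map>
#include <string>

struct RefCounted {
  std::atomic<int> refs{1};            // constructed with one reference
};

inline void intrusive_ptr_add_ref(RefCounted *p) { ++p->refs; }
inline void intrusive_ptr_release(RefCounted *p) {
  if (--p->refs == 0) delete p;
}

int main() {
  using Ref = boost::intrusive_ptr<RefCounted>;

  // adopt the initial reference instead of taking a second one
  Ref completion(new RefCounted, false /* add_ref */);

  // copying the smart pointer into the map adds a reference, so no manual
  // get()/put() is needed around the stored entry
  std::map<Ref, std::string> stack_to_pos;
  stack_to_pos[completion] = "marker-1";

  stack_to_pos.erase(completion);      // drops the map's reference
  completion.reset();                  // drops the last reference, object is deleted
  return 0;
}
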
diff --git a/src/rgw/rgw_sync.h b/src/rgw/rgw_sync.h
index 59f6b42..6ddfd35 100644
--- a/src/rgw/rgw_sync.h
+++ b/src/rgw/rgw_sync.h
@@ -212,7 +212,7 @@ public:
   void finish();
 
   int read_log_info(rgw_mdlog_info *log_info);
-  int read_master_log_shards_info(string *master_period, map<int, RGWMetadataLogInfo> *shards_info);
+  int read_master_log_shards_info(const string& master_period, map<int, RGWMetadataLogInfo> *shards_info);
   int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result);
   int read_sync_status();
   int init_sync_status();
@@ -268,7 +268,7 @@ public:
   int read_log_info(rgw_mdlog_info *log_info) {
     return master_log.read_log_info(log_info);
   }
-  int read_master_log_shards_info(string *master_period, map<int, RGWMetadataLogInfo> *shards_info) {
+  int read_master_log_shards_info(const string& master_period, map<int, RGWMetadataLogInfo> *shards_info) {
     return master_log.read_master_log_shards_info(master_period, shards_info);
   }
   int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result) {
diff --git a/src/rgw/rgw_tools.cc b/src/rgw/rgw_tools.cc
index e97b432..26bb906 100644
--- a/src/rgw/rgw_tools.cc
+++ b/src/rgw/rgw_tools.cc
@@ -18,7 +18,7 @@
 
 static map<string, string> ext_mime_map;
 
-int rgw_put_system_obj(RGWRados *rgwstore, rgw_bucket& bucket, string& oid, const char *data, size_t size, bool exclusive,
+int rgw_put_system_obj(RGWRados *rgwstore, rgw_bucket& bucket, const string& oid, const char *data, size_t size, bool exclusive,
                        RGWObjVersionTracker *objv_tracker, real_time set_mtime, map<string, bufferlist> *pattrs)
 {
   map<string,bufferlist> no_attrs;
diff --git a/src/rgw/rgw_tools.h b/src/rgw/rgw_tools.h
index f778b47..e44d615 100644
--- a/src/rgw/rgw_tools.h
+++ b/src/rgw/rgw_tools.h
@@ -16,7 +16,7 @@ struct RGWObjVersionTracker;
 
 struct obj_version;
 
-int rgw_put_system_obj(RGWRados *rgwstore, rgw_bucket& bucket, string& oid, const char *data, size_t size, bool exclusive,
+int rgw_put_system_obj(RGWRados *rgwstore, rgw_bucket& bucket, const string& oid, const char *data, size_t size, bool exclusive,
                        RGWObjVersionTracker *objv_tracker, real_time set_mtime, map<string, bufferlist> *pattrs = NULL);
 int rgw_get_system_obj(RGWRados *rgwstore, RGWObjectCtx& obj_ctx, rgw_bucket& bucket, const string& key, bufferlist& bl,
                        RGWObjVersionTracker *objv_tracker, real_time *pmtime, map<string, bufferlist> *pattrs = NULL,
diff --git a/src/test/Makefile-client.am b/src/test/Makefile-client.am
index 6eade23..1346e00 100644
--- a/src/test/Makefile-client.am
+++ b/src/test/Makefile-client.am
@@ -3,7 +3,6 @@ ceph_dencoder_SOURCES = \
 	test/encoding/ceph_dencoder.cc \
 	$(DENCODER_SOURCES)
 ceph_dencoder_LDADD = \
-	$(LIBRGW) \
 	$(LIBRADOS) \
 	$(LIBRBD_TYPES) \
 	$(LIBOSD_TYPES) \
@@ -26,6 +25,9 @@ ceph_dencoder_CXXFLAGS += -DWITH_RBD
 endif
 if WITH_RADOSGW
 ceph_dencoder_CXXFLAGS += -DWITH_RADOSGW
+ceph_dencoder_LDADD += \
+	$(LIBRGW) \
+	$(LIBRGW_DEPS) 
 endif
 
 
@@ -383,14 +385,19 @@ librbd_test_mock_la_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 noinst_LTLIBRARIES += librbd_test_mock.la
 
 unittest_librbd_SOURCES = \
+        test/librbd/test_BlockGuard.cc \
         test/librbd/test_main.cc \
 	test/librbd/test_mock_fixture.cc \
 	test/librbd/test_mock_ExclusiveLock.cc \
 	test/librbd/test_mock_Journal.cc \
 	test/librbd/test_mock_ObjectWatcher.cc \
 	test/librbd/exclusive_lock/test_mock_AcquireRequest.cc \
+	test/librbd/exclusive_lock/test_mock_BreakRequest.cc \
+	test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc \
+	test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc \
 	test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc \
 	test/librbd/image/test_mock_RefreshRequest.cc \
+	test/librbd/image_watcher/test_mock_RewatchRequest.cc \
 	test/librbd/journal/test_mock_Replay.cc \
 	test/librbd/object_map/test_mock_InvalidateRequest.cc \
 	test/librbd/object_map/test_mock_LockRequest.cc \
diff --git a/src/test/Makefile.am b/src/test/Makefile.am
index ce6fc20..480b7df 100644
--- a/src/test/Makefile.am
+++ b/src/test/Makefile.am
@@ -289,7 +289,7 @@ unittest_str_list_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 check_TESTPROGRAMS += unittest_str_list
 
 unittest_log_SOURCES = log/test.cc
-unittest_log_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_log_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_log_CXXFLAGS = $(UNITTEST_CXXFLAGS) -O2
 check_TESTPROGRAMS += unittest_log
 
@@ -344,7 +344,7 @@ unittest_bufferlist_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_bufferlist
 
 unittest_xlist_SOURCES = test/test_xlist.cc
-unittest_xlist_LDADD = $(UNITTEST_LDADD) $(LIBCOMMON)
+unittest_xlist_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_xlist_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_xlist
 
@@ -424,7 +424,7 @@ unittest_safe_io_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_safe_io
 
 unittest_heartbeatmap_SOURCES = test/heartbeat_map.cc
-unittest_heartbeatmap_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD) $(CEPH_GLOBAL)
+unittest_heartbeatmap_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_heartbeatmap_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_heartbeatmap
 
@@ -447,7 +447,7 @@ unittest_ipaddr_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_ipaddr
 
 unittest_texttable_SOURCES = test/test_texttable.cc
-unittest_texttable_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_texttable_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_texttable_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_TESTPROGRAMS += unittest_texttable
 
@@ -476,7 +476,7 @@ unittest_interval_set_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 check_TESTPROGRAMS += unittest_interval_set
 
 unittest_subprocess_SOURCES = test/test_subprocess.cc
-unittest_subprocess_LDADD = $(LIBCOMMON) $(UNITTEST_LDADD)
+unittest_subprocess_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
 unittest_subprocess_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 check_PROGRAMS += unittest_subprocess
 
diff --git a/src/test/centos-6/ceph.spec.in b/src/test/centos-6/ceph.spec.in
index 9ddd75f..fd8ab92 100644
--- a/src/test/centos-6/ceph.spec.in
+++ b/src/test/centos-6/ceph.spec.in
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/src/test/centos-6/install-deps.sh b/src/test/centos-6/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/centos-6/install-deps.sh
+++ b/src/test/centos-6/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/centos-7/ceph.spec.in b/src/test/centos-7/ceph.spec.in
index 9ddd75f..fd8ab92 100644
--- a/src/test/centos-7/ceph.spec.in
+++ b/src/test/centos-7/ceph.spec.in
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/src/test/centos-7/install-deps.sh b/src/test/centos-7/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/centos-7/install-deps.sh
+++ b/src/test/centos-7/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/cli/crushtool/compile-decompile-recompile.t b/src/test/cli/crushtool/compile-decompile-recompile.t
index 6724756..7a1b61d 100644
--- a/src/test/cli/crushtool/compile-decompile-recompile.t
+++ b/src/test/cli/crushtool/compile-decompile-recompile.t
@@ -9,3 +9,7 @@
 # worked
   $ cmp need_tree_order.crush nto.conf
   $ cmp nto.compiled nto.recompiled
+
+  $ crushtool -c "$TESTDIR/missing-bucket.crushmap.txt"
+  in rule 'rule-bad' item 'root-404' not defined
+  [1]
diff --git a/src/test/cli/crushtool/missing-bucket.crushmap.txt b/src/test/cli/crushtool/missing-bucket.crushmap.txt
new file mode 100644
index 0000000..4ef7f44
--- /dev/null
+++ b/src/test/cli/crushtool/missing-bucket.crushmap.txt
@@ -0,0 +1,39 @@
+device 0 device0
+device 1 device1
+device 2 device2
+device 3 device3
+device 4 device4
+
+type 0 osd
+type 1 domain
+
+domain root {
+	id -1
+	alg straw
+	hash 0
+	item device0 weight 1.000
+	item device1 weight 1.000
+	item device2 weight 1.000
+	item device3 weight 1.000
+	item device4 weight 1.000
+}
+
+rule rule-bad {
+	ruleset 0
+	type replicated
+	min_size 1
+	max_size 10
+	step take root-404
+	step choose firstn 0 type osd
+	step emit
+}
+
+rule rule-good {
+	ruleset 1
+	type erasure
+	min_size 1
+	max_size 10
+	step take root
+	step choose indep 0 type osd
+	step emit
+}
diff --git a/src/test/cli/radosgw-admin/help.t b/src/test/cli/radosgw-admin/help.t
index 5fb1321..bac3e0b 100644
--- a/src/test/cli/radosgw-admin/help.t
+++ b/src/test/cli/radosgw-admin/help.t
@@ -22,10 +22,13 @@
     bucket stats               returns bucket statistics
     bucket rm                  remove bucket
     bucket check               check bucket index
+    bucket reshard             reshard bucket
+    bi get                     retrieve bucket index object entries
+    bi put                     store bucket index object entries
+    bi list                    list raw bucket index entries
     object rm                  remove object
     object unlink              unlink object from bucket index
     objects expire             run expired objects cleanup
-    period prepare             prepare a new period
     period delete              delete a period
     period get                 get period info
     period get-current         get current period info
@@ -58,6 +61,11 @@
     zonegroup remove           remove a zone from a zonegroup
     zonegroup rename           rename a zone group
     zonegroup list             list all zone groups set on this cluster
+    zonegroup placement list   list zonegroup's placement targets
+    zonegroup placement add    add a placement target id to a zonegroup
+    zonegroup placement modify modify a placement target of a specific zonegroup
+    zonegroup placement rm     remove a placement target from a zonegroup
+    zonegroup placement default  set a zonegroup's default placement target
     zonegroup-map get          show zonegroup-map
     zonegroup-map set          set zonegroup-map (requires infile)
     zone create                create a new zone
@@ -67,6 +75,10 @@
     zone set                   set zone cluster params (requires infile)
     zone list                  list all zones set on this cluster
     zone rename                rename a zone
+    zone placement list        list zone's placement targets
+    zone placement add         add a zone placement target
+    zone placement modify      modify a zone placement target
+    zone placement rm          remove a zone placement target
     pool add                   add an existing pool for data placement
     pool rm                    remove an existing pool from data placement set
     pools list                 list placement active set
@@ -154,7 +166,16 @@
      --source-zone             specify the source zone (for data sync)
      --default                 set entity (realm, zonegroup, zone) as default
      --read-only               set zone as read-only (when adding to zonegroup)
+     --placement-id            placement id for zonegroup placement commands
+     --tags=<list>             list of tags for zonegroup placement add and modify commands
+     --tags-add=<list>         list of tags to add for zonegroup placement modify command
+     --tags-rm=<list>          list of tags to remove for zonegroup placement modify command
      --endpoints=<list>        zone endpoints
+     --index_pool=<pool>       placement target index pool
+     --data_pool=<pool>        placement target data pool
+     --data_extra_pool=<pool>  placement target data extra (non-ec) pool
+     --placement-index-type=<type>
+                               placement target index type (normal, indexless, or #id)
      --fix                     besides checking bucket index, will also fix it
      --check-objects           bucket check: rebuilds bucket index according to
                                actual objects state
@@ -180,6 +201,10 @@
      --caps=<caps>             list of caps (e.g., "usage=read, write; user=read"
      --yes-i-really-mean-it    required for certain operations
      --reset-regions           reset regionmap when regionmap update
+     --bypass-gc               when specified with bucket deletion, triggers
+                               object deletions by not involving GC
+     --inconsistent-index      when specified with bucket deletion and bypass-gc set to true,
+                               ignores bucket index consistency
   
   <date> := "YYYY-MM-DD[ hh:mm:ss]"
   
diff --git a/src/test/cli/rbd/help.t b/src/test/cli/rbd/help.t
index 6c86a41..9c7bd9e 100644
--- a/src/test/cli/rbd/help.t
+++ b/src/test/cli/rbd/help.t
@@ -973,7 +973,8 @@
   
   rbd help nbd map
   usage: rbd nbd map [--pool <pool>] [--image <image>] [--snap <snap>] 
-                     [--read-only] [--device <device>] 
+                     [--read-only] [--device <device>] [--nbds_max <nbds_max>] 
+                     [--max_part <max_part>] 
                      <image-or-snap-spec> 
   
   Map image to a nbd device.
@@ -988,6 +989,8 @@
     --snap arg            snapshot name
     --read-only           mount read-only
     --device arg          specify nbd device
+    --nbds_max arg        override module param nbds_max
+    --max_part arg        override module param max_part
   
   rbd help nbd unmap
   usage: rbd nbd unmap 
diff --git a/src/test/cls_lock/test_cls_lock.cc b/src/test/cls_lock/test_cls_lock.cc
index 72dbb8e..1040815 100644
--- a/src/test/cls_lock/test_cls_lock.cc
+++ b/src/test/cls_lock/test_cls_lock.cc
@@ -338,3 +338,54 @@ TEST(ClsLock, TestAssertLocked) {
 
   ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
 }
+
+TEST(ClsLock, TestSetCookie) {
+  Rados cluster;
+  std::string pool_name = get_temp_pool_name();
+  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
+  IoCtx ioctx;
+  cluster.ioctx_create(pool_name.c_str(), ioctx);
+
+  string oid = "foo";
+  string name = "name";
+  string tag = "tag";
+  string cookie = "cookie";
+  string new_cookie = "new cookie";
+  librados::ObjectWriteOperation op1;
+  set_cookie(&op1, name, LOCK_SHARED, cookie, tag, new_cookie);
+  ASSERT_EQ(-ENOENT, ioctx.operate(oid, &op1));
+
+  librados::ObjectWriteOperation op2;
+  lock(&op2, name, LOCK_SHARED, cookie, tag, "", utime_t{}, 0);
+  ASSERT_EQ(0, ioctx.operate(oid, &op2));
+
+  librados::ObjectWriteOperation op3;
+  lock(&op3, name, LOCK_SHARED, "cookie 2", tag, "", utime_t{}, 0);
+  ASSERT_EQ(0, ioctx.operate(oid, &op3));
+
+  librados::ObjectWriteOperation op4;
+  set_cookie(&op4, name, LOCK_SHARED, cookie, tag, cookie);
+  ASSERT_EQ(-EBUSY, ioctx.operate(oid, &op4));
+
+  librados::ObjectWriteOperation op5;
+  set_cookie(&op5, name, LOCK_SHARED, cookie, "wrong tag", new_cookie);
+  ASSERT_EQ(-EBUSY, ioctx.operate(oid, &op5));
+
+  librados::ObjectWriteOperation op6;
+  set_cookie(&op6, name, LOCK_SHARED, "wrong cookie", tag, new_cookie);
+  ASSERT_EQ(-EBUSY, ioctx.operate(oid, &op6));
+
+  librados::ObjectWriteOperation op7;
+  set_cookie(&op7, name, LOCK_EXCLUSIVE, cookie, tag, new_cookie);
+  ASSERT_EQ(-EBUSY, ioctx.operate(oid, &op7));
+
+  librados::ObjectWriteOperation op8;
+  set_cookie(&op8, name, LOCK_SHARED, cookie, tag, "cookie 2");
+  ASSERT_EQ(-EBUSY, ioctx.operate(oid, &op8));
+
+  librados::ObjectWriteOperation op9;
+  set_cookie(&op9, name, LOCK_SHARED, cookie, tag, new_cookie);
+  ASSERT_EQ(0, ioctx.operate(oid, &op9));
+
+  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
+}
diff --git a/src/test/common/test_util.cc b/src/test/common/test_util.cc
index cb22047..2ea40b3 100644
--- a/src/test/common/test_util.cc
+++ b/src/test/common/test_util.cc
@@ -12,6 +12,7 @@
  *
  */
 
+#include "common/ceph_context.h"
 #include "include/util.h"
 #include "gtest/gtest.h"
 
@@ -30,3 +31,17 @@ TEST(util, unit_to_bytesize)
 
   ASSERT_EQ(65536ll, unit_to_bytesize(" 64K", &cerr));
 }
+
+TEST(util, collect_sys_info)
+{
+  map<string, string> sys_info;
+
+  CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
+  collect_sys_info(&sys_info, cct);
+
+  ASSERT_TRUE(sys_info.find("distro") != sys_info.end());
+  ASSERT_TRUE(sys_info.find("distro_version") != sys_info.end());
+  ASSERT_TRUE(sys_info.find("distro_description") != sys_info.end());
+
+  cct->put();
+}
diff --git a/src/test/debian-jessie/install-deps.sh b/src/test/debian-jessie/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/debian-jessie/install-deps.sh
+++ b/src/test/debian-jessie/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/encoding/types.h b/src/test/encoding/types.h
index 7bed2f3..e202436 100644
--- a/src/test/encoding/types.h
+++ b/src/test/encoding/types.h
@@ -379,6 +379,7 @@ TYPE(cls_lock_get_info_op)
 TYPE(cls_lock_get_info_reply)
 TYPE(cls_lock_list_locks_reply)
 TYPE(cls_lock_assert_op)
+TYPE(cls_lock_set_cookie_op)
 
 #include "cls/replica_log/cls_replica_log_types.h"
 TYPE(cls_replica_log_item_marker)
diff --git a/src/test/fedora-21/ceph.spec.in b/src/test/fedora-21/ceph.spec.in
index 9ddd75f..fd8ab92 100644
--- a/src/test/fedora-21/ceph.spec.in
+++ b/src/test/fedora-21/ceph.spec.in
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/src/test/fedora-21/install-deps.sh b/src/test/fedora-21/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/fedora-21/install-deps.sh
+++ b/src/test/fedora-21/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/libcephfs/flock.cc b/src/test/libcephfs/flock.cc
index 0b3f0d7..a4b8da0 100644
--- a/src/test/libcephfs/flock.cc
+++ b/src/test/libcephfs/flock.cc
@@ -70,7 +70,6 @@ static const struct timespec* abstime(struct timespec &ts, long ms) {
 }
 
 /* Basic locking */
-
 TEST(LibCephFS, BasicLocking) {
   struct ceph_mount_info *cmount = NULL;
   STARTUP_CEPH();
@@ -427,7 +426,8 @@ static void process_ConcurrentLocking(str_ConcurrentLocking& s) {
   exit(EXIT_SUCCESS);
 }
 
-TEST(LibCephFS, InterProcessLocking) {
+// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
+TEST(LibCephFS, DISABLED_InterProcessLocking) {
   PROCESS_SLOW_MS();
   // Process synchronization
   char c_file[1024];
@@ -526,7 +526,8 @@ TEST(LibCephFS, InterProcessLocking) {
   CLEANUP_CEPH();
 }
 
-TEST(LibCephFS, ThreesomeInterProcessLocking) {
+// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
+TEST(LibCephFS, DISABLED_ThreesomeInterProcessLocking) {
   PROCESS_SLOW_MS();
   // Process synchronization
   char c_file[1024];
diff --git a/src/test/libcephfs/test.cc b/src/test/libcephfs/test.cc
index b0314b6..42b02f1 100644
--- a/src/test/libcephfs/test.cc
+++ b/src/test/libcephfs/test.cc
@@ -1374,3 +1374,41 @@ TEST(LibCephFS, OpenNoClose) {
   // shutdown should force close opened file/dir
   ceph_shutdown(cmount);
 }
+
+TEST(LibCephFS, OperationsOnRoot)
+{
+  struct ceph_mount_info *cmount;
+  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
+  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
+  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
+  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
+
+  char dirname[32];
+  sprintf(dirname, "/somedir%x", getpid());
+
+  ASSERT_EQ(ceph_mkdir(cmount, dirname, 0755), 0);
+
+  ASSERT_EQ(ceph_rmdir(cmount, "/"), -EBUSY);
+
+  ASSERT_EQ(ceph_link(cmount, "/", "/"), -EEXIST);
+  ASSERT_EQ(ceph_link(cmount, dirname, "/"), -EEXIST);
+  ASSERT_EQ(ceph_link(cmount, "nonExisitingDir", "/"), -ENOENT);
+
+  ASSERT_EQ(ceph_unlink(cmount, "/"), -EISDIR);
+
+  ASSERT_EQ(ceph_rename(cmount, "/", "/"), -EBUSY);
+  ASSERT_EQ(ceph_rename(cmount, dirname, "/"), -EBUSY);
+  ASSERT_EQ(ceph_rename(cmount, "nonExistingDir", "/"), -EBUSY);
+  ASSERT_EQ(ceph_rename(cmount, "/", dirname), -EBUSY);
+  ASSERT_EQ(ceph_rename(cmount, "/", "nonExistingDir"), -EBUSY);
+
+  ASSERT_EQ(ceph_mkdir(cmount, "/", 0777), -EEXIST);
+
+  ASSERT_EQ(ceph_mknod(cmount, "/", 0, 0), -EEXIST);
+
+  ASSERT_EQ(ceph_symlink(cmount, "/", "/"), -EEXIST);
+  ASSERT_EQ(ceph_symlink(cmount, dirname, "/"), -EEXIST);
+  ASSERT_EQ(ceph_symlink(cmount, "nonExistingDir", "/"), -EEXIST);
+
+  ceph_shutdown(cmount);
+}
diff --git a/src/test/librbd/CMakeLists.txt b/src/test/librbd/CMakeLists.txt
index aff27a6..60cf23f 100644
--- a/src/test/librbd/CMakeLists.txt
+++ b/src/test/librbd/CMakeLists.txt
@@ -15,15 +15,21 @@ set_target_properties(rbd_test PROPERTIES COMPILE_FLAGS ${UNITTEST_CXX_FLAGS})
 # unittest_librbd
 # doesn't use add_ceph_test because it is called by run-rbd-unit-tests.sh
 set(unittest_librbd_srcs
+  test_BlockGuard.cc
   test_main.cc
   test_mock_fixture.cc
   test_mock_AioImageRequest.cc
   test_mock_ExclusiveLock.cc
   test_mock_Journal.cc
+  test_mock_ObjectMap.cc
   test_mock_ObjectWatcher.cc
   exclusive_lock/test_mock_AcquireRequest.cc
+  exclusive_lock/test_mock_BreakRequest.cc
+  exclusive_lock/test_mock_GetLockerRequest.cc
+  exclusive_lock/test_mock_ReacquireRequest.cc
   exclusive_lock/test_mock_ReleaseRequest.cc
   image/test_mock_RefreshRequest.cc
+  image_watcher/test_mock_RewatchRequest.cc
   journal/test_mock_Replay.cc
   object_map/test_mock_InvalidateRequest.cc
   object_map/test_mock_LockRequest.cc
diff --git a/src/test/librbd/exclusive_lock/test_mock_AcquireRequest.cc b/src/test/librbd/exclusive_lock/test_mock_AcquireRequest.cc
index b7fa20d..1c96cdb 100644
--- a/src/test/librbd/exclusive_lock/test_mock_AcquireRequest.cc
+++ b/src/test/librbd/exclusive_lock/test_mock_AcquireRequest.cc
@@ -13,15 +13,104 @@
 #include "cls/lock/cls_lock_ops.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/exclusive_lock/AcquireRequest.h"
+#include "librbd/exclusive_lock/BreakRequest.h"
+#include "librbd/exclusive_lock/GetLockerRequest.h"
+#include "librbd/image/RefreshRequest.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include <arpa/inet.h>
 #include <list>
 
+namespace librbd {
+namespace {
+
+struct MockTestImageCtx : public librbd::MockImageCtx {
+  MockTestImageCtx(librbd::ImageCtx &image_ctx)
+    : librbd::MockImageCtx(image_ctx) {
+  }
+};
+
+} // anonymous namespace
+
+namespace exclusive_lock {
+
+template<>
+struct BreakRequest<librbd::MockTestImageCtx> {
+  Context *on_finish = nullptr;
+  static BreakRequest *s_instance;
+  static BreakRequest *create(librbd::MockTestImageCtx &image_ctx,
+                              const Locker &locker, bool blacklist_locker,
+                              bool force_break_lock, Context *on_finish) {
+    EXPECT_EQ(image_ctx.blacklist_on_break_lock, blacklist_locker);
+    EXPECT_FALSE(force_break_lock);
+    assert(s_instance != nullptr);
+    s_instance->on_finish = on_finish;
+    return s_instance;
+  }
+
+  BreakRequest() {
+    s_instance = this;
+  }
+  MOCK_METHOD0(send, void());
+};
+
+template <>
+struct GetLockerRequest<librbd::MockTestImageCtx> {
+  Locker *locker;
+  Context *on_finish;
+
+  static GetLockerRequest *s_instance;
+  static GetLockerRequest *create(librbd::MockTestImageCtx &image_ctx,
+                                  Locker *locker, Context *on_finish) {
+    assert(s_instance != nullptr);
+    s_instance->locker = locker;
+    s_instance->on_finish = on_finish;
+    return s_instance;
+  }
+
+  GetLockerRequest() {
+    s_instance = this;
+  }
+
+  MOCK_METHOD0(send, void());
+};
+
+BreakRequest<librbd::MockTestImageCtx> *BreakRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
+GetLockerRequest<librbd::MockTestImageCtx> *GetLockerRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
+
+} // namespace exclusive_lock
+
+namespace image {
+
+template<>
+struct RefreshRequest<librbd::MockTestImageCtx> {
+  static RefreshRequest *s_instance;
+  Context *on_finish;
+
+  static RefreshRequest *create(librbd::MockTestImageCtx &image_ctx,
+                                bool acquire_lock_refresh,
+                                bool skip_open_parent, Context *on_finish) {
+    EXPECT_TRUE(acquire_lock_refresh);
+    assert(s_instance != nullptr);
+    s_instance->on_finish = on_finish;
+    return s_instance;
+  }
+
+  RefreshRequest() {
+    s_instance = this;
+  }
+  MOCK_METHOD0(send, void());
+};
+
+RefreshRequest<librbd::MockTestImageCtx> *RefreshRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
+
+} // namespace image
+} // namespace librbd
+
 // template definitions
 #include "librbd/Journal.cc"
 #include "librbd/exclusive_lock/AcquireRequest.cc"
-template class librbd::exclusive_lock::AcquireRequest<librbd::MockImageCtx>;
+template class librbd::exclusive_lock::AcquireRequest<librbd::MockTestImageCtx>;
 
 namespace librbd {
 namespace exclusive_lock {
@@ -29,6 +118,7 @@ namespace exclusive_lock {
 using ::testing::_;
 using ::testing::DoAll;
 using ::testing::InSequence;
+using ::testing::Invoke;
 using ::testing::Return;
 using ::testing::SetArgPointee;
 using ::testing::StrEq;
@@ -38,80 +128,85 @@ static const std::string TEST_COOKIE("auto 123");
 
 class TestMockExclusiveLockAcquireRequest : public TestMockFixture {
 public:
-  typedef AcquireRequest<MockImageCtx> MockAcquireRequest;
-  typedef ExclusiveLock<MockImageCtx> MockExclusiveLock;
+  typedef AcquireRequest<MockTestImageCtx> MockAcquireRequest;
+  typedef BreakRequest<MockTestImageCtx> MockBreakRequest;
+  typedef GetLockerRequest<MockTestImageCtx> MockGetLockerRequest;
+  typedef ExclusiveLock<MockTestImageCtx> MockExclusiveLock;
+  typedef librbd::image::RefreshRequest<MockTestImageCtx> MockRefreshRequest;
 
-  void expect_test_features(MockImageCtx &mock_image_ctx, uint64_t features,
+  void expect_test_features(MockTestImageCtx &mock_image_ctx, uint64_t features,
                             bool enabled) {
     EXPECT_CALL(mock_image_ctx, test_features(features))
                   .WillOnce(Return(enabled));
   }
 
-  void expect_test_features(MockImageCtx &mock_image_ctx, uint64_t features,
+  void expect_test_features(MockTestImageCtx &mock_image_ctx, uint64_t features,
                             RWLock &lock, bool enabled) {
     EXPECT_CALL(mock_image_ctx, test_features(features, _))
                   .WillOnce(Return(enabled));
   }
 
-  void expect_lock(MockImageCtx &mock_image_ctx, int r) {
+  void expect_lock(MockTestImageCtx &mock_image_ctx, int r) {
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                 exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("lock"), _, _, _))
                   .WillOnce(Return(r));
   }
 
-  void expect_unlock(MockImageCtx &mock_image_ctx, int r) {
+  void expect_unlock(MockTestImageCtx &mock_image_ctx, int r) {
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                 exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("unlock"), _, _, _))
                   .WillOnce(Return(r));
   }
 
-  void expect_is_refresh_required(MockImageCtx &mock_image_ctx, bool required) {
+  void expect_is_refresh_required(MockTestImageCtx &mock_image_ctx, bool required) {
     EXPECT_CALL(*mock_image_ctx.state, is_refresh_required())
       .WillOnce(Return(required));
   }
 
-  void expect_refresh(MockImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(*mock_image_ctx.state, acquire_lock_refresh(_))
-                  .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+  void expect_refresh(MockTestImageCtx &mock_image_ctx,
+                      MockRefreshRequest &mock_refresh_request, int r) {
+    EXPECT_CALL(mock_refresh_request, send())
+                  .WillOnce(FinishRequest(&mock_refresh_request, r,
+                                          &mock_image_ctx));
   }
 
-  void expect_create_object_map(MockImageCtx &mock_image_ctx,
+  void expect_create_object_map(MockTestImageCtx &mock_image_ctx,
                                 MockObjectMap *mock_object_map) {
     EXPECT_CALL(mock_image_ctx, create_object_map(_))
                   .WillOnce(Return(mock_object_map));
   }
 
-  void expect_open_object_map(MockImageCtx &mock_image_ctx,
+  void expect_open_object_map(MockTestImageCtx &mock_image_ctx,
                               MockObjectMap &mock_object_map, int r) {
     EXPECT_CALL(mock_object_map, open(_))
                   .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_close_object_map(MockImageCtx &mock_image_ctx,
+  void expect_close_object_map(MockTestImageCtx &mock_image_ctx,
                               MockObjectMap &mock_object_map) {
     EXPECT_CALL(mock_object_map, close(_))
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_create_journal(MockImageCtx &mock_image_ctx,
+  void expect_create_journal(MockTestImageCtx &mock_image_ctx,
                              MockJournal *mock_journal) {
     EXPECT_CALL(mock_image_ctx, create_journal())
                   .WillOnce(Return(mock_journal));
   }
 
-  void expect_open_journal(MockImageCtx &mock_image_ctx,
+  void expect_open_journal(MockTestImageCtx &mock_image_ctx,
                            MockJournal &mock_journal, int r) {
     EXPECT_CALL(mock_journal, open(_))
                   .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_close_journal(MockImageCtx &mock_image_ctx,
+  void expect_close_journal(MockTestImageCtx &mock_image_ctx,
                             MockJournal &mock_journal) {
     EXPECT_CALL(mock_journal, close(_))
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_get_journal_policy(MockImageCtx &mock_image_ctx,
+  void expect_get_journal_policy(MockTestImageCtx &mock_image_ctx,
                                  MockJournalPolicy &mock_journal_policy) {
     EXPECT_CALL(mock_image_ctx, get_journal_policy())
                   .WillOnce(Return(&mock_journal_policy));
@@ -123,80 +218,47 @@ public:
       .WillOnce(Return(disabled));
   }
 
-  void expect_allocate_journal_tag(MockImageCtx &mock_image_ctx,
+  void expect_allocate_journal_tag(MockTestImageCtx &mock_image_ctx,
                                    MockJournalPolicy &mock_journal_policy,
                                    int r) {
     EXPECT_CALL(mock_journal_policy, allocate_tag_on_lock(_))
                   .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_get_lock_info(MockImageCtx &mock_image_ctx, int r,
-                            const entity_name_t &locker_entity,
-                            const std::string &locker_address,
-                            const std::string &locker_cookie,
-                            const std::string &lock_tag,
-                            ClsLockType lock_type) {
-    auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
-                               exec(mock_image_ctx.header_oid, _, StrEq("lock"),
-                               StrEq("get_info"), _, _, _));
-    if (r < 0 && r != -ENOENT) {
-      expect.WillOnce(Return(r));
-    } else {
-      entity_name_t entity(locker_entity);
-      entity_addr_t entity_addr;
-      entity_addr.addr.ss_family = AF_INET;
-      inet_pton(AF_INET, locker_address.c_str(), &entity_addr.addr4.sin_addr);
-
-      cls_lock_get_info_reply reply;
-      if (r != -ENOENT) {
-        reply.lockers = decltype(reply.lockers){
-          {rados::cls::lock::locker_id_t(entity, locker_cookie),
-           rados::cls::lock::locker_info_t(utime_t(), entity_addr, "")}};
-        reply.tag = lock_tag;
-        reply.lock_type = lock_type;
-      }
-
-      bufferlist bl;
-      ::encode(reply, bl);
-
-      std::string str(bl.c_str(), bl.length());
-      expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(0)));
-    }
+  void expect_get_locker(MockTestImageCtx &mock_image_ctx,
+                         MockGetLockerRequest &mock_get_locker_request,
+                         const Locker &locker, int r) {
+    EXPECT_CALL(mock_get_locker_request, send())
+      .WillOnce(Invoke([&mock_image_ctx, &mock_get_locker_request, locker, r]() {
+          *mock_get_locker_request.locker = locker;
+          mock_image_ctx.image_ctx->op_work_queue->queue(
+            mock_get_locker_request.on_finish, r);
+        }));
   }
 
-  void expect_list_watchers(MockImageCtx &mock_image_ctx, int r,
-                            const std::string &address, uint64_t watch_handle) {
-    auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
-                               list_watchers(mock_image_ctx.header_oid, _));
-    if (r < 0) {
-      expect.WillOnce(Return(r));
-    } else {
-      obj_watch_t watcher;
-      strcpy(watcher.addr, (address + ":0/0").c_str());
-      watcher.cookie = watch_handle;
-
-      std::list<obj_watch_t> watchers;
-      watchers.push_back(watcher);
-
-      expect.WillOnce(DoAll(SetArgPointee<1>(watchers), Return(0)));
-    }
+  void expect_break_lock(MockTestImageCtx &mock_image_ctx,
+                         MockBreakRequest &mock_break_request, int r) {
+    EXPECT_CALL(mock_break_request, send())
+                  .WillOnce(FinishRequest(&mock_break_request, r,
+                                          &mock_image_ctx));
   }
 
-  void expect_blacklist_add(MockImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(get_mock_rados_client(), blacklist_add(_, _))
-                  .WillOnce(Return(r));
+  void expect_flush_notifies(MockTestImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_))
+                  .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
   }
 
-  void expect_break_lock(MockImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
-                exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _))
-                  .WillOnce(Return(r));
+  void expect_prepare_lock(MockTestImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_))
+      .WillOnce(Invoke([](Context *on_ready) {
+                  on_ready->complete(0);
+                }));
   }
 
-  void expect_flush_notifies(MockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_))
-                  .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
+  void expect_handle_prepare_lock_complete(MockTestImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete());
   }
+
 };
 
 TEST_F(TestMockExclusiveLockAcquireRequest, Success) {
@@ -205,11 +267,14 @@ TEST_F(TestMockExclusiveLockAcquireRequest, Success) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -225,6 +290,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, Success) {
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_journal_disabled(mock_journal_policy, false);
   expect_create_journal(mock_image_ctx, &mock_journal);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
   expect_open_journal(mock_image_ctx, mock_journal, 0);
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_allocate_journal_tag(mock_image_ctx, mock_journal_policy, 0);
@@ -245,19 +311,24 @@ TEST_F(TestMockExclusiveLockAcquireRequest, SuccessRefresh) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
+  MockRefreshRequest mock_refresh_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, true);
-  expect_refresh(mock_image_ctx, 0);
+  expect_refresh(mock_image_ctx, mock_refresh_request, 0);
 
   MockObjectMap mock_object_map;
   expect_test_features(mock_image_ctx, RBD_FEATURE_OBJECT_MAP, false);
   expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
                        mock_image_ctx.snap_lock, false);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond acquire_ctx;
   C_SaferCond ctx;
@@ -275,11 +346,14 @@ TEST_F(TestMockExclusiveLockAcquireRequest, SuccessJournalDisabled) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -290,6 +364,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, SuccessJournalDisabled) {
 
   expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
                        mock_image_ctx.snap_lock, false);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond acquire_ctx;
   C_SaferCond ctx;
@@ -307,11 +382,14 @@ TEST_F(TestMockExclusiveLockAcquireRequest, SuccessObjectMapDisabled) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -324,6 +402,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, SuccessObjectMapDisabled) {
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_journal_disabled(mock_journal_policy, false);
   expect_create_journal(mock_image_ctx, &mock_journal);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
   expect_open_journal(mock_image_ctx, mock_journal, 0);
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_allocate_journal_tag(mock_image_ctx, mock_journal_policy, 0);
@@ -344,15 +423,20 @@ TEST_F(TestMockExclusiveLockAcquireRequest, RefreshError) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
+  MockRefreshRequest mock_refresh_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, true);
-  expect_refresh(mock_image_ctx, -EINVAL);
+  expect_refresh(mock_image_ctx, mock_refresh_request, -EINVAL);
   expect_unlock(mock_image_ctx, 0);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond *acquire_ctx = new C_SaferCond();
   C_SaferCond ctx;
@@ -363,17 +447,55 @@ TEST_F(TestMockExclusiveLockAcquireRequest, RefreshError) {
   ASSERT_EQ(-EINVAL, ctx.wait());
 }
 
+TEST_F(TestMockExclusiveLockAcquireRequest, RefreshLockDisabled) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
+  MockRefreshRequest mock_refresh_request;
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
+  expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
+  expect_lock(mock_image_ctx, 0);
+  expect_is_refresh_required(mock_image_ctx, true);
+  expect_refresh(mock_image_ctx, mock_refresh_request, -ERESTART);
+
+  MockObjectMap mock_object_map;
+  expect_test_features(mock_image_ctx, RBD_FEATURE_OBJECT_MAP, false);
+  expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
+                       mock_image_ctx.snap_lock, false);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
+
+  C_SaferCond acquire_ctx;
+  C_SaferCond ctx;
+  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
+                                                       TEST_COOKIE,
+                                                       &acquire_ctx, &ctx);
+  req->send();
+  ASSERT_EQ(0, acquire_ctx.wait());
+  ASSERT_EQ(0, ctx.wait());
+}
+
 TEST_F(TestMockExclusiveLockAcquireRequest, JournalError) {
   REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
 
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -389,6 +511,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, JournalError) {
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_journal_disabled(mock_journal_policy, false);
   expect_create_journal(mock_image_ctx, mock_journal);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
   expect_open_journal(mock_image_ctx, *mock_journal, -EINVAL);
   expect_close_journal(mock_image_ctx, *mock_journal);
   expect_close_object_map(mock_image_ctx, *mock_object_map);
@@ -409,11 +532,14 @@ TEST_F(TestMockExclusiveLockAcquireRequest, AllocateJournalTagError) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -429,6 +555,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, AllocateJournalTagError) {
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_journal_disabled(mock_journal_policy, false);
   expect_create_journal(mock_image_ctx, mock_journal);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
   expect_open_journal(mock_image_ctx, *mock_journal, 0);
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_allocate_journal_tag(mock_image_ctx, mock_journal_policy, -EPERM);
@@ -451,19 +578,22 @@ TEST_F(TestMockExclusiveLockAcquireRequest, LockBusy) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
+  MockBreakRequest mock_break_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request,
+                    {entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123},
+                    0);
   expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
-  expect_blacklist_add(mock_image_ctx, 0);
-  expect_break_lock(mock_image_ctx, 0);
+  expect_break_lock(mock_image_ctx, mock_break_request, 0);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, -ENOENT);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond ctx;
   MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
@@ -479,14 +609,15 @@ TEST_F(TestMockExclusiveLockAcquireRequest, GetLockInfoError) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, -EINVAL, entity_name_t::CLIENT(1), "",
-                       "", "", LOCK_EXCLUSIVE);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, -EINVAL);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond ctx;
   MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
@@ -502,218 +633,16 @@ TEST_F(TestMockExclusiveLockAcquireRequest, GetLockInfoEmpty) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, -ENOENT, entity_name_t::CLIENT(1), "",
-                       "", "", LOCK_EXCLUSIVE);
-  expect_lock(mock_image_ctx, -EINVAL);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EINVAL, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, GetLockInfoExternalTag) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", "external tag", LOCK_EXCLUSIVE);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EBUSY, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, GetLockInfoShared) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_SHARED);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EBUSY, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, GetLockInfoExternalCookie) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "external cookie", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EBUSY, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, GetWatchersError) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, -EINVAL, "dead client", 123);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EINVAL, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, GetWatchersAlive) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EAGAIN, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, BlacklistDisabled) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
-  mock_image_ctx.blacklist_on_break_lock = false;
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
-  expect_break_lock(mock_image_ctx, 0);
-  expect_lock(mock_image_ctx, -ENOENT);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-ENOENT, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, BlacklistError) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
-  expect_blacklist_add(mock_image_ctx, -EINVAL);
-
-  C_SaferCond ctx;
-  MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
-                                                       TEST_COOKIE,
-                                                       nullptr, &ctx);
-  req->send();
-  ASSERT_EQ(-EINVAL, ctx.wait());
-}
-
-TEST_F(TestMockExclusiveLockAcquireRequest, BreakLockMissing) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockImageCtx mock_image_ctx(*ictx);
-  expect_op_work_queue(mock_image_ctx);
-
-  InSequence seq;
-  expect_flush_notifies(mock_image_ctx);
-  expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
-  expect_blacklist_add(mock_image_ctx, 0);
-  expect_break_lock(mock_image_ctx, -ENOENT);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, -ENOENT);
   expect_lock(mock_image_ctx, -EINVAL);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond ctx;
   MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
@@ -729,18 +658,20 @@ TEST_F(TestMockExclusiveLockAcquireRequest, BreakLockError) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
+  MockBreakRequest mock_break_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request,
+                    {entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123},
+                    0);
   expect_lock(mock_image_ctx, -EBUSY);
-  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
-                       "auto 123", MockExclusiveLock::WATCHER_LOCK_TAG,
-                       LOCK_EXCLUSIVE);
-  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
-  expect_blacklist_add(mock_image_ctx, 0);
-  expect_break_lock(mock_image_ctx, -EINVAL);
+  expect_break_lock(mock_image_ctx, mock_break_request, -EINVAL);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond ctx;
   MockAcquireRequest *req = MockAcquireRequest::create(mock_image_ctx,
@@ -756,11 +687,14 @@ TEST_F(TestMockExclusiveLockAcquireRequest, OpenObjectMapError) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  MockImageCtx mock_image_ctx(*ictx);
+  MockTestImageCtx mock_image_ctx(*ictx);
+  MockGetLockerRequest mock_get_locker_request;
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_flush_notifies(mock_image_ctx);
+  expect_get_locker(mock_image_ctx, mock_get_locker_request, {}, 0);
   expect_lock(mock_image_ctx, 0);
   expect_is_refresh_required(mock_image_ctx, false);
 
@@ -776,6 +710,7 @@ TEST_F(TestMockExclusiveLockAcquireRequest, OpenObjectMapError) {
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_journal_disabled(mock_journal_policy, false);
   expect_create_journal(mock_image_ctx, &mock_journal);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
   expect_open_journal(mock_image_ctx, mock_journal, 0);
   expect_get_journal_policy(mock_image_ctx, mock_journal_policy);
   expect_allocate_journal_tag(mock_image_ctx, mock_journal_policy, 0);
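
The rewritten acquire tests above rely on the same googlemock idioms throughout: InSequence to pin the order of the mocked calls, and actions that complete the request's callback so the mocked state machine can advance to its next step. A minimal, self-contained sketch of that pattern, assuming only googletest/googlemock (the Callback and MockStep names are invented for illustration and are not librbd types):

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;

struct Callback {
  virtual ~Callback() {}
  virtual void complete(int r) = 0;
};

struct MockStep {
  MOCK_METHOD1(prepare, void(Callback *on_ready));
  MOCK_METHOD0(lock, int());
};

TEST(InSequenceSketch, OrderedExpectations) {
  MockStep step;

  InSequence seq;  // every expectation below must be satisfied in this order
  EXPECT_CALL(step, prepare(_))
    .WillOnce(Invoke([](Callback *on_ready) {
        on_ready->complete(0);  // drive the "async" callback synchronously
      }));
  EXPECT_CALL(step, lock())
    .WillOnce(Return(0));

  struct : Callback {
    void complete(int) override {}
  } cb;
  step.prepare(&cb);
  ASSERT_EQ(0, step.lock());
}
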
diff --git a/src/test/librbd/exclusive_lock/test_mock_BreakRequest.cc b/src/test/librbd/exclusive_lock/test_mock_BreakRequest.cc
new file mode 100644
index 0000000..27bb8e1
--- /dev/null
+++ b/src/test/librbd/exclusive_lock/test_mock_BreakRequest.cc
@@ -0,0 +1,249 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librbd/test_mock_fixture.h"
+#include "test/librbd/test_support.h"
+#include "test/librbd/mock/MockImageCtx.h"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "cls/lock/cls_lock_ops.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/exclusive_lock/BreakRequest.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include <arpa/inet.h>
+#include <list>
+
+namespace librbd {
+namespace {
+
+struct MockTestImageCtx : public librbd::MockImageCtx {
+  MockTestImageCtx(librbd::ImageCtx &image_ctx)
+    : librbd::MockImageCtx(image_ctx) {
+  }
+};
+
+} // anonymous namespace
+} // namespace librbd
+
+// template definitions
+#include "librbd/exclusive_lock/BreakRequest.cc"
+
+namespace librbd {
+namespace exclusive_lock {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::WithArg;
+
+class TestMockExclusiveLockBreakRequest : public TestMockFixture {
+public:
+  typedef BreakRequest<MockTestImageCtx> MockBreakRequest;
+
+  void expect_list_watchers(MockTestImageCtx &mock_image_ctx, int r,
+                            const std::string &address, uint64_t watch_handle) {
+    auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
+                               list_watchers(mock_image_ctx.header_oid, _));
+    if (r < 0) {
+      expect.WillOnce(Return(r));
+    } else {
+      obj_watch_t watcher;
+      strcpy(watcher.addr, (address + ":0/0").c_str());
+      watcher.cookie = watch_handle;
+
+      std::list<obj_watch_t> watchers;
+      watchers.push_back(watcher);
+
+      expect.WillOnce(DoAll(SetArgPointee<1>(watchers), Return(0)));
+    }
+  }
+
+  void expect_blacklist_add(MockTestImageCtx &mock_image_ctx, int r) {
+    EXPECT_CALL(get_mock_rados_client(), blacklist_add(_, _))
+                  .WillOnce(Return(r));
+  }
+
+  void expect_break_lock(MockTestImageCtx &mock_image_ctx, int r) {
+    EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
+                exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _))
+                  .WillOnce(Return(r));
+  }
+};
+
+TEST_F(TestMockExclusiveLockBreakRequest, DeadLockOwner) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
+  expect_blacklist_add(mock_image_ctx, 0);
+  expect_break_lock(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, ForceBreak) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123);
+  expect_blacklist_add(mock_image_ctx, 0);
+  expect_break_lock(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, true, &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, GetWatchersError) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, -EINVAL, "dead client", 123);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(-EINVAL, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, GetWatchersAlive) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(-EAGAIN, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, BlacklistDisabled) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
+  expect_break_lock(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   false, false, &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, BlacklistError) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
+  expect_blacklist_add(mock_image_ctx, -EINVAL);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(-EINVAL, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, BreakLockMissing) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
+  expect_blacklist_add(mock_image_ctx, 0);
+  expect_break_lock(mock_image_ctx, -ENOENT);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockBreakRequest, BreakLockError) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
+  expect_blacklist_add(mock_image_ctx, 0);
+  expect_break_lock(mock_image_ctx, -EINVAL);
+
+  C_SaferCond ctx;
+  Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
+  MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
+                                                   true, false, &ctx);
+  req->send();
+  ASSERT_EQ(-EINVAL, ctx.wait());
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
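
Read end to end, the new BreakRequest suite encodes a clear decision flow: list the header watchers, fail with -EAGAIN if the lock owner is still alive and the break was not forced, optionally blacklist the owner, then break the lock while treating -ENOENT as success. The sketch below is a paraphrase of that flow, assuming the two booleans passed to create() are blacklist-locker and force-break flags (inferred from the BlacklistDisabled and ForceBreak cases); it is not the real BreakRequest state machine.

#include <cerrno>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

struct WatcherSketch {
  std::string addr;
  uint64_t cookie;
};

// Returns 0 when the peer's lock has been (or no longer needs to be) broken.
int break_lock_flow(const std::vector<WatcherSketch> &watchers,
                    const std::string &locker_addr, uint64_t locker_handle,
                    bool blacklist_locker, bool force_break,
                    const std::function<int()> &blacklist_add,
                    const std::function<int()> &break_lock) {
  if (!force_break) {
    for (const auto &w : watchers) {
      // GetWatchersAlive: the owner still watches the header object.
      if (w.addr == locker_addr && w.cookie == locker_handle) {
        return -EAGAIN;
      }
    }
  }
  if (blacklist_locker) {  // BlacklistDisabled skips this step entirely
    int r = blacklist_add();
    if (r < 0) {
      return r;            // BlacklistError expects the error to propagate
    }
  }
  int r = break_lock();
  if (r < 0 && r != -ENOENT) {
    return r;              // BreakLockError; BreakLockMissing maps -ENOENT to success
  }
  return 0;
}
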
diff --git a/src/test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc b/src/test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc
new file mode 100644
index 0000000..ec701fc
--- /dev/null
+++ b/src/test/librbd/exclusive_lock/test_mock_GetLockerRequest.cc
@@ -0,0 +1,216 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librbd/test_mock_fixture.h"
+#include "test/librbd/test_support.h"
+#include "test/librbd/mock/MockImageCtx.h"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "cls/lock/cls_lock_ops.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/exclusive_lock/GetLockerRequest.h"
+#include "librbd/exclusive_lock/Types.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include <arpa/inet.h>
+#include <list>
+
+namespace librbd {
+namespace {
+
+struct MockTestImageCtx : public librbd::MockImageCtx {
+  MockTestImageCtx(librbd::ImageCtx &image_ctx)
+    : librbd::MockImageCtx(image_ctx) {
+  }
+};
+
+} // anonymous namespace
+} // namespace librbd
+
+// template definitions
+#include "librbd/exclusive_lock/GetLockerRequest.cc"
+
+namespace librbd {
+namespace exclusive_lock {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::StrEq;
+using ::testing::WithArg;
+
+class TestMockExclusiveLockGetLockerRequest : public TestMockFixture {
+public:
+  typedef GetLockerRequest<MockTestImageCtx> MockGetLockerRequest;
+
+  void expect_get_lock_info(MockTestImageCtx &mock_image_ctx, int r,
+                            const entity_name_t &locker_entity,
+                            const std::string &locker_address,
+                            const std::string &locker_cookie,
+                            const std::string &lock_tag,
+                            ClsLockType lock_type) {
+    auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
+                               exec(mock_image_ctx.header_oid, _, StrEq("lock"),
+                               StrEq("get_info"), _, _, _));
+    if (r < 0 && r != -ENOENT) {
+      expect.WillOnce(Return(r));
+    } else {
+      entity_name_t entity(locker_entity);
+      entity_addr_t entity_addr;
+      entity_addr.parse(locker_address.c_str(), NULL);
+
+      cls_lock_get_info_reply reply;
+      if (r != -ENOENT) {
+        reply.lockers = decltype(reply.lockers){
+          {rados::cls::lock::locker_id_t(entity, locker_cookie),
+           rados::cls::lock::locker_info_t(utime_t(), entity_addr, "")}};
+        reply.tag = lock_tag;
+        reply.lock_type = lock_type;
+      }
+
+      bufferlist bl;
+      ::encode(reply, bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
+
+      std::string str(bl.c_str(), bl.length());
+      expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(0)));
+    }
+  }
+};
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, Success) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
+                       "auto 123", ExclusiveLock<>::WATCHER_LOCK_TAG,
+                       LOCK_EXCLUSIVE);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+
+  ASSERT_EQ(entity_name_t::CLIENT(1), locker.entity);
+  ASSERT_EQ("1.2.3.4:0/0", locker.address);
+  ASSERT_EQ("auto 123", locker.cookie);
+  ASSERT_EQ(123U, locker.handle);
+}
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, GetLockInfoError) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, -EINVAL, entity_name_t::CLIENT(1), "",
+                       "", "", LOCK_EXCLUSIVE);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(-EINVAL, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, GetLockInfoEmpty) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, -ENOENT, entity_name_t::CLIENT(1), "",
+                       "", "", LOCK_EXCLUSIVE);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(-ENOENT, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, GetLockInfoExternalTag) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
+                       "auto 123", "external tag", LOCK_EXCLUSIVE);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(-EBUSY, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, GetLockInfoShared) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
+                       "auto 123", ExclusiveLock<>::WATCHER_LOCK_TAG,
+                       LOCK_SHARED);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(-EBUSY, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockGetLockerRequest, GetLockInfoExternalCookie) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
+                       "external cookie", ExclusiveLock<>::WATCHER_LOCK_TAG,
+                       LOCK_EXCLUSIVE);
+
+  C_SaferCond ctx;
+  Locker locker;
+  MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
+                                                           &locker, &ctx);
+  req->send();
+  ASSERT_EQ(-EBUSY, ctx.wait());
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
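
The expectations above pin down how GetLockerRequest interprets the raw cls_lock get_info reply: an error passes straight through, an empty reply becomes -ENOENT, and anything not held exclusively with the RBD watcher tag and an "auto <handle>" cookie is reported as -EBUSY. A hedged restatement of those rules (the helper name and signature are invented, not the real GetLockerRequest code):

#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <string>

enum LockTypeSketch { SKETCH_LOCK_EXCLUSIVE, SKETCH_LOCK_SHARED };

int interpret_lock_info(int get_info_r, bool has_locker,
                        const std::string &tag, LockTypeSketch type,
                        const std::string &cookie,
                        const std::string &watcher_lock_tag,
                        uint64_t *watch_handle) {
  if (get_info_r < 0) {
    return get_info_r;                       // GetLockInfoError: -EINVAL passes through
  }
  if (!has_locker) {
    return -ENOENT;                          // GetLockInfoEmpty
  }
  if (tag != watcher_lock_tag ||             // GetLockInfoExternalTag
      type != SKETCH_LOCK_EXCLUSIVE ||       // GetLockInfoShared
      cookie.compare(0, 5, "auto ") != 0) {  // GetLockInfoExternalCookie
    return -EBUSY;                           // externally managed or shared lock
  }
  // Success: the cookie "auto 123" yields watch handle 123.
  *watch_handle = std::strtoull(cookie.c_str() + 5, nullptr, 10);
  return 0;
}
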
diff --git a/src/test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc b/src/test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc
new file mode 100644
index 0000000..1ca9f78
--- /dev/null
+++ b/src/test/librbd/exclusive_lock/test_mock_ReacquireRequest.cc
@@ -0,0 +1,101 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librbd/test_mock_fixture.h"
+#include "test/librbd/test_support.h"
+#include "test/librbd/mock/MockImageCtx.h"
+#include "test/librbd/mock/MockImageState.h"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "cls/lock/cls_lock_ops.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/exclusive_lock/ReacquireRequest.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include <arpa/inet.h>
+#include <list>
+
+// template definitions
+#include "librbd/exclusive_lock/ReacquireRequest.cc"
+template class librbd::exclusive_lock::ReacquireRequest<librbd::MockImageCtx>;
+
+namespace librbd {
+namespace exclusive_lock {
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::StrEq;
+
+class TestMockExclusiveLockReacquireRequest : public TestMockFixture {
+public:
+  typedef ReacquireRequest<MockImageCtx> MockReacquireRequest;
+  typedef ExclusiveLock<MockImageCtx> MockExclusiveLock;
+
+  void expect_set_cookie(MockImageCtx &mock_image_ctx, int r) {
+    EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
+                exec(mock_image_ctx.header_oid, _, StrEq("lock"),
+                     StrEq("set_cookie"), _, _, _))
+                  .WillOnce(Return(r));
+  }
+};
+
+TEST_F(TestMockExclusiveLockReacquireRequest, Success) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_set_cookie(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  MockReacquireRequest *req = MockReacquireRequest::create(mock_image_ctx,
+                                                           "old cookie",
+                                                           "new cookie", &ctx);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockReacquireRequest, NotSupported) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_set_cookie(mock_image_ctx, -EOPNOTSUPP);
+
+  C_SaferCond ctx;
+  MockReacquireRequest *req = MockReacquireRequest::create(mock_image_ctx,
+                                                           "old cookie",
+                                                           "new cookie", &ctx);
+  req->send();
+  ASSERT_EQ(-EOPNOTSUPP, ctx.wait());
+}
+
+TEST_F(TestMockExclusiveLockReacquireRequest, Error) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_set_cookie(mock_image_ctx, -EBUSY);
+
+  C_SaferCond ctx;
+  MockReacquireRequest *req = MockReacquireRequest::create(mock_image_ctx,
+                                                           "old cookie",
+                                                           "new cookie", &ctx);
+  req->send();
+  ASSERT_EQ(-EBUSY, ctx.wait());
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
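
All of these new suites drive the request objects the same way: create(..., &ctx), send(), then block on the completion context for the result code. C_SaferCond is Ceph's own helper for that; purely for orientation, a roughly equivalent stand-in could look like the sketch below (not the real class).

#include <future>

// Minimal stand-in for the C_SaferCond pattern used by the tests: the request
// calls complete(r) when it finishes and the test blocks in wait().
struct SaferCondSketch {
  std::promise<int> promise;
  std::future<int> future = promise.get_future();

  void complete(int r) { promise.set_value(r); }  // invoked by the request
  int wait() { return future.get(); }             // returns the request's result code
};
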
diff --git a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
index 99ae094..f15c2ff 100644
--- a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
+++ b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
@@ -30,8 +30,10 @@ struct MockContext : public Context {
 
 using ::testing::_;
 using ::testing::InSequence;
+using ::testing::Invoke;
 using ::testing::Return;
 using ::testing::StrEq;
+using ::testing::WithArg;
 
 static const std::string TEST_COOKIE("auto 123");
 
@@ -90,10 +92,37 @@ public:
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
   }
 
+  void expect_invalidate_cache(MockImageCtx &mock_image_ctx, bool purge,
+                               int r) {
+    if (mock_image_ctx.object_cacher != nullptr) {
+      EXPECT_CALL(mock_image_ctx, invalidate_cache(purge, _))
+                    .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
+    }
+  }
+
+  void expect_is_cache_empty(MockImageCtx &mock_image_ctx, bool empty) {
+    if (mock_image_ctx.object_cacher != nullptr) {
+      EXPECT_CALL(mock_image_ctx, is_cache_empty())
+        .WillOnce(Return(empty));
+    }
+  }
+
   void expect_flush_notifies(MockImageCtx &mock_image_ctx) {
     EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_))
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
   }
+
+  void expect_prepare_lock(MockImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_))
+      .WillOnce(Invoke([](Context *on_ready) {
+                  on_ready->complete(0);
+                }));
+  }
+
+  void expect_handle_prepare_lock_complete(MockImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete());
+  }
+
 };
 
 TEST_F(TestMockExclusiveLockReleaseRequest, Success) {
@@ -106,8 +135,10 @@ TEST_F(TestMockExclusiveLockReleaseRequest, Success) {
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_cancel_op_requests(mock_image_ctx, 0);
   expect_block_writes(mock_image_ctx, 0);
+  expect_invalidate_cache(mock_image_ctx, false, 0);
   expect_flush_notifies(mock_image_ctx);
 
   MockJournal *mock_journal = new MockJournal();
@@ -121,12 +152,13 @@ TEST_F(TestMockExclusiveLockReleaseRequest, Success) {
   MockContext mock_releasing_ctx;
   expect_complete_context(mock_releasing_ctx, 0);
   expect_unlock(mock_image_ctx, 0);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond ctx;
   MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
                                                        TEST_COOKIE,
                                                        &mock_releasing_ctx,
-                                                       &ctx);
+                                                       &ctx, false);
   req->send();
   ASSERT_EQ(0, ctx.wait());
 }
@@ -142,7 +174,9 @@ TEST_F(TestMockExclusiveLockReleaseRequest, SuccessJournalDisabled) {
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
   expect_cancel_op_requests(mock_image_ctx, 0);
+  expect_invalidate_cache(mock_image_ctx, false, 0);
   expect_flush_notifies(mock_image_ctx);
 
   MockObjectMap *mock_object_map = new MockObjectMap();
@@ -150,12 +184,14 @@ TEST_F(TestMockExclusiveLockReleaseRequest, SuccessJournalDisabled) {
   expect_close_object_map(mock_image_ctx, *mock_object_map);
 
   expect_unlock(mock_image_ctx, 0);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
 
   C_SaferCond release_ctx;
   C_SaferCond ctx;
   MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
                                                        TEST_COOKIE,
-                                                       &release_ctx, &ctx);
+                                                       &release_ctx, &ctx,
+                                                       false);
   req->send();
   ASSERT_EQ(0, release_ctx.wait());
   ASSERT_EQ(0, ctx.wait());
@@ -173,6 +209,7 @@ TEST_F(TestMockExclusiveLockReleaseRequest, SuccessObjectMapDisabled) {
 
   InSequence seq;
   expect_cancel_op_requests(mock_image_ctx, 0);
+  expect_invalidate_cache(mock_image_ctx, false, 0);
   expect_flush_notifies(mock_image_ctx);
 
   expect_unlock(mock_image_ctx, 0);
@@ -181,12 +218,54 @@ TEST_F(TestMockExclusiveLockReleaseRequest, SuccessObjectMapDisabled) {
   C_SaferCond ctx;
   MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
                                                        TEST_COOKIE,
-                                                       &release_ctx, &ctx);
+                                                       &release_ctx, &ctx,
+                                                       true);
   req->send();
   ASSERT_EQ(0, release_ctx.wait());
   ASSERT_EQ(0, ctx.wait());
 }
 
+TEST_F(TestMockExclusiveLockReleaseRequest, Blacklisted) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockImageCtx mock_image_ctx(*ictx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_prepare_lock(mock_image_ctx);
+  expect_cancel_op_requests(mock_image_ctx, 0);
+  expect_block_writes(mock_image_ctx, -EBLACKLISTED);
+  expect_invalidate_cache(mock_image_ctx, false, -EBLACKLISTED);
+  expect_is_cache_empty(mock_image_ctx, false);
+  expect_invalidate_cache(mock_image_ctx, true, -EBLACKLISTED);
+  expect_is_cache_empty(mock_image_ctx, true);
+  expect_flush_notifies(mock_image_ctx);
+
+  MockJournal *mock_journal = new MockJournal();
+  mock_image_ctx.journal = mock_journal;
+  expect_close_journal(mock_image_ctx, *mock_journal, -EBLACKLISTED);
+
+  MockObjectMap *mock_object_map = new MockObjectMap();
+  mock_image_ctx.object_map = mock_object_map;
+  expect_close_object_map(mock_image_ctx, *mock_object_map);
+
+  MockContext mock_releasing_ctx;
+  expect_complete_context(mock_releasing_ctx, 0);
+  expect_unlock(mock_image_ctx, -EBLACKLISTED);
+  expect_handle_prepare_lock_complete(mock_image_ctx);
+
+  C_SaferCond ctx;
+  MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
+                                                       TEST_COOKIE,
+                                                       &mock_releasing_ctx,
+                                                       &ctx, false);
+  req->send();
+  ASSERT_EQ(0, ctx.wait());
+}
+
 TEST_F(TestMockExclusiveLockReleaseRequest, BlockWritesError) {
   REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
 
@@ -204,7 +283,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, BlockWritesError) {
   C_SaferCond ctx;
   MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
                                                        TEST_COOKIE,
-                                                       nullptr, &ctx);
+                                                       nullptr, &ctx,
+                                                       true);
   req->send();
   ASSERT_EQ(-EINVAL, ctx.wait());
 }
@@ -221,6 +301,7 @@ TEST_F(TestMockExclusiveLockReleaseRequest, UnlockError) {
   InSequence seq;
   expect_cancel_op_requests(mock_image_ctx, 0);
   expect_block_writes(mock_image_ctx, 0);
+  expect_invalidate_cache(mock_image_ctx, false, 0);
   expect_flush_notifies(mock_image_ctx);
 
   expect_unlock(mock_image_ctx, -EINVAL);
@@ -228,7 +309,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, UnlockError) {
   C_SaferCond ctx;
   MockReleaseRequest *req = MockReleaseRequest::create(mock_image_ctx,
                                                        TEST_COOKIE,
-                                                       nullptr, &ctx);
+                                                       nullptr, &ctx,
+                                                       true);
   req->send();
   ASSERT_EQ(0, ctx.wait());
 }
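
The new Blacklisted case also documents how the release path copes with a client that can no longer write back its cache: a plain invalidate is attempted first, and if it fails while the cache is still non-empty the invalidate is repeated with purge=true, with -EBLACKLISTED tolerated at every step. A rough restatement of that ordering, inferred from the expectation sequence rather than copied from ReleaseRequest (the extra boolean now passed to create() is left aside, since this hunk does not spell out its meaning):

#include <functional>

void drain_cache_for_release(const std::function<int(bool purge)> &invalidate_cache,
                             const std::function<bool()> &is_cache_empty) {
  int r = invalidate_cache(false);  // Success path: r == 0, nothing more to do
  if (r < 0) {
    // Blacklisted path: purge until the cache reports empty so the lock
    // release is not stuck behind writeback that can never succeed.
    while (!is_cache_empty()) {
      invalidate_cache(true);       // errors (e.g. -EBLACKLISTED) are ignored here
    }
  }
}
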
diff --git a/src/test/librbd/image/test_mock_RefreshRequest.cc b/src/test/librbd/image/test_mock_RefreshRequest.cc
index 4076e25..6b0ff52 100644
--- a/src/test/librbd/image/test_mock_RefreshRequest.cc
+++ b/src/test/librbd/image/test_mock_RefreshRequest.cc
@@ -321,7 +321,7 @@ TEST_F(TestMockImageRefreshRequest, SuccessV1) {
   expect_init_layout(mock_image_ctx);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -347,7 +347,7 @@ TEST_F(TestMockImageRefreshRequest, SuccessSnapshotV1) {
   expect_add_snap(mock_image_ctx, "snap", ictx->snap_ids.begin()->second);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -374,7 +374,7 @@ TEST_F(TestMockImageRefreshRequest, SuccessV2) {
   }
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -405,7 +405,7 @@ TEST_F(TestMockImageRefreshRequest, SuccessSnapshotV2) {
   expect_add_snap(mock_image_ctx, "snap", ictx->snap_ids.begin()->second);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -438,7 +438,7 @@ TEST_F(TestMockImageRefreshRequest, SuccessSetSnapshotV2) {
   expect_get_snap_id(mock_image_ctx, "snap", ictx->snap_ids.begin()->second);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -488,7 +488,52 @@ TEST_F(TestMockImageRefreshRequest, SuccessChild) {
   expect_refresh_parent_finalize(mock_image_ctx, *mock_refresh_parent_request, 0);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
+  req->send();
+
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockImageRefreshRequest, SuccessChildDontOpenParent) {
+  REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+
+  librbd::ImageCtx *ictx;
+  librbd::ImageCtx *ictx2 = nullptr;
+  std::string clone_name = get_temp_image_name();
+
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+  ASSERT_EQ(0, snap_create(*ictx, "snap"));
+  ASSERT_EQ(0, snap_protect(*ictx, "snap"));
+  BOOST_SCOPE_EXIT_ALL((&)) {
+    if (ictx2 != nullptr) {
+      close_image(ictx2);
+    }
+
+    librbd::NoOpProgressContext no_op;
+    ASSERT_EQ(0, librbd::remove(m_ioctx, clone_name, "", no_op));
+    ASSERT_EQ(0, ictx->operations->snap_unprotect("snap"));
+  };
+
+  int order = ictx->order;
+  ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap", m_ioctx,
+                             clone_name.c_str(), ictx->features, &order, 0, 0));
+
+  ASSERT_EQ(0, open_image(clone_name, &ictx2));
+
+  MockRefreshImageCtx mock_image_ctx(*ictx2);
+  MockExclusiveLock mock_exclusive_lock;
+  expect_op_work_queue(mock_image_ctx);
+  expect_test_features(mock_image_ctx);
+
+  InSequence seq;
+  expect_get_mutable_metadata(mock_image_ctx, 0);
+  expect_get_flags(mock_image_ctx, 0);
+  if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
+    expect_init_exclusive_lock(mock_image_ctx, mock_exclusive_lock, 0);
+  }
+
+  C_SaferCond ctx;
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, true, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -544,7 +589,7 @@ TEST_F(TestMockImageRefreshRequest, DisableExclusiveLock) {
   expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -589,7 +634,7 @@ TEST_F(TestMockImageRefreshRequest, DisableExclusiveLockWhileAcquiringLock) {
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, true, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, true, false, &ctx);
   req->send();
 
   ASSERT_EQ(-ERESTART, ctx.wait());
@@ -631,7 +676,7 @@ TEST_F(TestMockImageRefreshRequest, JournalDisabledByPolicy) {
   expect_journal_disabled(mock_journal_policy, true);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -675,7 +720,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) {
   expect_open_journal(mock_image_ctx, mock_journal, 0);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -713,7 +758,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithoutExclusiveLock) {
   expect_set_require_lock_on_read(mock_image_ctx);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -757,7 +802,7 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) {
   expect_unblock_writes(mock_image_ctx);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -793,7 +838,7 @@ TEST_F(TestMockImageRefreshRequest, EnableObjectMapWithExclusiveLock) {
   expect_open_object_map(mock_image_ctx, &mock_object_map, 0);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -826,7 +871,7 @@ TEST_F(TestMockImageRefreshRequest, EnableObjectMapWithoutExclusiveLock) {
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -871,7 +916,7 @@ TEST_F(TestMockImageRefreshRequest, DisableObjectMap) {
   expect_close_object_map(mock_image_ctx, *mock_object_map, 0);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
@@ -907,7 +952,7 @@ TEST_F(TestMockImageRefreshRequest, OpenObjectMapError) {
   expect_open_object_map(mock_image_ctx, mock_object_map, -EFBIG);
 
   C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, &ctx);
+  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
   req->send();
 
   ASSERT_EQ(0, ctx.wait());
diff --git a/src/test/librbd/image_watcher/test_mock_RewatchRequest.cc b/src/test/librbd/image_watcher/test_mock_RewatchRequest.cc
new file mode 100644
index 0000000..5609134
--- /dev/null
+++ b/src/test/librbd/image_watcher/test_mock_RewatchRequest.cc
@@ -0,0 +1,215 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librbd/test_mock_fixture.h"
+#include "include/rados/librados.hpp"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "test/librbd/test_support.h"
+#include "test/librbd/mock/MockExclusiveLock.h"
+#include "test/librbd/mock/MockImageCtx.h"
+#include "librados/AioCompletionImpl.h"
+#include "librbd/image_watcher/RewatchRequest.h"
+
+namespace librbd {
+namespace {
+
+struct MockTestImageCtx : public MockImageCtx {
+  MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) {
+  }
+};
+
+} // anonymous namespace
+} // namespace librbd
+
+#include "librbd/image_watcher/RewatchRequest.cc"
+
+namespace librbd {
+namespace image_watcher {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::WithArg;
+
+struct TestMockImageWatcherRewatchRequest : public TestMockFixture {
+  typedef RewatchRequest<librbd::MockTestImageCtx> MockRewatchRequest;
+
+  TestMockImageWatcherRewatchRequest()
+    : m_watch_lock("watch_lock") {
+  }
+
+  void expect_aio_watch(MockImageCtx &mock_image_ctx, int r) {
+    librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(
+      mock_image_ctx.md_ctx));
+
+    EXPECT_CALL(mock_io_ctx, aio_watch(mock_image_ctx.header_oid, _, _, _))
+      .WillOnce(DoAll(WithArg<1>(Invoke([&mock_image_ctx, &mock_io_ctx, r](librados::AioCompletionImpl *c) {
+                                   c->get();
+                                   mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([&mock_io_ctx, c](int r) {
+                                       mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r);
+                                     }), r);
+                                   })),
+                      Return(0)));
+  }
+
+  void expect_aio_unwatch(MockImageCtx &mock_image_ctx, int r) {
+    librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(
+      mock_image_ctx.md_ctx));
+
+    EXPECT_CALL(mock_io_ctx, aio_unwatch(m_watch_handle, _))
+      .WillOnce(DoAll(Invoke([&mock_image_ctx, &mock_io_ctx, r](uint64_t handle,
+                                                                librados::AioCompletionImpl *c) {
+                        c->get();
+                        mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([&mock_io_ctx, c](int r) {
+                            mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r);
+                          }), r);
+                        }),
+                      Return(0)));
+  }
+
+  void expect_reacquire_lock(MockExclusiveLock &mock_exclusive_lock) {
+    EXPECT_CALL(mock_exclusive_lock, reacquire_lock());
+  }
+
+  struct WatchCtx : public librados::WatchCtx2 {
+    virtual void handle_notify(uint64_t, uint64_t, uint64_t,
+                               ceph::bufferlist&) {
+      assert(false);
+    }
+    virtual void handle_error(uint64_t, int) {
+      assert(false);
+    }
+  };
+
+  RWLock m_watch_lock;
+  WatchCtx m_watch_ctx;
+  uint64_t m_watch_handle = 123;
+};
+
+TEST_F(TestMockImageWatcherRewatchRequest, Success) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_aio_unwatch(mock_image_ctx, 0);
+  expect_aio_watch(mock_image_ctx, 0);
+
+  MockExclusiveLock mock_exclusive_lock;
+  if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
+    mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
+    expect_reacquire_lock(mock_exclusive_lock);
+  }
+
+  C_SaferCond ctx;
+  MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx,
+                                                       m_watch_lock,
+                                                       &m_watch_ctx,
+                                                       &m_watch_handle,
+                                                       &ctx);
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    req->send();
+  }
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockImageWatcherRewatchRequest, UnwatchError) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_aio_unwatch(mock_image_ctx, -EINVAL);
+  expect_aio_watch(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx,
+                                                       m_watch_lock,
+                                                       &m_watch_ctx,
+                                                       &m_watch_handle,
+                                                       &ctx);
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    req->send();
+  }
+  ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockImageWatcherRewatchRequest, WatchBlacklist) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_aio_unwatch(mock_image_ctx, 0);
+  expect_aio_watch(mock_image_ctx, -EBLACKLISTED);
+
+  C_SaferCond ctx;
+  MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx,
+                                                       m_watch_lock,
+                                                       &m_watch_ctx,
+                                                       &m_watch_handle,
+                                                       &ctx);
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    req->send();
+  }
+  ASSERT_EQ(-EBLACKLISTED, ctx.wait());
+}
+
+TEST_F(TestMockImageWatcherRewatchRequest, WatchDNE) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_aio_unwatch(mock_image_ctx, 0);
+  expect_aio_watch(mock_image_ctx, -ENOENT);
+
+  C_SaferCond ctx;
+  MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx,
+                                                       m_watch_lock,
+                                                       &m_watch_ctx,
+                                                       &m_watch_handle,
+                                                       &ctx);
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    req->send();
+  }
+  ASSERT_EQ(-ENOENT, ctx.wait());
+}
+
+TEST_F(TestMockImageWatcherRewatchRequest, WatchError) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockTestImageCtx mock_image_ctx(*ictx);
+
+  InSequence seq;
+  expect_aio_unwatch(mock_image_ctx, 0);
+  expect_aio_watch(mock_image_ctx, -EINVAL);
+  expect_aio_watch(mock_image_ctx, 0);
+
+  C_SaferCond ctx;
+  MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx,
+                                                       m_watch_lock,
+                                                       &m_watch_ctx,
+                                                       &m_watch_handle,
+                                                       &ctx);
+  {
+    RWLock::WLocker watch_locker(m_watch_lock);
+    req->send();
+  }
+  ASSERT_EQ(0, ctx.wait());
+}
+
+} // namespace image_watcher
+} // namespace librbd
diff --git a/src/test/librbd/mock/MockExclusiveLock.h b/src/test/librbd/mock/MockExclusiveLock.h
index 97f9da1..83d3058 100644
--- a/src/test/librbd/mock/MockExclusiveLock.h
+++ b/src/test/librbd/mock/MockExclusiveLock.h
@@ -19,6 +19,8 @@ struct MockExclusiveLock {
 
   MOCK_METHOD2(init, void(uint64_t features, Context*));
   MOCK_METHOD1(shut_down, void(Context*));
+
+  MOCK_METHOD0(reacquire_lock, void());
 };
 
 } // namespace librbd
diff --git a/src/test/librbd/mock/MockImageCtx.h b/src/test/librbd/mock/MockImageCtx.h
index 2a8b98e..040ab57 100644
--- a/src/test/librbd/mock/MockImageCtx.h
+++ b/src/test/librbd/mock/MockImageCtx.h
@@ -158,8 +158,9 @@ struct MockImageCtx {
   MOCK_METHOD1(flush_copyup, void(Context *));
 
   MOCK_METHOD1(flush_cache, void(Context *));
-  MOCK_METHOD1(invalidate_cache, void(Context *));
+  MOCK_METHOD2(invalidate_cache, void(bool, Context *));
   MOCK_METHOD1(shut_down_cache, void(Context *));
+  MOCK_METHOD0(is_cache_empty, bool());
 
   MOCK_CONST_METHOD1(test_features, bool(uint64_t test_features));
   MOCK_CONST_METHOD2(test_features, bool(uint64_t test_features,
diff --git a/src/test/librbd/mock/MockImageState.h b/src/test/librbd/mock/MockImageState.h
index 3ba1ee5..59c9da8 100644
--- a/src/test/librbd/mock/MockImageState.h
+++ b/src/test/librbd/mock/MockImageState.h
@@ -13,7 +13,6 @@ namespace librbd {
 struct MockImageState {
   MOCK_CONST_METHOD0(is_refresh_required, bool());
   MOCK_METHOD1(refresh, void(Context*));
-  MOCK_METHOD1(acquire_lock_refresh, void(Context*));
 
   MOCK_METHOD1(open, void(Context*));
 
@@ -21,6 +20,9 @@ struct MockImageState {
   MOCK_METHOD1(close, void(Context*));
 
   MOCK_METHOD2(snap_set, void(const std::string &, Context*));
+
+  MOCK_METHOD1(prepare_lock, void(Context*));
+  MOCK_METHOD0(handle_prepare_lock_complete, void());
 };
 
 } // namespace librbd
diff --git a/src/test/librbd/mock/MockObjectMap.h b/src/test/librbd/mock/MockObjectMap.h
index 25d14ed..dd27423 100644
--- a/src/test/librbd/mock/MockObjectMap.h
+++ b/src/test/librbd/mock/MockObjectMap.h
@@ -5,6 +5,7 @@
 #define CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
 
 #include "common/RWLock.h"
+#include "librbd/Utils.h"
 #include "gmock/gmock.h"
 
 namespace librbd {
@@ -17,7 +18,25 @@ struct MockObjectMap {
 
   MOCK_METHOD3(aio_resize, void(uint64_t new_size, uint8_t default_object_state,
                                 Context *on_finish));
-  MOCK_METHOD6(aio_update, void(uint64_t snap_id, uint64_t start_object_no,
+
+  template <typename T, void(T::*MF)(int)>
+  bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
+                  const boost::optional<uint8_t> &current_state,
+                  T *callback_object) {
+    return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
+                             new_state, current_state, callback_object);
+  }
+
+  template <typename T, void(T::*MF)(int)>
+  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
+                  uint64_t end_object_no, uint8_t new_state,
+                  const boost::optional<uint8_t> &current_state,
+                  T *callback_object) {
+    return aio_update(snap_id, start_object_no, end_object_no, new_state,
+                      current_state,
+                      util::create_context_callback<T, MF>(callback_object));
+  }
+  MOCK_METHOD6(aio_update, bool(uint64_t snap_id, uint64_t start_object_no,
                                 uint64_t end_object_no, uint8_t new_state,
                                 const boost::optional<uint8_t> &current_state,
                                 Context *on_finish));
diff --git a/src/test/librbd/object_map/test_mock_LockRequest.cc b/src/test/librbd/object_map/test_mock_LockRequest.cc
index 0ee782b..819b012 100644
--- a/src/test/librbd/object_map/test_mock_LockRequest.cc
+++ b/src/test/librbd/object_map/test_mock_LockRequest.cc
@@ -27,14 +27,16 @@ public:
   typedef LockRequest<MockImageCtx> MockLockRequest;
 
   void expect_lock(MockImageCtx &mock_image_ctx, int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 CEPH_NOSNAP));
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                 exec(oid, _, StrEq("lock"), StrEq("lock"), _, _, _))
                   .WillOnce(Return(r));
   }
 
   void expect_get_lock_info(MockImageCtx &mock_image_ctx, int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 CEPH_NOSNAP));
     auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                                exec(oid, _, StrEq("lock"), StrEq("get_info"), _, _, _));
     if (r < 0) {
@@ -59,7 +61,8 @@ public:
   }
 
   void expect_break_lock(MockImageCtx &mock_image_ctx, int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 CEPH_NOSNAP));
     auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                                exec(oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _));
     if (r < 0) {
diff --git a/src/test/librbd/object_map/test_mock_RefreshRequest.cc b/src/test/librbd/object_map/test_mock_RefreshRequest.cc
index be982ce..8d67a1a 100644
--- a/src/test/librbd/object_map/test_mock_RefreshRequest.cc
+++ b/src/test/librbd/object_map/test_mock_RefreshRequest.cc
@@ -86,7 +86,7 @@ public:
   void expect_object_map_load(MockObjectMapImageCtx &mock_image_ctx,
                               ceph::BitVector<2> *object_map, uint64_t snap_id,
                               int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, snap_id));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, snap_id));
     auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                                exec(oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _));
     if (r < 0) {
@@ -117,14 +117,16 @@ public:
   }
 
   void expect_truncate_request(MockObjectMapImageCtx &mock_image_ctx) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, TEST_SNAP_ID));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 TEST_SNAP_ID));
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), truncate(oid, 0, _))
                   .WillOnce(Return(0));
   }
 
   void expect_object_map_resize(MockObjectMapImageCtx &mock_image_ctx,
                                 uint64_t num_objects, int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, TEST_SNAP_ID));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 TEST_SNAP_ID));
     auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                                exec(oid, _, StrEq("rbd"), StrEq("object_map_resize"), _, _, _));
     expect.WillOnce(Return(r));
diff --git a/src/test/librbd/object_map/test_mock_ResizeRequest.cc b/src/test/librbd/object_map/test_mock_ResizeRequest.cc
index 42007d3..44fd714 100644
--- a/src/test/librbd/object_map/test_mock_ResizeRequest.cc
+++ b/src/test/librbd/object_map/test_mock_ResizeRequest.cc
@@ -22,7 +22,7 @@ using ::testing::StrEq;
 class TestMockObjectMapResizeRequest : public TestMockFixture {
 public:
   void expect_resize(librbd::ImageCtx *ictx, uint64_t snap_id, int r) {
-    std::string oid(ObjectMap::object_map_name(ictx->id, snap_id));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, snap_id));
     if (snap_id == CEPH_NOSNAP) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _))
diff --git a/src/test/librbd/object_map/test_mock_SnapshotCreateRequest.cc b/src/test/librbd/object_map/test_mock_SnapshotCreateRequest.cc
index 3dd6298..823a32d 100644
--- a/src/test/librbd/object_map/test_mock_SnapshotCreateRequest.cc
+++ b/src/test/librbd/object_map/test_mock_SnapshotCreateRequest.cc
@@ -31,11 +31,11 @@ public:
   void expect_read_map(librbd::ImageCtx *ictx, int r) {
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
-                  read(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP),
+                  read(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP),
                        0, 0, _)).WillOnce(Return(r));
     } else {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
-                  read(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP),
+                  read(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP),
                        0, 0, _)).WillOnce(DoDefault());
     }
   }
@@ -44,18 +44,18 @@ public:
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   write_full(
-                    ObjectMap::object_map_name(ictx->id, snap_id), _, _))
+                    ObjectMap<>::object_map_name(ictx->id, snap_id), _, _))
                   .WillOnce(Return(r));
     } else {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   write_full(
-                    ObjectMap::object_map_name(ictx->id, snap_id), _, _))
+                    ObjectMap<>::object_map_name(ictx->id, snap_id), _, _))
                   .WillOnce(DoDefault());
     }
   }
 
   void expect_add_snapshot(librbd::ImageCtx *ictx, int r) {
-    std::string oid(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP));
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _))
diff --git a/src/test/librbd/object_map/test_mock_SnapshotRemoveRequest.cc b/src/test/librbd/object_map/test_mock_SnapshotRemoveRequest.cc
index 215c214..270b267 100644
--- a/src/test/librbd/object_map/test_mock_SnapshotRemoveRequest.cc
+++ b/src/test/librbd/object_map/test_mock_SnapshotRemoveRequest.cc
@@ -23,7 +23,7 @@ using ::testing::StrEq;
 class TestMockObjectMapSnapshotRemoveRequest : public TestMockFixture {
 public:
   void expect_load_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) {
-    std::string snap_oid(ObjectMap::object_map_name(ictx->id, snap_id));
+    std::string snap_oid(ObjectMap<>::object_map_name(ictx->id, snap_id));
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   exec(snap_oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _))
@@ -36,7 +36,7 @@ public:
   }
 
   void expect_remove_snapshot(librbd::ImageCtx *ictx, int r) {
-    std::string oid(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP));
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _))
@@ -52,7 +52,7 @@ public:
   }
 
   void expect_remove_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) {
-    std::string snap_oid(ObjectMap::object_map_name(ictx->id, snap_id));
+    std::string snap_oid(ObjectMap<>::object_map_name(ictx->id, snap_id));
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), remove(snap_oid, _))
                     .WillOnce(Return(r));
diff --git a/src/test/librbd/object_map/test_mock_SnapshotRollbackRequest.cc b/src/test/librbd/object_map/test_mock_SnapshotRollbackRequest.cc
index 51fc932..ff3e06a 100644
--- a/src/test/librbd/object_map/test_mock_SnapshotRollbackRequest.cc
+++ b/src/test/librbd/object_map/test_mock_SnapshotRollbackRequest.cc
@@ -24,29 +24,29 @@ public:
   void expect_read_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) {
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
-                  read(ObjectMap::object_map_name(ictx->id, snap_id),
+                  read(ObjectMap<>::object_map_name(ictx->id, snap_id),
                        0, 0, _)).WillOnce(Return(r));
     } else {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
-                  read(ObjectMap::object_map_name(ictx->id, snap_id),
+                  read(ObjectMap<>::object_map_name(ictx->id, snap_id),
                        0, 0, _)).WillOnce(DoDefault());
     }
   }
 
   void expect_write_map(librbd::ImageCtx *ictx, int r) {
     EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
-                exec(ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP), _,
+                exec(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _,
 		     StrEq("lock"), StrEq("assert_locked"), _, _, _))
                   .WillOnce(DoDefault());
     if (r < 0) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   write_full(
-                    ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP), _, _))
+                    ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _, _))
                   .WillOnce(Return(r));
     } else {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   write_full(
-                    ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP), _, _))
+                    ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _, _))
                   .WillOnce(DoDefault());
     }
   }
diff --git a/src/test/librbd/object_map/test_mock_UnlockRequest.cc b/src/test/librbd/object_map/test_mock_UnlockRequest.cc
index 7f834ca..a834a50 100644
--- a/src/test/librbd/object_map/test_mock_UnlockRequest.cc
+++ b/src/test/librbd/object_map/test_mock_UnlockRequest.cc
@@ -25,7 +25,8 @@ public:
   typedef UnlockRequest<MockImageCtx> MockUnlockRequest;
 
   void expect_unlock(MockImageCtx &mock_image_ctx, int r) {
-    std::string oid(ObjectMap::object_map_name(mock_image_ctx.id, CEPH_NOSNAP));
+    std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                 CEPH_NOSNAP));
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                 exec(oid, _, StrEq("lock"), StrEq("unlock"), _, _, _))
                   .WillOnce(Return(r));
diff --git a/src/test/librbd/object_map/test_mock_UpdateRequest.cc b/src/test/librbd/object_map/test_mock_UpdateRequest.cc
index 45879f2..f82ffb8 100644
--- a/src/test/librbd/object_map/test_mock_UpdateRequest.cc
+++ b/src/test/librbd/object_map/test_mock_UpdateRequest.cc
@@ -8,6 +8,7 @@
 #include "librbd/ImageState.h"
 #include "librbd/internal.h"
 #include "librbd/ObjectMap.h"
+#include "librbd/Operations.h"
 #include "librbd/object_map/UpdateRequest.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -23,7 +24,7 @@ using ::testing::StrEq;
 class TestMockObjectMapUpdateRequest : public TestMockFixture {
 public:
   void expect_update(librbd::ImageCtx *ictx, uint64_t snap_id, int r) {
-    std::string oid(ObjectMap::object_map_name(ictx->id, snap_id));
+    std::string oid(ObjectMap<>::object_map_name(ictx->id, snap_id));
     if (snap_id == CEPH_NOSNAP) {
       EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx),
                   exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _))
@@ -56,16 +57,19 @@ TEST_F(TestMockObjectMapUpdateRequest, UpdateInMemory) {
 
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  librbd::NoOpProgressContext no_progress;
+  ASSERT_EQ(0, ictx->operations->resize(4 << ictx->order, no_progress));
   ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
 
   ceph::BitVector<2> object_map;
-  object_map.resize(1024);
+  object_map.resize(4);
   for (uint64_t i = 0; i < object_map.size(); ++i) {
     object_map[i] = i % 4;
   }
 
   C_SaferCond cond_ctx;
-  AsyncRequest<> *req = new UpdateRequest(
+  AsyncRequest<> *req = new UpdateRequest<>(
     *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
     OBJECT_EXISTS, &cond_ctx);
   {
@@ -97,7 +101,7 @@ TEST_F(TestMockObjectMapUpdateRequest, UpdateHeadOnDisk) {
   object_map.resize(1);
 
   C_SaferCond cond_ctx;
-  AsyncRequest<> *req = new UpdateRequest(
+  AsyncRequest<> *req = new UpdateRequest<>(
     *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
     OBJECT_EXISTS, &cond_ctx);
   {
@@ -125,7 +129,7 @@ TEST_F(TestMockObjectMapUpdateRequest, UpdateSnapOnDisk) {
   object_map.resize(1);
 
   C_SaferCond cond_ctx;
-  AsyncRequest<> *req = new UpdateRequest(
+  AsyncRequest<> *req = new UpdateRequest<>(
     *ictx, &object_map, snap_id, 0, object_map.size(), OBJECT_NONEXISTENT,
     OBJECT_EXISTS, &cond_ctx);
   {
@@ -152,7 +156,7 @@ TEST_F(TestMockObjectMapUpdateRequest, UpdateOnDiskError) {
   object_map.resize(1);
 
   C_SaferCond cond_ctx;
-  AsyncRequest<> *req = new UpdateRequest(
+  AsyncRequest<> *req = new UpdateRequest<>(
     *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
     OBJECT_EXISTS, &cond_ctx);
   {
@@ -182,7 +186,7 @@ TEST_F(TestMockObjectMapUpdateRequest, RebuildSnapOnDisk) {
   object_map.resize(1);
 
   C_SaferCond cond_ctx;
-  AsyncRequest<> *req = new UpdateRequest(
+  AsyncRequest<> *req = new UpdateRequest<>(
     *ictx, &object_map, snap_id, 0, object_map.size(), OBJECT_EXISTS_CLEAN,
     boost::optional<uint8_t>(), &cond_ctx);
   {
diff --git a/src/test/librbd/operation/test_mock_ResizeRequest.cc b/src/test/librbd/operation/test_mock_ResizeRequest.cc
index e1998ed..f1276eb 100644
--- a/src/test/librbd/operation/test_mock_ResizeRequest.cc
+++ b/src/test/librbd/operation/test_mock_ResizeRequest.cc
@@ -122,8 +122,8 @@ public:
   }
 
   void expect_invalidate_cache(MockImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(mock_image_ctx, invalidate_cache(_))
-                  .WillOnce(CompleteContext(r, NULL));
+    EXPECT_CALL(mock_image_ctx, invalidate_cache(false, _))
+                  .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
     expect_op_work_queue(mock_image_ctx);
   }
 
diff --git a/src/test/librbd/operation/test_mock_SnapshotRollbackRequest.cc b/src/test/librbd/operation/test_mock_SnapshotRollbackRequest.cc
index 6258229..b56605d 100644
--- a/src/test/librbd/operation/test_mock_SnapshotRollbackRequest.cc
+++ b/src/test/librbd/operation/test_mock_SnapshotRollbackRequest.cc
@@ -164,8 +164,8 @@ public:
 
   void expect_invalidate_cache(MockOperationImageCtx &mock_image_ctx, int r) {
     if (mock_image_ctx.object_cacher != nullptr) {
-      EXPECT_CALL(mock_image_ctx, invalidate_cache(_))
-                    .WillOnce(CompleteContext(r, NULL));
+      EXPECT_CALL(mock_image_ctx, invalidate_cache(true, _))
+                    .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
     }
   }
 
diff --git a/src/test/librbd/test_BlockGuard.cc b/src/test/librbd/test_BlockGuard.cc
new file mode 100644
index 0000000..b16188b
--- /dev/null
+++ b/src/test/librbd/test_BlockGuard.cc
@@ -0,0 +1,98 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librbd/test_fixture.h"
+#include "test/librbd/test_support.h"
+#include "librbd/BlockGuard.h"
+
+namespace librbd {
+
+class TestIOBlockGuard : public TestFixture {
+public:
+  static uint32_t s_index;
+
+  struct Operation {
+    uint32_t index;
+    Operation() : index(++s_index) {
+    }
+    Operation(Operation &&rhs) : index(rhs.index) {
+    }
+    Operation(const Operation &) = delete;
+
+    Operation& operator=(Operation &&rhs) {
+      index = rhs.index;
+      return *this;
+    }
+
+    bool operator==(const Operation &rhs) const {
+      return index == rhs.index;
+    }
+  };
+
+  typedef std::list<Operation> Operations;
+
+  typedef BlockGuard<Operation> OpBlockGuard;
+
+  virtual void SetUp() override {
+    TestFixture::SetUp();
+    m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
+  }
+
+  CephContext *m_cct;
+};
+
+TEST_F(TestIOBlockGuard, NonDetainedOps) {
+  OpBlockGuard op_block_guard(m_cct);
+
+  Operation op1;
+  BlockGuardCell *cell1;
+  ASSERT_EQ(0, op_block_guard.detain({1, 3}, &op1, &cell1));
+
+  Operation op2;
+  BlockGuardCell *cell2;
+  ASSERT_EQ(0, op_block_guard.detain({0, 1}, &op2, &cell2));
+
+  Operation op3;
+  BlockGuardCell *cell3;
+  ASSERT_EQ(0, op_block_guard.detain({3, 6}, &op3, &cell3));
+
+  Operations released_ops;
+  op_block_guard.release(cell1, &released_ops);
+  ASSERT_TRUE(released_ops.empty());
+
+  op_block_guard.release(cell2, &released_ops);
+  ASSERT_TRUE(released_ops.empty());
+
+  op_block_guard.release(cell3, &released_ops);
+  ASSERT_TRUE(released_ops.empty());
+}
+
+TEST_F(TestIOBlockGuard, DetainedOps) {
+  OpBlockGuard op_block_guard(m_cct);
+
+  Operation op1;
+  BlockGuardCell *cell1;
+  ASSERT_EQ(0, op_block_guard.detain({1, 3}, &op1, &cell1));
+
+  Operation op2;
+  BlockGuardCell *cell2;
+  ASSERT_EQ(1, op_block_guard.detain({2, 6}, &op2, &cell2));
+  ASSERT_EQ(nullptr, cell2);
+
+  Operation op3;
+  BlockGuardCell *cell3;
+  ASSERT_EQ(2, op_block_guard.detain({0, 2}, &op3, &cell3));
+  ASSERT_EQ(nullptr, cell3);
+
+  Operations expected_ops;
+  expected_ops.push_back(std::move(op2));
+  expected_ops.push_back(std::move(op3));
+  Operations released_ops;
+  op_block_guard.release(cell1, &released_ops);
+  ASSERT_EQ(expected_ops, released_ops);
+}
+
+uint32_t TestIOBlockGuard::s_index = 0;
+
+} // namespace librbd
+
diff --git a/src/test/librbd/test_ObjectMap.cc b/src/test/librbd/test_ObjectMap.cc
index b3b19e4..1e8566e 100644
--- a/src/test/librbd/test_ObjectMap.cc
+++ b/src/test/librbd/test_ObjectMap.cc
@@ -18,7 +18,7 @@ public:
 
   int when_open_object_map(librbd::ImageCtx *ictx) {
     C_SaferCond ctx;
-    librbd::ObjectMap object_map(*ictx, ictx->snap_id);
+    librbd::ObjectMap<> object_map(*ictx, ictx->snap_id);
     object_map.open(&ctx);
     return ctx.wait();
   }
@@ -38,7 +38,7 @@ TEST_F(TestObjectMap, RefreshInvalidatesWhenCorrupt) {
   }
   ASSERT_EQ(0, lock_ctx.wait());
 
-  std::string oid = librbd::ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP);
+  std::string oid = librbd::ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP);
   bufferlist bl;
   bl.append("corrupt");
   ASSERT_EQ(0, ictx->data_ctx.write_full(oid, bl));
@@ -64,8 +64,8 @@ TEST_F(TestObjectMap, RefreshInvalidatesWhenTooSmall) {
   librados::ObjectWriteOperation op;
   librbd::cls_client::object_map_resize(&op, 0, OBJECT_NONEXISTENT);
 
-  std::string oid = librbd::ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP);
-  ASSERT_EQ(0, ictx->data_ctx.operate(oid, &op));
+  std::string oid = librbd::ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP);
+  ASSERT_EQ(0, ictx->md_ctx.operate(oid, &op));
 
   ASSERT_EQ(0, when_open_object_map(ictx));
   ASSERT_TRUE(ictx->test_flags(RBD_FLAG_OBJECT_MAP_INVALID));
@@ -85,7 +85,7 @@ TEST_F(TestObjectMap, InvalidateFlagOnDisk) {
   }
   ASSERT_EQ(0, lock_ctx.wait());
 
-  std::string oid = librbd::ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP);
+  std::string oid = librbd::ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP);
   bufferlist bl;
   bl.append("corrupt");
   ASSERT_EQ(0, ictx->data_ctx.write_full(oid, bl));
@@ -104,7 +104,7 @@ TEST_F(TestObjectMap, InvalidateFlagInMemoryOnly) {
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
   ASSERT_FALSE(ictx->test_flags(RBD_FLAG_OBJECT_MAP_INVALID));
 
-  std::string oid = librbd::ObjectMap::object_map_name(ictx->id, CEPH_NOSNAP);
+  std::string oid = librbd::ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP);
   bufferlist valid_bl;
   ASSERT_LT(0, ictx->data_ctx.read(oid, valid_bl, 0, 0));
 
diff --git a/src/test/librbd/test_fixture.cc b/src/test/librbd/test_fixture.cc
index a521a79..d3283c5 100644
--- a/src/test/librbd/test_fixture.cc
+++ b/src/test/librbd/test_fixture.cc
@@ -60,7 +60,7 @@ int TestFixture::open_image(const std::string &image_name,
   *ictx = new librbd::ImageCtx(image_name.c_str(), "", NULL, m_ioctx, false);
   m_ictxs.insert(*ictx);
 
-  return (*ictx)->state->open();
+  return (*ictx)->state->open(false);
 }
 
 int TestFixture::snap_create(librbd::ImageCtx &ictx,
diff --git a/src/test/librbd/test_internal.cc b/src/test/librbd/test_internal.cc
index c6ac977..cd7f56b 100644
--- a/src/test/librbd/test_internal.cc
+++ b/src/test/librbd/test_internal.cc
@@ -81,7 +81,7 @@ TEST_F(TestInternal, OpenByID) {
    close_image(ictx);
 
    ictx = new librbd::ImageCtx("", id, nullptr, m_ioctx, true);
-   ASSERT_EQ(0, ictx->state->open());
+   ASSERT_EQ(0, ictx->state->open(false));
    ASSERT_EQ(ictx->name, m_image_name);
    close_image(ictx);
 }
@@ -558,7 +558,7 @@ TEST_F(TestInternal, SnapshotCopyup)
         state = OBJECT_EXISTS_CLEAN;
       }
 
-      librbd::ObjectMap object_map(*ictx2, ictx2->snap_id);
+      librbd::ObjectMap<> object_map(*ictx2, ictx2->snap_id);
       C_SaferCond ctx;
       object_map.open(&ctx);
       ASSERT_EQ(0, ctx.wait());
@@ -848,3 +848,55 @@ TEST_F(TestInternal, RemoveById) {
   librbd::NoOpProgressContext remove_no_op;
   ASSERT_EQ(0, librbd::remove(m_ioctx, "", image_id, remove_no_op));
 }
+
+static int iterate_cb(uint64_t off, size_t len, int exists, void *arg)
+{
+  interval_set<uint64_t> *diff = static_cast<interval_set<uint64_t> *>(arg);
+  diff->insert(off, len);
+  return 0;
+}
+
+TEST_F(TestInternal, DiffIterateCloneOverwrite) {
+  REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+
+  librbd::RBD rbd;
+  librbd::Image image;
+  uint64_t size = 20 << 20;
+  int order = 0;
+
+  ASSERT_EQ(0, rbd.open(m_ioctx, image, m_image_name.c_str(), NULL));
+
+  bufferlist bl;
+  bl.append(std::string(4096, '1'));
+  ASSERT_EQ(4096, image.write(0, 4096, bl));
+
+  interval_set<uint64_t> one;
+  ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, false, false, iterate_cb,
+                                   (void *)&one));
+  ASSERT_EQ(0, image.snap_create("one"));
+  ASSERT_EQ(0, image.snap_protect("one"));
+
+  std::string clone_name = this->get_temp_image_name();
+  ASSERT_EQ(0, rbd.clone(m_ioctx, m_image_name.c_str(), "one", m_ioctx,
+                         clone_name.c_str(), RBD_FEATURE_LAYERING, &order));
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(clone_name, &ictx));
+  ASSERT_EQ(0, snap_create(*ictx, "one"));
+  ASSERT_EQ(0, ictx->operations->snap_protect("one"));
+
+  // Simulate a client that doesn't support deep flatten (old librbd / krbd)
+  // which will copy up the full object from the parent
+  std::string oid = ictx->object_prefix + ".0000000000000000";
+  librados::IoCtx io_ctx;
+  io_ctx.dup(m_ioctx);
+  io_ctx.selfmanaged_snap_set_write_ctx(ictx->snapc.seq, ictx->snaps);
+  ASSERT_EQ(0, io_ctx.write(oid, bl, 4096, 4096));
+
+  interval_set<uint64_t> diff;
+  ASSERT_EQ(0, librbd::snap_set(ictx, "one"));
+  ASSERT_EQ(0, librbd::diff_iterate(ictx, nullptr, 0, size, true, false,
+                                    iterate_cb, (void *)&diff));
+  ASSERT_EQ(one, diff);
+}
+
diff --git a/src/test/librbd/test_librbd.cc b/src/test/librbd/test_librbd.cc
index ff91f25..ea166db 100644
--- a/src/test/librbd/test_librbd.cc
+++ b/src/test/librbd/test_librbd.cc
@@ -276,6 +276,90 @@ TEST_F(TestLibRBD, CreateAndStatPP)
   ioctx.close();
 }
 
+TEST_F(TestLibRBD, GetId)
+{
+  rados_ioctx_t ioctx;
+  ASSERT_EQ(0, rados_ioctx_create(_cluster, m_pool_name.c_str(), &ioctx));
+
+  rbd_image_t image;
+  int order = 0;
+  std::string name = get_temp_image_name();
+
+  ASSERT_EQ(0, create_image(ioctx, name.c_str(), 0, &order));
+  ASSERT_EQ(0, rbd_open(ioctx, name.c_str(), &image, NULL));
+
+  char id[4096];
+  if (!is_feature_enabled(0)) {
+    // V1 image
+    ASSERT_EQ(-EINVAL, rbd_get_id(image, id, sizeof(id)));
+  } else {
+    ASSERT_EQ(-ERANGE, rbd_get_id(image, id, 0));
+    ASSERT_EQ(0, rbd_get_id(image, id, sizeof(id)));
+    ASSERT_LT(0U, strlen(id));
+  }
+
+  ASSERT_EQ(0, rbd_close(image));
+  rados_ioctx_destroy(ioctx);
+}
+
+TEST_F(TestLibRBD, GetIdPP)
+{
+  librados::IoCtx ioctx;
+  ASSERT_EQ(0, _rados.ioctx_create(m_pool_name.c_str(), ioctx));
+
+  librbd::RBD rbd;
+  librbd::Image image;
+  int order = 0;
+  std::string name = get_temp_image_name();
+
+  std::string id;
+  ASSERT_EQ(0, create_image_pp(rbd, ioctx, name.c_str(), 0, &order));
+  ASSERT_EQ(0, rbd.open(ioctx, image, name.c_str(), NULL));
+  if (!is_feature_enabled(0)) {
+    // V1 image
+    ASSERT_EQ(-EINVAL, image.get_id(&id));
+  } else {
+    ASSERT_EQ(0, image.get_id(&id));
+    ASSERT_LT(0U, id.size());
+  }
+}
+
+TEST_F(TestLibRBD, GetBlockNamePrefix)
+{
+  rados_ioctx_t ioctx;
+  ASSERT_EQ(0, rados_ioctx_create(_cluster, m_pool_name.c_str(), &ioctx));
+
+  rbd_image_t image;
+  int order = 0;
+  std::string name = get_temp_image_name();
+
+  ASSERT_EQ(0, create_image(ioctx, name.c_str(), 0, &order));
+  ASSERT_EQ(0, rbd_open(ioctx, name.c_str(), &image, NULL));
+
+  char prefix[4096];
+  ASSERT_EQ(-ERANGE, rbd_get_block_name_prefix(image, prefix, 0));
+  ASSERT_EQ(0, rbd_get_block_name_prefix(image, prefix, sizeof(prefix)));
+  ASSERT_LT(0U, strlen(prefix));
+
+  ASSERT_EQ(0, rbd_close(image));
+  rados_ioctx_destroy(ioctx);
+}
+
+TEST_F(TestLibRBD, GetBlockNamePrefixPP)
+{
+  librados::IoCtx ioctx;
+  ASSERT_EQ(0, _rados.ioctx_create(m_pool_name.c_str(), ioctx));
+
+  librbd::RBD rbd;
+  librbd::Image image;
+  int order = 0;
+  std::string name = get_temp_image_name();
+
+  ASSERT_EQ(0, create_image_pp(rbd, ioctx, name.c_str(), 0, &order));
+  ASSERT_EQ(0, rbd.open(ioctx, image, name.c_str(), NULL));
+  ASSERT_LT(0U, image.get_block_name_prefix().size());
+}
+
 TEST_F(TestLibRBD, OpenAio)
 {
   rados_ioctx_t ioctx;
@@ -3585,6 +3669,11 @@ TEST_F(TestLibRBD, Metadata)
   ASSERT_EQ(1U, pairs.size());
   ASSERT_EQ(0, strncmp("value2", pairs["key2"].c_str(), 6));
 
+  // test config setting
+  ASSERT_EQ(0, image1.metadata_set("conf_rbd_cache", "false"));
+  ASSERT_EQ(-EINVAL, image1.metadata_set("conf_rbd_cache", "INVALID_VALUE"));
+  ASSERT_EQ(0, image1.metadata_remove("conf_rbd_cache"));
+
   // test metadata with snapshot adding
   ASSERT_EQ(0, image1.snap_create("snap1"));
   ASSERT_EQ(0, image1.snap_protect("snap1"));
@@ -4360,6 +4449,9 @@ TEST_F(TestLibRBD, FlushCacheWithCopyupOnExternalSnapshot) {
 
 TEST_F(TestLibRBD, DiscardAfterWrite)
 {
+  CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
+  REQUIRE(!cct->_conf->rbd_skip_partial_discard);
+
   librados::IoCtx ioctx;
   ASSERT_EQ(0, _rados.ioctx_create(m_pool_name.c_str(), ioctx));
 
@@ -4399,3 +4491,163 @@ TEST_F(TestLibRBD, DiscardAfterWrite)
   ASSERT_TRUE(read_bl.is_zero());
   read_comp->release();
 }
+
+TEST_F(TestLibRBD, ExclusiveLock)
+{
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  static char buf[10];
+
+  rados_ioctx_t ioctx;
+  rados_ioctx_create(_cluster, m_pool_name.c_str(), &ioctx);
+
+  std::string name = get_temp_image_name();
+  uint64_t size = 2 << 20;
+  int order = 0;
+  ASSERT_EQ(0, create_image(ioctx, name.c_str(), size, &order));
+
+  rbd_image_t image1;
+  ASSERT_EQ(0, rbd_open(ioctx, name.c_str(), &image1, NULL));
+
+  int lock_owner;
+  ASSERT_EQ(0, rbd_lock_acquire(image1, RBD_LOCK_MODE_EXCLUSIVE));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image1, &lock_owner));
+  ASSERT_TRUE(lock_owner);
+
+  rbd_lock_mode_t lock_mode;
+  char *lock_owners[1];
+  size_t max_lock_owners = 0;
+  ASSERT_EQ(-ERANGE, rbd_lock_get_owners(image1, &lock_mode, lock_owners,
+                                         &max_lock_owners));
+  ASSERT_EQ(1U, max_lock_owners);
+
+  max_lock_owners = 2;
+  ASSERT_EQ(0, rbd_lock_get_owners(image1, &lock_mode, lock_owners,
+                                   &max_lock_owners));
+  ASSERT_EQ(RBD_LOCK_MODE_EXCLUSIVE, lock_mode);
+  ASSERT_STRNE("", lock_owners[0]);
+  ASSERT_EQ(1U, max_lock_owners);
+
+  rbd_image_t image2;
+  ASSERT_EQ(0, rbd_open(ioctx, name.c_str(), &image2, NULL));
+
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image2, &lock_owner));
+  ASSERT_FALSE(lock_owner);
+
+  ASSERT_EQ(-EOPNOTSUPP, rbd_lock_break(image1, RBD_LOCK_MODE_SHARED, ""));
+  ASSERT_EQ(-EBUSY, rbd_lock_break(image1, RBD_LOCK_MODE_EXCLUSIVE,
+                                   "not the owner"));
+
+  ASSERT_EQ(0, rbd_lock_release(image1));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image1, &lock_owner));
+  ASSERT_FALSE(lock_owner);
+
+  ASSERT_EQ(-ENOENT, rbd_lock_break(image1, RBD_LOCK_MODE_EXCLUSIVE,
+                                    lock_owners[0]));
+  rbd_lock_get_owners_cleanup(lock_owners, max_lock_owners);
+
+  ASSERT_EQ(-EROFS, rbd_write(image1, 0, sizeof(buf), buf));
+  ASSERT_EQ((ssize_t)sizeof(buf), rbd_write(image2, 0, sizeof(buf), buf));
+
+  ASSERT_EQ(0, rbd_lock_acquire(image2, RBD_LOCK_MODE_EXCLUSIVE));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image2, &lock_owner));
+  ASSERT_TRUE(lock_owner);
+
+  ASSERT_EQ(0, rbd_lock_release(image2));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image2, &lock_owner));
+  ASSERT_FALSE(lock_owner);
+
+  ASSERT_EQ(0, rbd_lock_acquire(image1, RBD_LOCK_MODE_EXCLUSIVE));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image1, &lock_owner));
+  ASSERT_TRUE(lock_owner);
+
+  ASSERT_EQ((ssize_t)sizeof(buf), rbd_write(image1, 0, sizeof(buf), buf));
+  ASSERT_EQ(-EROFS, rbd_write(image2, 0, sizeof(buf), buf));
+
+  ASSERT_EQ(0, rbd_lock_release(image1));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image1, &lock_owner));
+  ASSERT_FALSE(lock_owner);
+
+  int owner_id = -1;
+  Mutex lock("ping-pong");
+  class PingPong : public Thread {
+  public:
+    explicit PingPong(int id, rbd_image_t &image, int &owner_id, Mutex &lock)
+      : m_id(id), m_image(image), m_owner_id(owner_id), m_lock(lock) {
+  };
+
+  protected:
+    void *entry() {
+      for (int i = 0; i < 10; i++) {
+	{
+	  Mutex::Locker locker(m_lock);
+	  if (m_owner_id == m_id) {
+	    std::cout << m_id << ": releasing exclusive lock" << std::endl;
+	    EXPECT_EQ(0, rbd_lock_release(m_image));
+	    int lock_owner;
+	    EXPECT_EQ(0, rbd_is_exclusive_lock_owner(m_image, &lock_owner));
+	    EXPECT_FALSE(lock_owner);
+	    m_owner_id = -1;
+	    std::cout << m_id << ": exclusive lock released" << std::endl;
+	    continue;
+	  }
+	}
+
+	std::cout << m_id << ": acquiring exclusive lock" << std::endl;
+        int r;
+        do {
+          r = rbd_lock_acquire(m_image, RBD_LOCK_MODE_EXCLUSIVE);
+          if (r == -EROFS) {
+            usleep(1000);
+          }
+        } while (r == -EROFS);
+	EXPECT_EQ(0, r);
+
+	int lock_owner;
+	EXPECT_EQ(0, rbd_is_exclusive_lock_owner(m_image, &lock_owner));
+	EXPECT_TRUE(lock_owner);
+	std::cout << m_id << ": exclusive lock acquired" << std::endl;
+	{
+	  Mutex::Locker locker(m_lock);
+	  m_owner_id = m_id;
+	}
+	usleep(rand() % 50000);
+      }
+
+      Mutex::Locker locker(m_lock);
+      if (m_owner_id == m_id) {
+	EXPECT_EQ(0, rbd_lock_release(m_image));
+	int lock_owner;
+	EXPECT_EQ(0, rbd_is_exclusive_lock_owner(m_image, &lock_owner));
+	EXPECT_FALSE(lock_owner);
+	m_owner_id = -1;
+      }
+
+      return NULL;
+    }
+
+  private:
+    int m_id;
+    rbd_image_t &m_image;
+    int &m_owner_id;
+    Mutex &m_lock;
+  } ping(1, image1, owner_id, lock), pong(2, image2, owner_id, lock);
+
+  ping.create("ping");
+  pong.create("pong");
+  ping.join();
+  pong.join();
+
+  ASSERT_EQ(0, rbd_lock_acquire(image2, RBD_LOCK_MODE_EXCLUSIVE));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image2, &lock_owner));
+  ASSERT_TRUE(lock_owner);
+
+  ASSERT_EQ(0, rbd_close(image2));
+
+  ASSERT_EQ(0, rbd_lock_acquire(image1, RBD_LOCK_MODE_EXCLUSIVE));
+  ASSERT_EQ(0, rbd_is_exclusive_lock_owner(image1, &lock_owner));
+  ASSERT_TRUE(lock_owner);
+
+  ASSERT_EQ(0, rbd_close(image1));
+  rados_ioctx_destroy(ioctx);
+}
diff --git a/src/test/librbd/test_mock_ExclusiveLock.cc b/src/test/librbd/test_mock_ExclusiveLock.cc
index f87a319..df6a4ae 100644
--- a/src/test/librbd/test_mock_ExclusiveLock.cc
+++ b/src/test/librbd/test_mock_ExclusiveLock.cc
@@ -6,6 +6,7 @@
 #include "test/librbd/mock/MockImageCtx.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/exclusive_lock/AcquireRequest.h"
+#include "librbd/exclusive_lock/ReacquireRequest.h"
 #include "librbd/exclusive_lock/ReleaseRequest.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -31,7 +32,8 @@ struct BaseRequest {
   Context *on_finish = nullptr;
 
   static T* create(MockExclusiveLockImageCtx &image_ctx, const std::string &cookie,
-                   Context *on_lock_unlock, Context *on_finish) {
+                   Context *on_lock_unlock, Context *on_finish,
+                   bool shutting_down = false) {
     assert(!s_requests.empty());
     T* req = s_requests.front();
     req->on_lock_unlock = on_lock_unlock;
@@ -54,6 +56,18 @@ struct AcquireRequest<MockExclusiveLockImageCtx> : public BaseRequest<AcquireReq
 };
 
 template <>
+struct ReacquireRequest<MockExclusiveLockImageCtx> : public BaseRequest<ReacquireRequest<MockExclusiveLockImageCtx> > {
+  static ReacquireRequest* create(MockExclusiveLockImageCtx &image_ctx,
+                                  const std::string &cookie,
+                                  const std::string &new_cookie,
+                                  Context *on_finish) {
+    return BaseRequest::create(image_ctx, cookie, nullptr, on_finish);
+  }
+
+  MOCK_METHOD0(send, void());
+};
+
+template <>
 struct ReleaseRequest<MockExclusiveLockImageCtx> : public BaseRequest<ReleaseRequest<MockExclusiveLockImageCtx> > {
   MOCK_METHOD0(send, void());
 };
@@ -83,11 +97,13 @@ class TestMockExclusiveLock : public TestMockFixture {
 public:
   typedef ExclusiveLock<MockExclusiveLockImageCtx> MockExclusiveLock;
   typedef exclusive_lock::AcquireRequest<MockExclusiveLockImageCtx> MockAcquireRequest;
+  typedef exclusive_lock::ReacquireRequest<MockExclusiveLockImageCtx> MockReacquireRequest;
   typedef exclusive_lock::ReleaseRequest<MockExclusiveLockImageCtx> MockReleaseRequest;
 
-  void expect_get_watch_handle(MockExclusiveLockImageCtx &mock_image_ctx) {
+  void expect_get_watch_handle(MockExclusiveLockImageCtx &mock_image_ctx,
+                               uint64_t watch_handle = 1234567890) {
     EXPECT_CALL(*mock_image_ctx.image_watcher, get_watch_handle())
-                  .WillRepeatedly(Return(1234567890));
+                  .WillRepeatedly(Return(watch_handle));
   }
 
   void expect_set_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
@@ -138,11 +154,20 @@ public:
     }
   }
 
+  void expect_reacquire_lock(MockExclusiveLockImageCtx &mock_image_ctx,
+                             MockReacquireRequest &mock_reacquire_request,
+                             int r) {
+    expect_get_watch_handle(mock_image_ctx, 98765);
+    EXPECT_CALL(mock_reacquire_request, send())
+                  .WillOnce(FinishRequest(&mock_reacquire_request, r, &mock_image_ctx));
+  }
+
   void expect_notify_request_lock(MockExclusiveLockImageCtx &mock_image_ctx,
                                   MockExclusiveLock &mock_exclusive_lock) {
     EXPECT_CALL(*mock_image_ctx.image_watcher, notify_request_lock())
-                  .WillRepeatedly(Invoke(&mock_exclusive_lock,
-                                         &MockExclusiveLock::handle_lock_released));
+                  .WillRepeatedly(Invoke([&mock_exclusive_lock]() {
+                                           mock_exclusive_lock.handle_peer_notification(0);
+                                         }));
   }
 
   void expect_notify_acquired_lock(MockExclusiveLockImageCtx &mock_image_ctx) {
@@ -176,7 +201,7 @@ public:
   }
 
   int when_try_lock(MockExclusiveLockImageCtx &mock_image_ctx,
-                     MockExclusiveLock &exclusive_lock) {
+                    MockExclusiveLock &exclusive_lock) {
     C_SaferCond ctx;
     {
       RWLock::WLocker owner_locker(mock_image_ctx.owner_lock);
@@ -185,7 +210,7 @@ public:
     return ctx.wait();
   }
   int when_request_lock(MockExclusiveLockImageCtx &mock_image_ctx,
-                     MockExclusiveLock &exclusive_lock) {
+                        MockExclusiveLock &exclusive_lock) {
     C_SaferCond ctx;
     {
       RWLock::RLocker owner_locker(mock_image_ctx.owner_lock);
@@ -194,7 +219,7 @@ public:
     return ctx.wait();
   }
   int when_release_lock(MockExclusiveLockImageCtx &mock_image_ctx,
-                     MockExclusiveLock &exclusive_lock) {
+                        MockExclusiveLock &exclusive_lock) {
     C_SaferCond ctx;
     {
       RWLock::WLocker owner_locker(mock_image_ctx.owner_lock);
@@ -644,16 +669,93 @@ TEST_F(TestMockExclusiveLock, RequestLockWatchNotRegistered) {
   EXPECT_CALL(*mock_image_ctx.image_watcher, get_watch_handle())
     .WillOnce(DoAll(Invoke([&mock_image_ctx, &exclusive_lock]() {
                       mock_image_ctx.image_ctx->op_work_queue->queue(
-                        new FunctionContext([&exclusive_lock](int r) {
-                          exclusive_lock.handle_watch_registered();
+                        new FunctionContext([&mock_image_ctx, &exclusive_lock](int r) {
+                          RWLock::RLocker owner_locker(mock_image_ctx.owner_lock);
+                          exclusive_lock.reacquire_lock();
                         }));
                     }),
                     Return(0)));
+
+  MockAcquireRequest request_lock_acquire;
+  expect_acquire_lock(mock_image_ctx, request_lock_acquire, 0);
+  ASSERT_EQ(0, when_request_lock(mock_image_ctx, exclusive_lock));
+  ASSERT_TRUE(is_lock_owner(mock_image_ctx, exclusive_lock));
+
+  MockReleaseRequest shutdown_release;
+  expect_release_lock(mock_image_ctx, shutdown_release, 0, true);
+  ASSERT_EQ(0, when_shut_down(mock_image_ctx, exclusive_lock));
+  ASSERT_FALSE(is_lock_owner(mock_image_ctx, exclusive_lock));
+}
+
+TEST_F(TestMockExclusiveLock, ReacquireLock) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockExclusiveLockImageCtx mock_image_ctx(*ictx);
+  MockExclusiveLock exclusive_lock(mock_image_ctx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_block_writes(mock_image_ctx);
+  ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock));
+
   MockAcquireRequest request_lock_acquire;
   expect_acquire_lock(mock_image_ctx, request_lock_acquire, 0);
   ASSERT_EQ(0, when_request_lock(mock_image_ctx, exclusive_lock));
   ASSERT_TRUE(is_lock_owner(mock_image_ctx, exclusive_lock));
 
+  MockReacquireRequest mock_reacquire_request;
+  C_SaferCond reacquire_ctx;
+  expect_reacquire_lock(mock_image_ctx, mock_reacquire_request, 0);
+  {
+    RWLock::RLocker owner_locker(mock_image_ctx.owner_lock);
+    exclusive_lock.reacquire_lock(&reacquire_ctx);
+  }
+  ASSERT_EQ(0, reacquire_ctx.wait());
+
+  MockReleaseRequest shutdown_release;
+  expect_release_lock(mock_image_ctx, shutdown_release, 0, true);
+  ASSERT_EQ(0, when_shut_down(mock_image_ctx, exclusive_lock));
+  ASSERT_FALSE(is_lock_owner(mock_image_ctx, exclusive_lock));
+}
+
+TEST_F(TestMockExclusiveLock, ReacquireLockError) {
+  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockExclusiveLockImageCtx mock_image_ctx(*ictx);
+  MockExclusiveLock exclusive_lock(mock_image_ctx);
+  expect_op_work_queue(mock_image_ctx);
+
+  InSequence seq;
+  expect_block_writes(mock_image_ctx);
+  ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock));
+
+  MockAcquireRequest request_lock_acquire;
+  expect_acquire_lock(mock_image_ctx, request_lock_acquire, 0);
+  ASSERT_EQ(0, when_request_lock(mock_image_ctx, exclusive_lock));
+  ASSERT_TRUE(is_lock_owner(mock_image_ctx, exclusive_lock));
+
+  MockReacquireRequest mock_reacquire_request;
+  C_SaferCond reacquire_ctx;
+  expect_reacquire_lock(mock_image_ctx, mock_reacquire_request, -EOPNOTSUPP);
+
+  MockReleaseRequest reacquire_lock_release;
+  expect_release_lock(mock_image_ctx, reacquire_lock_release, 0, false);
+
+  MockAcquireRequest reacquire_lock_acquire;
+  expect_acquire_lock(mock_image_ctx, reacquire_lock_acquire, 0);
+
+  {
+    RWLock::RLocker owner_locker(mock_image_ctx.owner_lock);
+    exclusive_lock.reacquire_lock(&reacquire_ctx);
+  }
+  ASSERT_EQ(-EOPNOTSUPP, reacquire_ctx.wait());
+
   MockReleaseRequest shutdown_release;
   expect_release_lock(mock_image_ctx, shutdown_release, 0, true);
   ASSERT_EQ(0, when_shut_down(mock_image_ctx, exclusive_lock));
diff --git a/src/test/librbd/test_mock_fixture.cc b/src/test/librbd/test_mock_fixture.cc
index 3fb246d..efa8556 100644
--- a/src/test/librbd/test_mock_fixture.cc
+++ b/src/test/librbd/test_mock_fixture.cc
@@ -9,12 +9,10 @@
 // template definitions
 #include "librbd/AsyncRequest.cc"
 #include "librbd/AsyncObjectThrottle.cc"
-#include "librbd/ExclusiveLock.cc"
 #include "librbd/operation/Request.cc"
 
 template class librbd::AsyncRequest<librbd::MockImageCtx>;
 template class librbd::AsyncObjectThrottle<librbd::MockImageCtx>;
-template class librbd::ExclusiveLock<librbd::MockImageCtx>;
 template class librbd::operation::Request<librbd::MockImageCtx>;
 
 using ::testing::_;
diff --git a/src/test/librbd/test_support.cc b/src/test/librbd/test_support.cc
index 5b5adf3..e9f63bd 100644
--- a/src/test/librbd/test_support.cc
+++ b/src/test/librbd/test_support.cc
@@ -41,17 +41,10 @@ int create_image_pp(librbd::RBD &rbd, librados::IoCtx &ioctx,
 
 int get_image_id(librbd::Image &image, std::string *image_id)
 {
-  librbd::image_info_t info;
-  int r = image.stat(info, sizeof(info));
+  int r = image.get_id(image_id);
   if (r < 0) {
     return r;
   }
-
-  char prefix[RBD_MAX_BLOCK_NAME_SIZE + 1];
-  strncpy(prefix, info.block_name_prefix, RBD_MAX_BLOCK_NAME_SIZE);
-  prefix[RBD_MAX_BLOCK_NAME_SIZE] = '\0';
-
-  *image_id = std::string(prefix + strlen(RBD_DATA_PREFIX));
   return 0;
 }
 
diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc
index 9a5de2e..bdf027e 100644
--- a/src/test/objectstore/store_test.cc
+++ b/src/test/objectstore/store_test.cc
@@ -262,6 +262,8 @@ TEST_P(StoreTest, FiemapEmpty) {
 
 TEST_P(StoreTest, FiemapHoles) {
   ObjectStore::Sequencer osr("test");
+  const uint64_t MAX_EXTENTS = 4000;
+  const uint64_t SKIP_STEP = 65536;
   coll_t cid;
   int r = 0;
   ghobject_t oid(hobject_t(sobject_t("fiemap_object", CEPH_NOSNAP)));
@@ -271,27 +273,28 @@ TEST_P(StoreTest, FiemapHoles) {
     ObjectStore::Transaction t;
     t.create_collection(cid, 0);
     t.touch(cid, oid);
-    t.write(cid, oid, 0, 3, bl);
-    t.write(cid, oid, 1048576, 3, bl);
-    t.write(cid, oid, 4194304, 3, bl);
+    for (uint64_t i = 0; i < MAX_EXTENTS; i++)
+      t.write(cid, oid, SKIP_STEP * i, 3, bl);
     r = apply_transaction(store, &osr, std::move(t));
     ASSERT_EQ(r, 0);
   }
   {
     bufferlist bl;
-    store->fiemap(cid, oid, 0, 4194307, bl);
+    store->fiemap(cid, oid, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3, bl);
     map<uint64_t,uint64_t> m, e;
     bufferlist::iterator p = bl.begin();
     ::decode(m, p);
     cout << " got " << m << std::endl;
     ASSERT_TRUE(!m.empty());
     ASSERT_GE(m[0], 3u);
+    bool extents_exist = true;
+    if (m.size() == MAX_EXTENTS) {
+      for (uint64_t i = 0; i < MAX_EXTENTS; i++)
+        extents_exist = extents_exist && m.count(SKIP_STEP*i);
+    }
     ASSERT_TRUE((m.size() == 1 &&
-		 m[0] > 4194304u) ||
-		(m.size() == 3 &&
-		 m.count(0) &&
-		 m.count(1048576) &&
-		 m.count(4194304)));
+		 m[0] > SKIP_STEP * (MAX_EXTENTS - 1)) ||
+		 (m.size() == MAX_EXTENTS && extents_exist));
   }
   {
     ObjectStore::Transaction t;
diff --git a/src/test/opensuse-13.2/ceph.spec.in b/src/test/opensuse-13.2/ceph.spec.in
index 9ddd75f..fd8ab92 100644
--- a/src/test/opensuse-13.2/ceph.spec.in
+++ b/src/test/opensuse-13.2/ceph.spec.in
@@ -216,12 +216,8 @@ Requires:      cryptsetup
 Requires:      findutils
 Requires:      which
 %if 0%{?suse_version}
-Requires:      lsb-release
 Recommends:    ntp-daemon
 %endif
-%if 0%{?fedora} || 0%{?rhel}
-Requires:      redhat-lsb-core
-%endif
 %if 0%{with xio}
 Requires:      libxio
 %endif
@@ -700,7 +696,7 @@ make %{?_smp_mflags} check
 make DESTDIR=%{buildroot} install
 find %{buildroot} -type f -name "*.la" -exec rm -f {} ';'
 find %{buildroot} -type f -name "*.a" -exec rm -f {} ';'
-install -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
+install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 %endif
diff --git a/src/test/opensuse-13.2/install-deps.sh b/src/test/opensuse-13.2/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/opensuse-13.2/install-deps.sh
+++ b/src/test/opensuse-13.2/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/rbd_mirror/image_replayer/test_mock_BootstrapRequest.cc b/src/test/rbd_mirror/image_replayer/test_mock_BootstrapRequest.cc
index c681cbf..542b78c 100644
--- a/src/test/rbd_mirror/image_replayer/test_mock_BootstrapRequest.cc
+++ b/src/test/rbd_mirror/image_replayer/test_mock_BootstrapRequest.cc
@@ -9,6 +9,7 @@
 #include "tools/rbd_mirror/image_replayer/BootstrapRequest.h"
 #include "tools/rbd_mirror/image_replayer/CloseImageRequest.h"
 #include "tools/rbd_mirror/image_replayer/CreateImageRequest.h"
+#include "tools/rbd_mirror/image_replayer/IsPrimaryRequest.h"
 #include "tools/rbd_mirror/image_replayer/OpenImageRequest.h"
 #include "tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h"
 #include "test/journal/mock/MockJournaler.h"
@@ -150,6 +151,31 @@ struct CreateImageRequest<librbd::MockTestImageCtx> {
 };
 
 template<>
+struct IsPrimaryRequest<librbd::MockTestImageCtx> {
+  static IsPrimaryRequest* s_instance;
+  bool *primary = nullptr;
+  Context *on_finish = nullptr;
+
+  static IsPrimaryRequest* create(librbd::MockTestImageCtx *image_ctx,
+                                  bool *primary, Context *on_finish) {
+    assert(s_instance != nullptr);
+    s_instance->primary = primary;
+    s_instance->on_finish = on_finish;
+    return s_instance;
+  }
+
+  IsPrimaryRequest() {
+    assert(s_instance == nullptr);
+    s_instance = this;
+  }
+  ~IsPrimaryRequest() {
+    s_instance = nullptr;
+  }
+
+  MOCK_METHOD0(send, void());
+};
+
+template<>
 struct OpenImageRequest<librbd::MockTestImageCtx> {
   static OpenImageRequest* s_instance;
   librbd::MockTestImageCtx **image_ctx = nullptr;
@@ -216,6 +242,8 @@ CloseImageRequest<librbd::MockTestImageCtx>*
   CloseImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
 CreateImageRequest<librbd::MockTestImageCtx>*
   CreateImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
+IsPrimaryRequest<librbd::MockTestImageCtx>*
+  IsPrimaryRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
 OpenImageRequest<librbd::MockTestImageCtx>*
   OpenImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
 OpenLocalImageRequest<librbd::MockTestImageCtx>*
@@ -251,6 +279,7 @@ public:
   typedef ImageSyncThrottlerRef<librbd::MockTestImageCtx> MockImageSyncThrottler;
   typedef BootstrapRequest<librbd::MockTestImageCtx> MockBootstrapRequest;
   typedef CloseImageRequest<librbd::MockTestImageCtx> MockCloseImageRequest;
+  typedef IsPrimaryRequest<librbd::MockTestImageCtx> MockIsPrimaryRequest;
   typedef OpenImageRequest<librbd::MockTestImageCtx> MockOpenImageRequest;
   typedef OpenLocalImageRequest<librbd::MockTestImageCtx> MockOpenLocalImageRequest;
   typedef std::list<cls::journal::Tag> Tags;
@@ -358,11 +387,13 @@ public:
         }));
   }
 
-  void expect_journal_is_tag_owner(librbd::MockJournal &mock_journal,
-                                   bool is_owner, int r) {
-    EXPECT_CALL(mock_journal, is_tag_owner(_))
-      .WillOnce(DoAll(SetArgPointee<0>(is_owner),
-                      Return(r)));
+  void expect_is_primary(MockIsPrimaryRequest &mock_is_primary_request,
+			 bool primary, int r) {
+    EXPECT_CALL(mock_is_primary_request, send())
+      .WillOnce(Invoke([this, &mock_is_primary_request, primary, r]() {
+          *mock_is_primary_request.primary = primary;
+          m_threads->work_queue->queue(mock_is_primary_request.on_finish, r);
+        }));
   }
 
   void expect_journal_get_tag_tid(librbd::MockJournal &mock_journal,
@@ -375,6 +406,13 @@ public:
     EXPECT_CALL(mock_journal, get_tag_data()).WillOnce(Return(tag_data));
   }
 
+  void expect_is_resync_requested(librbd::MockJournal &mock_journal,
+                                  bool do_resync, int r) {
+    EXPECT_CALL(mock_journal, is_resync_requested(_))
+      .WillOnce(DoAll(SetArgPointee<0>(do_resync),
+                      Return(r)));
+  }
+
   bufferlist encode_tag_data(const librbd::journal::TagData &tag_data) {
     bufferlist bl;
     ::encode(tag_data, bl);
@@ -402,14 +440,14 @@ public:
                                     remote_mirror_uuid,
                                     &mock_journaler,
                                     &m_mirror_peer_client_meta,
-                                    on_finish);
+                                    on_finish, &m_do_resync);
   }
 
   librbd::ImageCtx *m_remote_image_ctx;
   librbd::ImageCtx *m_local_image_ctx = nullptr;
   librbd::MockTestImageCtx *m_local_test_image_ctx = nullptr;
   librbd::journal::MirrorPeerClientMeta m_mirror_peer_client_meta;
-
+  bool m_do_resync;
 };
 
 TEST_F(TestMockImageReplayerBootstrapRequest, NonPrimaryRemoteSyncingState) {
@@ -448,7 +486,8 @@ TEST_F(TestMockImageReplayerBootstrapRequest, NonPrimaryRemoteSyncingState) {
   MockOpenImageRequest mock_open_image_request;
   expect_open_image(mock_open_image_request, m_remote_io_ctx,
                     mock_remote_image_ctx.id, mock_remote_image_ctx, 0);
-  expect_journal_is_tag_owner(mock_journal, false, 0);
+  MockIsPrimaryRequest mock_is_primary_request;
+  expect_is_primary(mock_is_primary_request, false, 0);
 
   // switch the state to replaying
   mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING;
@@ -505,13 +544,15 @@ TEST_F(TestMockImageReplayerBootstrapRequest, RemoteDemotePromote) {
   MockOpenImageRequest mock_open_image_request;
   expect_open_image(mock_open_image_request, m_remote_io_ctx,
                     mock_remote_image_ctx.id, mock_remote_image_ctx, 0);
-  expect_journal_is_tag_owner(mock_journal, true, 0);
+  MockIsPrimaryRequest mock_is_primary_request;
+  expect_is_primary(mock_is_primary_request, true, 0);
 
   // open the local image
   mock_local_image_ctx.journal = &mock_journal;
   MockOpenLocalImageRequest mock_open_local_image_request;
   expect_open_local_image(mock_open_local_image_request, m_local_io_ctx,
                           mock_local_image_ctx.id, mock_local_image_ctx, 0);
+  expect_is_resync_requested(mock_journal, false, 0);
 
   // remote demotion / promotion event
   Tags tags = {
@@ -582,13 +623,15 @@ TEST_F(TestMockImageReplayerBootstrapRequest, MultipleRemoteDemotePromotes) {
   MockOpenImageRequest mock_open_image_request;
   expect_open_image(mock_open_image_request, m_remote_io_ctx,
                     mock_remote_image_ctx.id, mock_remote_image_ctx, 0);
-  expect_journal_is_tag_owner(mock_journal, true, 0);
+  MockIsPrimaryRequest mock_is_primary_request;
+  expect_is_primary(mock_is_primary_request, true, 0);
 
   // open the local image
   mock_local_image_ctx.journal = &mock_journal;
   MockOpenLocalImageRequest mock_open_local_image_request;
   expect_open_local_image(mock_open_local_image_request, m_local_io_ctx,
                           mock_local_image_ctx.id, mock_local_image_ctx, 0);
+  expect_is_resync_requested(mock_journal, false, 0);
 
   // remote demotion / promotion event
   Tags tags = {
@@ -669,13 +712,15 @@ TEST_F(TestMockImageReplayerBootstrapRequest, LocalDemoteRemotePromote) {
   MockOpenImageRequest mock_open_image_request;
   expect_open_image(mock_open_image_request, m_remote_io_ctx,
                     mock_remote_image_ctx.id, mock_remote_image_ctx, 0);
-  expect_journal_is_tag_owner(mock_journal, true, 0);
+  MockIsPrimaryRequest mock_is_primary_request;
+  expect_is_primary(mock_is_primary_request, true, 0);
 
   // open the local image
   mock_local_image_ctx.journal = &mock_journal;
   MockOpenLocalImageRequest mock_open_local_image_request;
   expect_open_local_image(mock_open_local_image_request, m_local_io_ctx,
                           mock_local_image_ctx.id, mock_local_image_ctx, 0);
+  expect_is_resync_requested(mock_journal, false, 0);
 
   // remote demotion / promotion event
   Tags tags = {
@@ -744,13 +789,15 @@ TEST_F(TestMockImageReplayerBootstrapRequest, SplitBrainForcePromote) {
   MockOpenImageRequest mock_open_image_request;
   expect_open_image(mock_open_image_request, m_remote_io_ctx,
                     mock_remote_image_ctx.id, mock_remote_image_ctx, 0);
-  expect_journal_is_tag_owner(mock_journal, true, 0);
+  MockIsPrimaryRequest mock_is_primary_request;
+  expect_is_primary(mock_is_primary_request, true, 0);
 
   // open the local image
   mock_local_image_ctx.journal = &mock_journal;
   MockOpenLocalImageRequest mock_open_local_image_request;
   expect_open_local_image(mock_open_local_image_request, m_local_io_ctx,
                           mock_local_image_ctx.id, mock_local_image_ctx, 0);
+  expect_is_resync_requested(mock_journal, false, 0);
 
   // remote demotion / promotion event
   Tags tags = {
diff --git a/src/test/rbd_mirror/image_replayer/test_mock_CreateImageRequest.cc b/src/test/rbd_mirror/image_replayer/test_mock_CreateImageRequest.cc
index 8191652..798dc1d 100644
--- a/src/test/rbd_mirror/image_replayer/test_mock_CreateImageRequest.cc
+++ b/src/test/rbd_mirror/image_replayer/test_mock_CreateImageRequest.cc
@@ -198,7 +198,7 @@ public:
       librbd::ImageCtx *ictx = new librbd::ImageCtx(parent_image_ctx->name,
 						    "", "", m_remote_io_ctx,
                                                     false);
-      ictx->state->open();
+      ictx->state->open(false);
       EXPECT_EQ(0, ictx->operations->snap_create(snap_name.c_str()));
       EXPECT_EQ(0, ictx->operations->snap_protect(snap_name.c_str()));
       ictx->state->close();
diff --git a/src/test/rbd_mirror/image_sync/test_mock_ObjectCopyRequest.cc b/src/test/rbd_mirror/image_sync/test_mock_ObjectCopyRequest.cc
index b018f16..5a2fa9e 100644
--- a/src/test/rbd_mirror/image_sync/test_mock_ObjectCopyRequest.cc
+++ b/src/test/rbd_mirror/image_sync/test_mock_ObjectCopyRequest.cc
@@ -174,17 +174,18 @@ public:
     if (mock_image_ctx.image_ctx->object_map != nullptr) {
       auto &expect = EXPECT_CALL(mock_object_map, aio_update(snap_id, 0, 1, state, _, _));
       if (r < 0) {
-        expect.WillOnce(WithArg<5>(Invoke([this, r](Context *ctx) {
-            m_threads->work_queue->queue(ctx, r);
-          })));
+        expect.WillOnce(DoAll(WithArg<5>(Invoke([this, r](Context *ctx) {
+                                  m_threads->work_queue->queue(ctx, r);
+                                })),
+                              Return(true)));
       } else {
-        expect.WillOnce(WithArg<5>(Invoke([&mock_image_ctx, snap_id, state, r](Context *ctx) {
-            assert(mock_image_ctx.image_ctx->snap_lock.is_locked());
-            assert(mock_image_ctx.image_ctx->object_map_lock.is_wlocked());
-            mock_image_ctx.image_ctx->object_map->aio_update(snap_id, 0, 1,
-                                                             state,
-                                                             boost::none, ctx);
-          })));
+        expect.WillOnce(DoAll(WithArg<5>(Invoke([&mock_image_ctx, snap_id, state, r](Context *ctx) {
+                                  assert(mock_image_ctx.image_ctx->snap_lock.is_locked());
+                                  assert(mock_image_ctx.image_ctx->object_map_lock.is_wlocked());
+                                  mock_image_ctx.image_ctx->object_map->aio_update<Context>(
+                                    snap_id, 0, 1, state, boost::none, ctx);
+                                })),
+                              Return(true)));
       }
     }
   }
diff --git a/src/test/rbd_mirror/image_sync/test_mock_SnapshotCreateRequest.cc b/src/test/rbd_mirror/image_sync/test_mock_SnapshotCreateRequest.cc
index c0105ae..c54aa2c 100644
--- a/src/test/rbd_mirror/image_sync/test_mock_SnapshotCreateRequest.cc
+++ b/src/test/rbd_mirror/image_sync/test_mock_SnapshotCreateRequest.cc
@@ -91,8 +91,8 @@ public:
 
   void expect_object_map_resize(librbd::MockTestImageCtx &mock_image_ctx,
                                 librados::snap_t snap_id, int r) {
-    std::string oid(librbd::ObjectMap::object_map_name(mock_image_ctx.id,
-                                                       snap_id));
+    std::string oid(librbd::ObjectMap<>::object_map_name(mock_image_ctx.id,
+                                                         snap_id));
     EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
                 exec(oid, _, StrEq("rbd"), StrEq("object_map_resize"), _, _, _))
                   .WillOnce(Return(r));
diff --git a/src/test/rbd_mirror/test_ImageDeleter.cc b/src/test/rbd_mirror/test_ImageDeleter.cc
index 5480316..d3739eb 100644
--- a/src/test/rbd_mirror/test_ImageDeleter.cc
+++ b/src/test/rbd_mirror/test_ImageDeleter.cc
@@ -69,7 +69,7 @@ public:
     EXPECT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, 1 << 20));
     ImageCtx *ictx = new ImageCtx(m_image_name, "", "", m_local_io_ctx,
                                   false);
-    EXPECT_EQ(0, ictx->state->open());
+    EXPECT_EQ(0, ictx->state->open(false));
     m_local_image_id = ictx->id;
 
     cls::rbd::MirrorImage mirror_image(GLOBAL_IMAGE_ID,
@@ -112,7 +112,7 @@ public:
     if (!ictx) {
       ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                           false);
-      r = ictx->state->open();
+      r = ictx->state->open(false);
       close = (r == 0);
     }
 
@@ -133,7 +133,7 @@ public:
     if (!ictx) {
       ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                           false);
-      EXPECT_EQ(0, ictx->state->open());
+      EXPECT_EQ(0, ictx->state->open(false));
       close = true;
     }
 
@@ -147,7 +147,7 @@ public:
   void create_snapshot(std::string snap_name="snap1", bool protect=false) {
     ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                   false);
-    EXPECT_EQ(0, ictx->state->open());
+    EXPECT_EQ(0, ictx->state->open(false));
     promote_image(ictx);
 
     EXPECT_EQ(0, ictx->operations->snap_create(snap_name.c_str()));
@@ -163,7 +163,7 @@ public:
   std::string create_clone() {
     ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                   false);
-    EXPECT_EQ(0, ictx->state->open());
+    EXPECT_EQ(0, ictx->state->open(false));
     promote_image(ictx);
 
     EXPECT_EQ(0, ictx->operations->snap_create("snap1"));
@@ -175,7 +175,7 @@ public:
     std::string clone_id;
     ImageCtx *ictx_clone = new ImageCtx("clone1", "", "", m_local_io_ctx,
                                         false);
-    EXPECT_EQ(0, ictx_clone->state->open());
+    EXPECT_EQ(0, ictx_clone->state->open(false));
     clone_id = ictx_clone->id;
     cls::rbd::MirrorImage mirror_image(GLOBAL_CLONE_IMAGE_ID,
                                 MirrorImageState::MIRROR_IMAGE_STATE_ENABLED);
@@ -193,7 +193,7 @@ public:
   void check_image_deleted() {
     ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                   false);
-    EXPECT_EQ(-ENOENT, ictx->state->open());
+    EXPECT_EQ(-ENOENT, ictx->state->open(false));
     delete ictx;
 
     cls::rbd::MirrorImage mirror_image;
@@ -417,7 +417,7 @@ TEST_F(TestImageDeleter, Delete_NonExistent_Image_Without_MirroringState) {
 TEST_F(TestImageDeleter, Fail_Delete_NonPrimary_Image) {
   ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                 false);
-  EXPECT_EQ(0, ictx->state->open());
+  EXPECT_EQ(0, ictx->state->open(false));
 
   m_deleter->schedule_image_delete(_rados, m_local_pool_id, m_local_image_id,
       m_image_name, GLOBAL_IMAGE_ID);
@@ -436,7 +436,7 @@ TEST_F(TestImageDeleter, Fail_Delete_NonPrimary_Image) {
 TEST_F(TestImageDeleter, Retry_Failed_Deletes) {
   ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                 false);
-  EXPECT_EQ(0, ictx->state->open());
+  EXPECT_EQ(0, ictx->state->open(false));
 
   m_deleter->set_failed_timer_interval(2);
 
@@ -464,7 +464,7 @@ TEST_F(TestImageDeleter, Retry_Failed_Deletes) {
 TEST_F(TestImageDeleter, Delete_Is_Idempotent) {
   ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                 false);
-  EXPECT_EQ(0, ictx->state->open());
+  EXPECT_EQ(0, ictx->state->open(false));
 
   m_deleter->schedule_image_delete(_rados, m_local_pool_id, m_local_image_id,
       m_image_name, GLOBAL_IMAGE_ID);
diff --git a/src/test/rbd_mirror/test_ImageReplayer.cc b/src/test/rbd_mirror/test_ImageReplayer.cc
index 38b0b41..bb65c55 100644
--- a/src/test/rbd_mirror/test_ImageReplayer.cc
+++ b/src/test/rbd_mirror/test_ImageReplayer.cc
@@ -97,6 +97,7 @@ public:
 
     EXPECT_EQ(0, m_remote_cluster.ioctx_create(m_remote_pool_name.c_str(),
 					       m_remote_ioctx));
+    EXPECT_EQ(0, librbd::mirror_mode_set(m_remote_ioctx, RBD_MIRROR_MODE_POOL));
 
     m_image_name = get_temp_image_name();
     uint64_t features = g_ceph_context->_conf->rbd_default_features;
@@ -192,7 +193,7 @@ public:
   {
     librbd::ImageCtx *ictx = new librbd::ImageCtx(image_name.c_str(),
 						  "", "", ioctx, readonly);
-    EXPECT_EQ(0, ictx->state->open());
+    EXPECT_EQ(0, ictx->state->open(false));
     *ictxp = ictx;
   }
 
@@ -390,7 +391,7 @@ TEST_F(TestImageReplayer, BootstrapErrorLocalImageExists)
 
 TEST_F(TestImageReplayer, BootstrapErrorNoJournal)
 {
-  // disable remote journal journaling
+  // disable remote image journaling
   librbd::ImageCtx *ictx;
   open_remote_image(&ictx);
   uint64_t features;
@@ -404,6 +405,58 @@ TEST_F(TestImageReplayer, BootstrapErrorNoJournal)
   ASSERT_EQ(-ENOENT, cond.wait());
 }
 
+TEST_F(TestImageReplayer, BootstrapErrorMirrorDisabled)
+{
+  // disable remote image mirroring
+  ASSERT_EQ(0, librbd::mirror_mode_set(m_remote_ioctx, RBD_MIRROR_MODE_IMAGE));
+  librbd::ImageCtx *ictx;
+  open_remote_image(&ictx);
+  ASSERT_EQ(0, librbd::mirror_image_disable(ictx, true));
+  close_image(ictx);
+
+  create_replayer<>();
+  C_SaferCond cond;
+  m_replayer->start(&cond);
+  ASSERT_EQ(-ENOENT, cond.wait());
+}
+
+TEST_F(TestImageReplayer, BootstrapMirrorDisabling)
+{
+  // set remote image mirroring state to DISABLING
+  ASSERT_EQ(0, librbd::mirror_mode_set(m_remote_ioctx, RBD_MIRROR_MODE_IMAGE));
+  librbd::ImageCtx *ictx;
+  open_remote_image(&ictx);
+  ASSERT_EQ(0, librbd::mirror_image_enable(ictx));
+  cls::rbd::MirrorImage mirror_image;
+  ASSERT_EQ(0, librbd::cls_client::mirror_image_get(&m_remote_ioctx, ictx->id,
+                                                    &mirror_image));
+  mirror_image.state = cls::rbd::MirrorImageState::MIRROR_IMAGE_STATE_DISABLING;
+  ASSERT_EQ(0, librbd::cls_client::mirror_image_set(&m_remote_ioctx, ictx->id,
+                                                    mirror_image));
+  close_image(ictx);
+
+  create_replayer<>();
+  C_SaferCond cond;
+  m_replayer->start(&cond);
+  ASSERT_EQ(0, cond.wait());
+  ASSERT_TRUE(m_replayer->is_stopped());
+}
+
+TEST_F(TestImageReplayer, BootstrapDemoted)
+{
+  // demote remote image
+  librbd::ImageCtx *ictx;
+  open_remote_image(&ictx);
+  ASSERT_EQ(0, librbd::mirror_image_demote(ictx));
+  close_image(ictx);
+
+  create_replayer<>();
+  C_SaferCond cond;
+  m_replayer->start(&cond);
+  ASSERT_EQ(0, cond.wait());
+  ASSERT_TRUE(m_replayer->is_stopped());
+}
+
 TEST_F(TestImageReplayer, StartInterrupted)
 {
   create_replayer<>();
diff --git a/src/test/rbd_mirror/test_PoolWatcher.cc b/src/test/rbd_mirror/test_PoolWatcher.cc
index 2a2708e..96e1b3d 100644
--- a/src/test/rbd_mirror/test_PoolWatcher.cc
+++ b/src/test/rbd_mirror/test_PoolWatcher.cc
@@ -126,7 +126,7 @@ TestPoolWatcher() : m_lock("TestPoolWatcherLock"),
     {
       librbd::ImageCtx *ictx = new librbd::ImageCtx(parent_image_name.c_str(),
 						    "", "", pioctx, false);
-      ictx->state->open();
+      ictx->state->open(false);
       EXPECT_EQ(0, ictx->operations->snap_create(snap_name.c_str()));
       EXPECT_EQ(0, ictx->operations->snap_protect(snap_name.c_str()));
       ictx->state->close();
diff --git a/src/test/rbd_mirror/test_fixture.cc b/src/test/rbd_mirror/test_fixture.cc
index b1eb489..361e553 100644
--- a/src/test/rbd_mirror/test_fixture.cc
+++ b/src/test/rbd_mirror/test_fixture.cc
@@ -79,7 +79,7 @@ int TestFixture::open_image(librados::IoCtx &io_ctx,
   *image_ctx = new librbd::ImageCtx(image_name.c_str(), "", NULL, io_ctx,
                                     false);
   m_image_ctxs.insert(*image_ctx);
-  return (*image_ctx)->state->open();
+  return (*image_ctx)->state->open(false);
 }
 
 int TestFixture::create_snap(librbd::ImageCtx *image_ctx, const char* snap_name,
diff --git a/src/test/rbd_mirror/test_mock_ImageReplayer.cc b/src/test/rbd_mirror/test_mock_ImageReplayer.cc
index 2477e03..a276899 100644
--- a/src/test/rbd_mirror/test_mock_ImageReplayer.cc
+++ b/src/test/rbd_mirror/test_mock_ImageReplayer.cc
@@ -75,6 +75,7 @@ struct BootstrapRequest<librbd::MockTestImageCtx> {
         ::journal::MockJournalerProxy *journaler,
         librbd::journal::MirrorPeerClientMeta *client_meta,
         Context *on_finish,
+        bool *do_resync,
         rbd::mirror::ProgressContext *progress_ctx = nullptr) {
     assert(s_instance != nullptr);
     s_instance->on_finish = on_finish;
diff --git a/src/test/ubuntu-12.04/install-deps.sh b/src/test/ubuntu-12.04/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/ubuntu-12.04/install-deps.sh
+++ b/src/test/ubuntu-12.04/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
diff --git a/src/test/ubuntu-14.04/install-deps.sh b/src/test/ubuntu-14.04/install-deps.sh
index 129178f..94def86 100755
--- a/src/test/ubuntu-14.04/install-deps.sh
+++ b/src/test/ubuntu-14.04/install-deps.sh
@@ -19,21 +19,12 @@ if test $(id -u) != 0 ; then
 fi
 export LC_ALL=C # the following is vulnerable to i18n
 
-if test -f /etc/redhat-release ; then
-    $SUDO yum install -y redhat-lsb-core
-fi
-
-if type apt-get > /dev/null 2>&1 ; then
-    $SUDO apt-get install -y lsb-release devscripts equivs
-fi
-
-if type zypper > /dev/null 2>&1 ; then
-    $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
-fi
-
-case $(lsb_release -si) in
-Ubuntu|Debian|Devuan)
-        $SUDO apt-get install -y dpkg-dev
+source /etc/os-release
+case $ID in
+    debian|ubuntu|devuan)
+        echo "Using apt-get to install dependencies"
+        $SUDO apt-get install -y lsb-release devscripts equivs
+        $SUDO apt-get install -y dpkg-dev gcc
         if ! test -r debian/control ; then
             echo debian/control is not a readable file
             exit 1
@@ -57,7 +48,9 @@ Ubuntu|Debian|Devuan)
 	$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
 	if [ -n "$backports" ] ; then rm $control; fi
         ;;
-CentOS|Fedora|RedHatEnterpriseServer)
+    centos|fedora|rhel)
+        echo "Using yum to install dependencies"
+        $SUDO yum install -y redhat-lsb-core
         case $(lsb_release -si) in
             Fedora)
                 $SUDO yum install -y yum-utils
@@ -82,12 +75,14 @@ CentOS|Fedora|RedHatEnterpriseServer)
         $SUDO yum-builddep -y $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
         ! grep -q -i error: $DIR/yum-builddep.out || exit 1
         ;;
-*SUSE*)
+    opensuse|suse|sles)
+        echo "Using zypper to install dependencies"
+        $SUDO zypper --gpg-auto-import-keys --non-interactive install lsb-release systemd-rpm-macros
         sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
         $SUDO zypper --non-interactive install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
         ;;
-*)
-        echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+    *)
+        echo "$ID is unknown, dependencies will have to be installed manually."
         ;;
 esac
 
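The two install-deps.sh copies above drop lsb_release-based detection in favour of /etc/os-release. A minimal sketch of the new pattern, assuming /etc/os-release is present (the echo lines stand in for the real package-manager calls):

    source /etc/os-release          # defines $ID, e.g. "ubuntu", "centos", "sles"
    case $ID in
        debian|ubuntu|devuan)  echo "apt-based: apt-get install ..." ;;
        centos|fedora|rhel)    echo "rpm-based: yum install ..." ;;
        opensuse|suse|sles)    echo "SUSE: zypper install ..." ;;
        *)                     echo "$ID is unknown, install dependencies manually" ;;
    esac
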
diff --git a/src/tools/Makefile-client.am b/src/tools/Makefile-client.am
index e0488fc..937d12a 100644
--- a/src/tools/Makefile-client.am
+++ b/src/tools/Makefile-client.am
@@ -103,6 +103,7 @@ librbd_mirror_internal_la_SOURCES = \
 	tools/rbd_mirror/image_replayer/CloseImageRequest.cc \
 	tools/rbd_mirror/image_replayer/CreateImageRequest.cc \
 	tools/rbd_mirror/image_replayer/EventPreprocessor.cc \
+	tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc \
 	tools/rbd_mirror/image_replayer/OpenImageRequest.cc \
 	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc \
 	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc \
@@ -130,6 +131,7 @@ noinst_HEADERS += \
 	tools/rbd_mirror/image_replayer/CloseImageRequest.h \
 	tools/rbd_mirror/image_replayer/CreateImageRequest.h \
 	tools/rbd_mirror/image_replayer/EventPreprocessor.h \
+	tools/rbd_mirror/image_replayer/IsPrimaryRequest.h \
 	tools/rbd_mirror/image_replayer/OpenImageRequest.h \
 	tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h \
 	tools/rbd_mirror/image_replayer/ReplayStatusFormatter.h \
diff --git a/src/tools/cephfs/DataScan.cc b/src/tools/cephfs/DataScan.cc
index bb0584a..7dc6019 100644
--- a/src/tools/cephfs/DataScan.cc
+++ b/src/tools/cephfs/DataScan.cc
@@ -212,8 +212,8 @@ int DataScan::main(const std::vector<const char*> &args)
   // If caller didn't specify a namespace, try to pick
   // one if only one exists
   if (fscid == FS_CLUSTER_ID_NONE) {
-    if (fsmap->get_filesystems().size() == 1) {
-      fscid = fsmap->get_filesystems().begin()->first;
+    if (fsmap->filesystem_count() == 1) {
+      fscid = fsmap->get_filesystem()->fscid;
     } else {
       std::cerr << "Specify a filesystem with --filesystem" << std::endl;
       return -EINVAL;
diff --git a/src/tools/cephfs/RoleSelector.cc b/src/tools/cephfs/RoleSelector.cc
index d51f011..3c7932a 100644
--- a/src/tools/cephfs/RoleSelector.cc
+++ b/src/tools/cephfs/RoleSelector.cc
@@ -35,8 +35,8 @@ int MDSRoleSelector::parse(const FSMap &fsmap, std::string const &str)
   if (colon_pos == std::string::npos) {
     // An unqualified rank.  Only valid if there is only one
     // namespace.
-    if (fsmap.get_filesystems().size() == 1) {
-      fscid = fsmap.get_filesystems().begin()->first;
+    if (fsmap.filesystem_count() == 1) {
+      fscid = fsmap.get_filesystem()->fscid;
       return parse_rank(fsmap, str);
     } else {
       return -EINVAL;
diff --git a/src/tools/rados/rados.cc b/src/tools/rados/rados.cc
index 09f95dd..80aac49 100644
--- a/src/tools/rados/rados.cc
+++ b/src/tools/rados/rados.cc
@@ -219,7 +219,10 @@ void usage(ostream& out)
 "   --read-percent                   percent of operations that are read\n"
 "   --target-throughput              target throughput (in bytes)\n"
 "   --run-length                     total time (in seconds)\n"
-    ;
+"CACHE POOLS OPTIONS:\n"
+"   --with-clones                    include clones when doing flush or evict\n"
+"OMAP OPTIONS:\n"
+"    --omap-key-file file            read the omap key from a file\n";
 }
 
 unsigned default_op_size = 1 << 22;
@@ -1189,22 +1192,59 @@ static int do_cache_flush_evict_all(IoCtx& io_ctx, bool blocking)
 	io_ctx.locator_set_key(string());
       }
       io_ctx.set_namespace(i->get_nspace());
-      if (blocking)
-	r = do_cache_flush(io_ctx, i->get_oid());
-      else
-	r = do_cache_try_flush(io_ctx, i->get_oid());
+      snap_set_t ls;
+      io_ctx.snap_set_read(LIBRADOS_SNAP_DIR);
+      r = io_ctx.list_snaps(i->get_oid(), &ls);
       if (r < 0) {
-	cerr << "failed to flush " << i->get_nspace() << "/" << i->get_oid() << ": "
-	     << cpp_strerror(r) << std::endl;
-	++errors;
-	continue;
+        cerr << "error listing snap shots " << i->get_nspace() << "/" << i->get_oid() << ": "
+             << cpp_strerror(r) << std::endl;
+        ++errors;
+        continue;
       }
-      r = do_cache_evict(io_ctx, i->get_oid());
-      if (r < 0) {
-	cerr << "failed to evict " << i->get_nspace() << "/" << i->get_oid() << ": "
-	     << cpp_strerror(r) << std::endl;
-	++errors;
-	continue;
+      std::vector<clone_info_t>::iterator ci = ls.clones.begin();
+      // no snapshots
+      if (ci == ls.clones.end()) {
+        io_ctx.snap_set_read(CEPH_NOSNAP);
+        if (blocking)
+          r = do_cache_flush(io_ctx, i->get_oid());
+        else
+          r = do_cache_try_flush(io_ctx, i->get_oid());
+        if (r < 0) {
+          cerr << "failed to flush " << i->get_nspace() << "/" << i->get_oid() << ": "
+               << cpp_strerror(r) << std::endl;
+          ++errors;
+          continue;
+        }
+        r = do_cache_evict(io_ctx, i->get_oid());
+        if (r < 0) {
+          cerr << "failed to evict " << i->get_nspace() << "/" << i->get_oid() << ": "
+               << cpp_strerror(r) << std::endl;
+          ++errors;
+          continue;
+        }
+      } else {
+        // has snapshots
+        for (std::vector<clone_info_t>::iterator ci = ls.clones.begin();
+             ci != ls.clones.end(); ++ci) {
+          io_ctx.snap_set_read(ci->cloneid);
+          if (blocking)
+	    r = do_cache_flush(io_ctx, i->get_oid());
+          else
+	    r = do_cache_try_flush(io_ctx, i->get_oid());
+          if (r < 0) {
+	    cerr << "failed to flush " << i->get_nspace() << "/" << i->get_oid() << ": "
+	         << cpp_strerror(r) << std::endl;
+	    ++errors;
+	    break;
+          }
+          r = do_cache_evict(io_ctx, i->get_oid());
+          if (r < 0) {
+	    cerr << "failed to evict " << i->get_nspace() << "/" << i->get_oid() << ": "
+	         << cpp_strerror(r) << std::endl;
+	    ++errors;
+	    break;
+          }
+        }
       }
     }
   }
@@ -1511,6 +1551,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
   bool cleanup = true;
   bool no_verify = false;
   bool use_striper = false;
+  bool with_clones = false;
   const char *snapname = NULL;
   snap_t snapid = CEPH_NOSNAP;
   std::map<std::string, std::string>::const_iterator i;
@@ -1535,6 +1576,9 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
   Formatter *formatter = NULL;
   bool pretty_format = false;
   const char *output = NULL;
+  bool omap_key_valid = false;
+  std::string omap_key;
+  std::string omap_key_pretty;
 
   Rados rados;
   IoCtx io_ctx;
@@ -1718,6 +1762,28 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
   if (i != opts.end()) {
     bench_write_dest |= static_cast<int>(OP_WRITE_DEST_XATTR);
   }
+  i = opts.find("with-clones");
+  if (i != opts.end()) {
+    with_clones = true;
+  }
+  i = opts.find("omap-key-file");
+  if (i != opts.end()) {
+    string err;
+    bufferlist indata;
+    ret = indata.read_file(i->second.c_str(), &err);
+    if (ret < 0) {
+      cerr << err << std::endl;
+      return 1;
+    }
+
+    omap_key_valid = true;
+    omap_key = std::string(indata.c_str(), indata.length());
+    omap_key_pretty = omap_key;
+    if (std::find_if_not(omap_key.begin(), omap_key.end(),
+                         (int (*)(int))isprint) != omap_key.end()) {
+        omap_key_pretty = "(binary key)";
+    }
+  }
 
   // open rados
   ret = rados.init_with_context(g_ceph_context);
@@ -2244,15 +2310,20 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
       ret = 0;
     }
   } else if (strcmp(nargs[0], "setomapval") == 0) {
-    if (!pool_name || nargs.size() < 3 || nargs.size() > 4)
+    uint32_t min_args = (omap_key_valid ? 2 : 3);
+    if (!pool_name || nargs.size() < min_args || nargs.size() > min_args + 1) {
       usage_exit();
+    }
 
     string oid(nargs[1]);
-    string key(nargs[2]);
+    if (!omap_key_valid) {
+      omap_key = nargs[2];
+      omap_key_pretty = omap_key;
+    }
 
     bufferlist bl;
-    if (nargs.size() == 4) {
-      string val(nargs[3]);
+    if (nargs.size() > min_args) {
+      string val(nargs[min_args]);
       bl.append(val);
     } else {
       do {
@@ -2264,41 +2335,47 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
     }
 
     map<string, bufferlist> values;
-    values[key] = bl;
+    values[omap_key] = bl;
 
     ret = io_ctx.omap_set(oid, values);
     if (ret < 0) {
       cerr << "error setting omap value " << pool_name << "/" << oid << "/"
-	   << key << ": " << cpp_strerror(ret) << std::endl;
+           << omap_key_pretty << ": " << cpp_strerror(ret) << std::endl;
       goto out;
     } else {
       ret = 0;
     }
   } else if (strcmp(nargs[0], "getomapval") == 0) {
-    if (!pool_name || nargs.size() < 3)
+    uint32_t min_args = (omap_key_valid ? 2 : 3);
+    if (!pool_name || nargs.size() < min_args || nargs.size() > min_args + 1) {
       usage_exit();
+    }
 
     string oid(nargs[1]);
-    string key(nargs[2]);
+    if (!omap_key_valid) {
+      omap_key = nargs[2];
+      omap_key_pretty = omap_key;
+    }
+
     set<string> keys;
-    keys.insert(key);
+    keys.insert(omap_key);
 
     std::string outfile;
-    if (nargs.size() >= 4) {
-      outfile = nargs[3];
+    if (nargs.size() > min_args) {
+      outfile = nargs[min_args];
     }
 
     map<string, bufferlist> values;
     ret = io_ctx.omap_get_vals_by_keys(oid, keys, &values);
     if (ret < 0) {
       cerr << "error getting omap value " << pool_name << "/" << oid << "/"
-	   << key << ": " << cpp_strerror(ret) << std::endl;
+	   << omap_key_pretty << ": " << cpp_strerror(ret) << std::endl;
       goto out;
     } else {
       ret = 0;
     }
 
-    if (values.size() && values.begin()->first == key) {
+    if (values.size() && values.begin()->first == omap_key) {
       if (!outfile.empty()) {
 	cerr << "Writing to " << outfile << std::endl;
 	dump_data(outfile, values.begin()->second);
@@ -2309,24 +2386,29 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
       }
       ret = 0;
     } else {
-      cout << "No such key: " << pool_name << "/" << oid << "/" << key
-	   << std::endl;
+      cout << "No such key: " << pool_name << "/" << oid << "/"
+           << omap_key_pretty << std::endl;
       ret = -1;
       goto out;
     }
   } else if (strcmp(nargs[0], "rmomapkey") == 0) {
-    if (!pool_name || nargs.size() < 3)
+    uint32_t num_args = (omap_key_valid ? 2 : 3);
+    if (!pool_name || nargs.size() != num_args) {
       usage_exit();
+    }
 
     string oid(nargs[1]);
-    string key(nargs[2]);
+    if (!omap_key_valid) {
+      omap_key = nargs[2];
+      omap_key_pretty = omap_key;
+    }
     set<string> keys;
-    keys.insert(key);
+    keys.insert(omap_key);
 
     ret = io_ctx.omap_rm_keys(oid, keys);
     if (ret < 0) {
       cerr << "error removing omap key " << pool_name << "/" << oid << "/"
-	   << key << ": " << cpp_strerror(ret) << std::endl;
+	   << omap_key_pretty << ": " << cpp_strerror(ret) << std::endl;
       goto out;
     } else {
       ret = 0;
@@ -3130,31 +3212,100 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
     if (!pool_name || nargs.size() < 2)
       usage_exit();
     string oid(nargs[1]);
-    ret = do_cache_flush(io_ctx, oid);
-    if (ret < 0) {
-      cerr << "error from cache-flush " << oid << ": "
-	   << cpp_strerror(ret) << std::endl;
-      goto out;
+    if (with_clones) {
+      snap_set_t ls;
+      io_ctx.snap_set_read(LIBRADOS_SNAP_DIR);
+      ret = io_ctx.list_snaps(oid, &ls);
+      if (ret < 0) {
+        cerr << "error listing snapshots " << pool_name << "/" << oid << ": "
+             << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
+      for (std::vector<clone_info_t>::iterator ci = ls.clones.begin();
+           ci != ls.clones.end(); ++ci) {
+        if (snapid != CEPH_NOSNAP && ci->cloneid > snapid)
+          break;
+        io_ctx.snap_set_read(ci->cloneid);
+        ret = do_cache_flush(io_ctx, oid);
+        if (ret < 0) {
+          cerr << "error from cache-flush " << oid << ": "
+               << cpp_strerror(ret) << std::endl;
+          goto out;
+        }
+      }
+    } else {
+      ret = do_cache_flush(io_ctx, oid);
+      if (ret < 0) {
+        cerr << "error from cache-flush " << oid << ": "
+	     << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
     }
   } else if (strcmp(nargs[0], "cache-try-flush") == 0) {
     if (!pool_name || nargs.size() < 2)
       usage_exit();
     string oid(nargs[1]);
-    ret = do_cache_try_flush(io_ctx, oid);
-    if (ret < 0) {
-      cerr << "error from cache-try-flush " << oid << ": "
-	   << cpp_strerror(ret) << std::endl;
-      goto out;
+    if (with_clones) {
+      snap_set_t ls;
+      io_ctx.snap_set_read(LIBRADOS_SNAP_DIR);
+      ret = io_ctx.list_snaps(oid, &ls);
+      if (ret < 0) {
+        cerr << "error listing snapshots " << pool_name << "/" << oid << ": "
+             << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
+      for (std::vector<clone_info_t>::iterator ci = ls.clones.begin();
+           ci != ls.clones.end(); ++ci) {
+        if (snapid != CEPH_NOSNAP && ci->cloneid > snapid)
+          break;
+        io_ctx.snap_set_read(ci->cloneid);
+        ret = do_cache_try_flush(io_ctx, oid);
+        if (ret < 0) {
+          cerr << "error from cache-flush " << oid << ": "
+               << cpp_strerror(ret) << std::endl;
+          goto out;
+        }
+      }
+    } else {
+      ret = do_cache_try_flush(io_ctx, oid);
+      if (ret < 0) {
+        cerr << "error from cache-flush " << oid << ": "
+             << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
     }
   } else if (strcmp(nargs[0], "cache-evict") == 0) {
     if (!pool_name || nargs.size() < 2)
       usage_exit();
     string oid(nargs[1]);
-    ret = do_cache_evict(io_ctx, oid);
-    if (ret < 0) {
-      cerr << "error from cache-evict " << oid << ": "
-	   << cpp_strerror(ret) << std::endl;
-      goto out;
+    if (with_clones) {
+      snap_set_t ls;
+      io_ctx.snap_set_read(LIBRADOS_SNAP_DIR);
+      ret = io_ctx.list_snaps(oid, &ls);
+      if (ret < 0) {
+        cerr << "error listing snapshots " << pool_name << "/" << oid << ": "
+             << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
+      for (std::vector<clone_info_t>::iterator ci = ls.clones.begin();
+           ci != ls.clones.end(); ++ci) {
+        if (snapid != CEPH_NOSNAP && ci->cloneid > snapid)
+          break;
+        io_ctx.snap_set_read(ci->cloneid);
+        ret = do_cache_evict(io_ctx, oid);
+        if (ret < 0) {
+          cerr << "error from cache-flush " << oid << ": "
+               << cpp_strerror(ret) << std::endl;
+          goto out;
+        }
+      }
+    } else {
+      ret = do_cache_evict(io_ctx, oid);
+      if (ret < 0) {
+        cerr << "error from cache-flush " << oid << ": "
+             << cpp_strerror(ret) << std::endl;
+        goto out;
+      }
     }
   } else if (strcmp(nargs[0], "cache-flush-evict-all") == 0) {
     if (!pool_name)
@@ -3373,6 +3524,10 @@ int main(int argc, const char **argv)
       opts["write-dest-obj"] = "true";
     } else if (ceph_argparse_flag(args, i, "--write-xattr", (char*)NULL)) {
       opts["write-dest-xattr"] = "true";
+    } else if (ceph_argparse_flag(args, i, "--with-clones", (char*)NULL)) {
+      opts["with-clones"] = "true";
+    } else if (ceph_argparse_witharg(args, i, &val, "--omap-key-file", (char*)NULL)) {
+      opts["omap-key-file"] = val;
     } else {
       if (val[0] == '-')
         usage_exit();
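
The rados changes above introduce two new options; a rough usage sketch (pool, object and key-file names are placeholders, not taken from the patch):

    # read the omap key from a file instead of passing it on the command line
    $ rados -p mypool setomapval myobject somevalue --omap-key-file /tmp/key.bin
    $ rados -p mypool getomapval myobject --omap-key-file /tmp/key.bin

    # flush/evict a cache-tier object together with its snapshot clones
    $ rados -p cachepool cache-flush myobject --with-clones
    $ rados -p cachepool cache-evict myobject --with-clones
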
diff --git a/src/tools/rbd/Utils.cc b/src/tools/rbd/Utils.cc
index bc67f5d..618d257 100644
--- a/src/tools/rbd/Utils.cc
+++ b/src/tools/rbd/Utils.cc
@@ -644,17 +644,12 @@ int snap_set(librbd::Image &image, const std::string &snap_name) {
 }
 
 std::string image_id(librbd::Image& image) {
-  librbd::image_info_t info;
-  int r = image.stat(info, sizeof(info));
+  std::string id;
+  int r = image.get_id(&id);
   if (r < 0) {
-    return string();
+    return std::string();
   }
-
-  char prefix[RBD_MAX_BLOCK_NAME_SIZE + 1];
-  strncpy(prefix, info.block_name_prefix, RBD_MAX_BLOCK_NAME_SIZE);
-  prefix[RBD_MAX_BLOCK_NAME_SIZE] = '\0';
-
-  return string(prefix + strlen(RBD_DATA_PREFIX));
+  return id;
 }
 
 std::string mirror_image_state(librbd::mirror_image_state_t state) {
diff --git a/src/tools/rbd/action/BenchWrite.cc b/src/tools/rbd/action/BenchWrite.cc
index c012708..47077c2 100644
--- a/src/tools/rbd/action/BenchWrite.cc
+++ b/src/tools/rbd/action/BenchWrite.cc
@@ -126,6 +126,11 @@ int do_bench_write(librbd::Image& image, uint64_t io_size,
     return -EINVAL;
   }
 
+  if (io_size > std::numeric_limits<uint32_t>::max()) {
+    std::cerr << "rbd: io-size should be less than 4G" << std::endl;
+    return -EINVAL;
+  }
+
   rbd_bencher b(&image);
 
   std::cout << "bench-write "
diff --git a/src/tools/rbd/action/DiskUsage.cc b/src/tools/rbd/action/DiskUsage.cc
index 9ef2e7e..3a38a05 100644
--- a/src/tools/rbd/action/DiskUsage.cc
+++ b/src/tools/rbd/action/DiskUsage.cc
@@ -110,12 +110,14 @@ static int do_disk_usage(librbd::RBD &rbd, librados::IoCtx &io_ctx,
   uint64_t used_size = 0;
   uint64_t total_prov = 0;
   uint64_t total_used = 0;
+  bool found = false;
   std::sort(names.begin(), names.end());
   for (std::vector<string>::const_iterator name = names.begin();
        name != names.end(); ++name) {
     if (imgname != NULL && *name != imgname) {
       continue;
     }
+    found = true;
 
     librbd::Image image;
     r = rbd.open_read_only(io_ctx, image, name->c_str(), NULL);
@@ -206,6 +208,10 @@ static int do_disk_usage(librbd::RBD &rbd, librados::IoCtx &io_ctx,
       ++count;
     }
   }
+  if (!found) {
+    std::cerr << "specified image " << imgname << " is not found." << std::endl;
+    return -ENOENT;
+  }
 
 out:
   if (f) {
diff --git a/src/tools/rbd/action/Info.cc b/src/tools/rbd/action/Info.cc
index 8e31e45..3be863a 100644
--- a/src/tools/rbd/action/Info.cc
+++ b/src/tools/rbd/action/Info.cc
@@ -108,9 +108,7 @@ static int do_show_info(const char *imgname, librbd::Image& image,
     }
   }
 
-  char prefix[RBD_MAX_BLOCK_NAME_SIZE + 1];
-  strncpy(prefix, info.block_name_prefix, RBD_MAX_BLOCK_NAME_SIZE);
-  prefix[RBD_MAX_BLOCK_NAME_SIZE] = '\0';
+  std::string prefix = image.get_block_name_prefix();
 
   if (f) {
     f->open_object_section("image");
diff --git a/src/tools/rbd/action/Journal.cc b/src/tools/rbd/action/Journal.cc
index cf8ec55..fd87543 100644
--- a/src/tools/rbd/action/Journal.cc
+++ b/src/tools/rbd/action/Journal.cc
@@ -111,10 +111,12 @@ static int do_show_journal_status(librados::IoCtx& io_ctx,
     f->open_object_section("status");
     f->dump_unsigned("minimum_set", minimum_set);
     f->dump_unsigned("active_set", active_set);
-    f->open_object_section("registered_clients");
+    f->open_array_section("registered_clients");
     for (std::set<cls::journal::Client>::iterator c =
           registered_clients.begin(); c != registered_clients.end(); ++c) {
+      f->open_object_section("client");
       c->dump(f);
+      f->close_section();
     }
     f->close_section();
     f->close_section();
diff --git a/src/tools/rbd/action/Nbd.cc b/src/tools/rbd/action/Nbd.cc
index 478e42a..3271096 100644
--- a/src/tools/rbd/action/Nbd.cc
+++ b/src/tools/rbd/action/Nbd.cc
@@ -100,7 +100,9 @@ void get_map_arguments(po::options_description *positional,
                                      at::ARGUMENT_MODIFIER_NONE);
   options->add_options()
     ("read-only", po::bool_switch(), "mount read-only")
-    ("device", po::value<std::string>(), "specify nbd device");
+    ("device", po::value<std::string>(), "specify nbd device")
+    ("nbds_max", po::value<std::string>(), "override module param nbds_max")
+    ("max_part", po::value<std::string>(), "override module param max_part");
 }
 
 int execute_map(const po::variables_map &vm)
@@ -137,6 +139,14 @@ int execute_map(const po::variables_map &vm)
     args.push_back("--device");
     args.push_back(vm["device"].as<std::string>().c_str());
   }
+  if (vm.count("nbds_max")) {
+    args.push_back("--nbds_max");
+    args.push_back(vm["nbds_max"].as<std::string>().c_str());
+  }
+  if (vm.count("max_part")) {
+    args.push_back("--max_part");
+    args.push_back(vm["max_part"].as<std::string>().c_str());
+  }
 
   return call_nbd_cmd(vm, args);
 }
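
The two new options above are simply forwarded to the rbd-nbd helper; a hypothetical invocation (pool/image names are placeholders):

    $ rbd nbd map mypool/myimage --nbds_max 32 --max_part 8
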
diff --git a/src/tools/rbd/action/Status.cc b/src/tools/rbd/action/Status.cc
index ab37bc8..9d2951e 100644
--- a/src/tools/rbd/action/Status.cc
+++ b/src/tools/rbd/action/Status.cc
@@ -20,7 +20,6 @@ namespace po = boost::program_options;
 static int do_show_status(librados::IoCtx &io_ctx, librbd::Image &image,
                           const char *imgname, Formatter *f)
 {
-  librbd::image_info_t info;
   uint8_t old_format;
   int r;
   std::string header_oid;
@@ -34,16 +33,13 @@ static int do_show_status(librados::IoCtx &io_ctx, librbd::Image &image,
     header_oid = imgname;
     header_oid += RBD_SUFFIX;
   } else {
-    r = image.stat(info, sizeof(info));
-    if (r < 0)
+    std::string id;
+    r = image.get_id(&id);
+    if (r < 0) {
       return r;
+    }
 
-    char prefix[RBD_MAX_BLOCK_NAME_SIZE + 1];
-    strncpy(prefix, info.block_name_prefix, RBD_MAX_BLOCK_NAME_SIZE);
-    prefix[RBD_MAX_BLOCK_NAME_SIZE] = '\0';
-
-    header_oid = RBD_HEADER_PREFIX;
-    header_oid.append(prefix + strlen(RBD_DATA_PREFIX));
+    header_oid = RBD_HEADER_PREFIX + id;
   }
 
   r = io_ctx.list_watchers(header_oid, &watchers);
@@ -54,7 +50,7 @@ static int do_show_status(librados::IoCtx &io_ctx, librbd::Image &image,
     f->open_object_section("status");
 
   if (f) {
-    f->open_object_section("watchers");
+    f->open_array_section("watchers");
     for (std::list<obj_watch_t>::iterator i = watchers.begin(); i != watchers.end(); ++i) {
       f->open_object_section("watcher");
       f->dump_string("address", i->addr);
diff --git a/src/tools/rbd/action/Watch.cc b/src/tools/rbd/action/Watch.cc
index 65be93d..80caeee 100644
--- a/src/tools/rbd/action/Watch.cc
+++ b/src/tools/rbd/action/Watch.cc
@@ -61,19 +61,13 @@ static int do_watch(librados::IoCtx& pp, librbd::Image &image,
   if (old_format != 0) {
     header_oid = std::string(imgname) + RBD_SUFFIX;
   } else {
-    librbd::image_info_t info;
-    r = image.stat(info, sizeof(info));
+    std::string id;
+    r = image.get_id(&id);
     if (r < 0) {
-      std::cerr << "failed to stat image" << std::endl;
       return r;
     }
 
-    char prefix[RBD_MAX_BLOCK_NAME_SIZE + 1];
-    strncpy(prefix, info.block_name_prefix, RBD_MAX_BLOCK_NAME_SIZE);
-    prefix[RBD_MAX_BLOCK_NAME_SIZE] = '\0';
-
-    std::string image_id(prefix + strlen(RBD_DATA_PREFIX));
-    header_oid = RBD_HEADER_PREFIX + image_id;
+    header_oid = RBD_HEADER_PREFIX + id;
   }
 
   uint64_t cookie;
diff --git a/src/tools/rbd_mirror/ImageDeleter.cc b/src/tools/rbd_mirror/ImageDeleter.cc
index 0b0c775..d089768 100644
--- a/src/tools/rbd_mirror/ImageDeleter.cc
+++ b/src/tools/rbd_mirror/ImageDeleter.cc
@@ -330,7 +330,7 @@ bool ImageDeleter::process_image_delete() {
 
     ImageCtx *imgctx = new ImageCtx("", m_active_delete->local_image_id,
                                     nullptr, ioctx, false);
-    r = imgctx->state->open();
+    r = imgctx->state->open(false);
     if (r < 0) {
       derr << "error opening image id " << m_active_delete->local_image_id
            << ": " << cpp_strerror(r) << dendl;
diff --git a/src/tools/rbd_mirror/ImageReplayer.cc b/src/tools/rbd_mirror/ImageReplayer.cc
index f83eafb..c995287 100644
--- a/src/tools/rbd_mirror/ImageReplayer.cc
+++ b/src/tools/rbd_mirror/ImageReplayer.cc
@@ -397,7 +397,7 @@ void ImageReplayer<I>::bootstrap() {
     &m_local_image_ctx, m_local_image_name, m_remote_image_id,
     m_global_image_id, m_threads->work_queue, m_threads->timer,
     &m_threads->timer_lock, m_local_mirror_uuid, m_remote_mirror_uuid,
-    m_remote_journaler, &m_client_meta, ctx, &m_progress_cxt);
+    m_remote_journaler, &m_client_meta, ctx, &m_do_resync, &m_progress_cxt);
 
   {
     Mutex::Locker locker(m_lock);
@@ -428,6 +428,9 @@ void ImageReplayer<I>::handle_bootstrap(int r) {
     dout(5) << "remote image is non-primary or local image is primary" << dendl;
     on_start_fail(0, "remote image is non-primary or local image is primary");
     return;
+  } else if (r == -EEXIST) {
+    on_start_fail(r, "split-brain detected");
+    return;
   } else if (r < 0) {
     on_start_fail(r, "error bootstrapping replay");
     return;
@@ -435,7 +438,6 @@ void ImageReplayer<I>::handle_bootstrap(int r) {
     return;
   }
 
-
   assert(m_local_journal == nullptr);
   {
     RWLock::RLocker snap_locker(m_local_image_ctx->snap_lock);
@@ -452,13 +454,8 @@ void ImageReplayer<I>::handle_bootstrap(int r) {
 
   {
     Mutex::Locker locker(m_lock);
-    bool do_resync = false;
-    r = m_local_image_ctx->journal->is_resync_requested(&do_resync);
-    if (r < 0) {
-      derr << "failed to check if a resync was requested" << dendl;
-    }
 
-    if (do_resync) {
+    if (m_do_resync) {
       Context *on_finish = m_on_start_finish;
       m_stopping_for_resync = true;
       FunctionContext *ctx = new FunctionContext([this, on_finish](int r) {
@@ -607,6 +604,7 @@ void ImageReplayer<I>::on_start_fail(int r, const std::string &desc)
   Context *ctx = new FunctionContext([this, r, desc](int _r) {
       {
         Mutex::Locker locker(m_lock);
+        assert(m_state == STATE_STARTING);
         m_state = STATE_STOPPING;
         if (r < 0 && r != -ECANCELED) {
           derr << "start failed: " << cpp_strerror(r) << dendl;
@@ -1061,6 +1059,12 @@ void ImageReplayer<I>::process_entry() {
   dout(20) << "processing entry tid=" << m_replay_entry.get_commit_tid()
            << dendl;
 
+  // stop replaying events if stop has been requested
+  if (on_replay_interrupted()) {
+    m_event_replay_tracker.finish_op();
+    return;
+  }
+
   Context *on_ready = create_context_callback<
     ImageReplayer, &ImageReplayer<I>::handle_process_entry_ready>(this);
   Context *on_commit = new C_ReplayCommitted(this, std::move(m_replay_entry));
diff --git a/src/tools/rbd_mirror/ImageReplayer.h b/src/tools/rbd_mirror/ImageReplayer.h
index 222ef02..ee080f1 100644
--- a/src/tools/rbd_mirror/ImageReplayer.h
+++ b/src/tools/rbd_mirror/ImageReplayer.h
@@ -253,6 +253,7 @@ private:
   int m_last_r = 0;
   std::string m_state_desc;
   BootstrapProgressContext m_progress_cxt;
+  bool m_do_resync;
   image_replayer::EventPreprocessor<ImageCtxT> *m_event_preprocessor = nullptr;
   image_replayer::ReplayStatusFormatter<ImageCtxT> *m_replay_status_formatter =
     nullptr;
diff --git a/src/tools/rbd_mirror/image_replayer/BootstrapRequest.cc b/src/tools/rbd_mirror/image_replayer/BootstrapRequest.cc
index 2f791f0..0b0a9cf 100644
--- a/src/tools/rbd_mirror/image_replayer/BootstrapRequest.cc
+++ b/src/tools/rbd_mirror/image_replayer/BootstrapRequest.cc
@@ -4,6 +4,7 @@
 #include "BootstrapRequest.h"
 #include "CloseImageRequest.h"
 #include "CreateImageRequest.h"
+#include "IsPrimaryRequest.h"
 #include "OpenImageRequest.h"
 #include "OpenLocalImageRequest.h"
 #include "common/debug.h"
@@ -51,6 +52,7 @@ BootstrapRequest<I>::BootstrapRequest(
         Journaler *journaler,
         MirrorPeerClientMeta *client_meta,
         Context *on_finish,
+        bool *do_resync,
         rbd::mirror::ProgressContext *progress_ctx)
   : BaseRequest("rbd::mirror::image_replayer::BootstrapRequest",
 		reinterpret_cast<CephContext*>(local_io_ctx.cct()), on_finish),
@@ -62,6 +64,7 @@ BootstrapRequest<I>::BootstrapRequest(
     m_local_mirror_uuid(local_mirror_uuid),
     m_remote_mirror_uuid(remote_mirror_uuid), m_journaler(journaler),
     m_client_meta(client_meta), m_progress_ctx(progress_ctx),
+    m_do_resync(do_resync),
     m_lock(unique_lock_name("BootstrapRequest::m_lock", this)) {
 }
 
@@ -72,6 +75,8 @@ BootstrapRequest<I>::~BootstrapRequest() {
 
 template <typename I>
 void BootstrapRequest<I>::send() {
+  *m_do_resync = false;
+
   get_local_image_id();
 }
 
@@ -252,9 +257,6 @@ void BootstrapRequest<I>::open_remote_image() {
 
 template <typename I>
 void BootstrapRequest<I>::handle_open_remote_image(int r) {
-  // deduce the class type for the journal to support unit tests
-  typedef typename std::decay<decltype(*I::journal)>::type Journal;
-
   dout(20) << ": r=" << r << dendl;
 
   if (r < 0) {
@@ -264,18 +266,36 @@ void BootstrapRequest<I>::handle_open_remote_image(int r) {
     return;
   }
 
-  // TODO: make async
-  bool tag_owner;
-  r = Journal::is_tag_owner(m_remote_image_ctx, &tag_owner);
+  is_primary();
+}
+
+template <typename I>
+void BootstrapRequest<I>::is_primary() {
+  dout(20) << dendl;
+
+  update_progress("OPEN_REMOTE_IMAGE");
+
+  Context *ctx = create_context_callback<
+    BootstrapRequest<I>, &BootstrapRequest<I>::handle_is_primary>(
+      this);
+  IsPrimaryRequest<I> *request = IsPrimaryRequest<I>::create(m_remote_image_ctx,
+                                                             &m_primary, ctx);
+  request->send();
+}
+
+template <typename I>
+void BootstrapRequest<I>::handle_is_primary(int r) {
+  dout(20) << ": r=" << r << dendl;
+
   if (r < 0) {
-    derr << ": failed to query remote image primary status: " << cpp_strerror(r)
+    derr << ": error querying remote image primary status: " << cpp_strerror(r)
          << dendl;
     m_ret_val = r;
     close_remote_image();
     return;
   }
 
-  if (!tag_owner) {
+  if (!m_primary) {
     dout(5) << ": remote image is not primary -- skipping image replay"
             << dendl;
     m_ret_val = -EREMOTEIO;
@@ -369,7 +389,33 @@ void BootstrapRequest<I>::handle_open_local_image(int r) {
     m_ret_val = r;
     close_remote_image();
     return;
-  } if (m_client.state == cls::journal::CLIENT_STATE_DISCONNECTED) {
+  }
+
+  I *local_image_ctx = (*m_local_image_ctx);
+  {
+    RWLock::RLocker snap_locker(local_image_ctx->snap_lock);
+    if (local_image_ctx->journal == nullptr) {
+      derr << ": local image does not support journaling" << dendl;
+      m_ret_val = -EINVAL;
+      close_local_image();
+      return;
+    }
+
+    r = (*m_local_image_ctx)->journal->is_resync_requested(m_do_resync);
+    if (r < 0) {
+      derr << ": failed to check if a resync was requested" << dendl;
+      m_ret_val = r;
+      close_local_image();
+      return;
+    }
+  }
+
+  if (*m_do_resync) {
+    close_remote_image();
+    return;
+  }
+
+  if (m_client.state == cls::journal::CLIENT_STATE_DISCONNECTED) {
     dout(10) << ": client flagged disconnected -- skipping bootstrap" << dendl;
     // The caller is expected to detect disconnect initializing remote journal.
     m_ret_val = 0;
@@ -509,7 +555,7 @@ void BootstrapRequest<I>::handle_get_remote_tags(int r) {
   {
     RWLock::RLocker snap_locker(local_image_ctx->snap_lock);
     if (local_image_ctx->journal == nullptr) {
-      derr << "local image does not support journaling" << dendl;
+      derr << ": local image does not support journaling" << dendl;
       m_ret_val = -EINVAL;
       close_local_image();
       return;
diff --git a/src/tools/rbd_mirror/image_replayer/BootstrapRequest.h b/src/tools/rbd_mirror/image_replayer/BootstrapRequest.h
index 8926adf..6b4ed21 100644
--- a/src/tools/rbd_mirror/image_replayer/BootstrapRequest.h
+++ b/src/tools/rbd_mirror/image_replayer/BootstrapRequest.h
@@ -52,14 +52,15 @@ public:
         Journaler *journaler,
         MirrorPeerClientMeta *client_meta,
         Context *on_finish,
+        bool *do_resync,
         ProgressContext *progress_ctx = nullptr) {
     return new BootstrapRequest(local_io_ctx, remote_io_ctx,
                                 image_sync_throttler, local_image_ctx,
                                 local_image_name, remote_image_id,
                                 global_image_id, work_queue, timer, timer_lock,
                                 local_mirror_uuid, remote_mirror_uuid,
-                                journaler, client_meta, on_finish,
-				progress_ctx);
+                                journaler, client_meta, on_finish, do_resync,
+                                progress_ctx);
   }
 
   BootstrapRequest(librados::IoCtx &local_io_ctx,
@@ -73,7 +74,7 @@ public:
                    const std::string &local_mirror_uuid,
                    const std::string &remote_mirror_uuid, Journaler *journaler,
                    MirrorPeerClientMeta *client_meta, Context *on_finish,
-		   ProgressContext *progress_ctx = nullptr);
+                   bool *do_resync, ProgressContext *progress_ctx = nullptr);
   ~BootstrapRequest();
 
   void send();
@@ -100,6 +101,9 @@ private:
    *    v                                               *
    * OPEN_REMOTE_IMAGE  * * * * * * * * * * * * * * * * *
    *    |                                               *
+   *    v                                               *
+   * IS_PRIMARY * * * * * * * * * * * * * * * * * * * * *
+   *    |                                               *
    *    | (remote image primary)                        *
    *    \----> OPEN_LOCAL_IMAGE * * * * * * * * * * * * *
    *    |         |   .   ^                             *
@@ -158,6 +162,7 @@ private:
   Journaler *m_journaler;
   MirrorPeerClientMeta *m_client_meta;
   ProgressContext *m_progress_ctx;
+  bool *m_do_resync;
   Mutex m_lock;
   bool m_canceled = false;
 
@@ -165,6 +170,7 @@ private:
   cls::journal::Client m_client;
   uint64_t m_remote_tag_class = 0;
   ImageCtxT *m_remote_image_ctx = nullptr;
+  bool m_primary = false;
   bool m_created_local_image = false;
   int m_ret_val = 0;
 
@@ -185,6 +191,9 @@ private:
   void open_remote_image();
   void handle_open_remote_image(int r);
 
+  void is_primary();
+  void handle_is_primary(int r);
+
   void update_client_state();
   void handle_update_client_state(int r);
 
diff --git a/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc b/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc
new file mode 100644
index 0000000..8a45a95
--- /dev/null
+++ b/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.cc
@@ -0,0 +1,122 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "IsPrimaryRequest.h"
+#include "common/errno.h"
+#include "common/WorkQueue.h"
+#include "cls/rbd/cls_rbd_client.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Journal.h"
+#include "librbd/Utils.h"
+#include <type_traits>
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rbd_mirror
+#undef dout_prefix
+#define dout_prefix *_dout << "rbd::mirror::image_replayer::IsPrimaryRequest: " \
+                           << this << " " << __func__ << " "
+
+namespace rbd {
+namespace mirror {
+namespace image_replayer {
+
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_ack_callback;
+
+template <typename I>
+IsPrimaryRequest<I>::IsPrimaryRequest(I *image_ctx, bool *primary,
+                                      Context *on_finish)
+  : m_image_ctx(image_ctx), m_primary(primary), m_on_finish(on_finish) {
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::send() {
+  send_get_mirror_state();
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::send_get_mirror_state() {
+  dout(20) << dendl;
+
+  librados::ObjectReadOperation op;
+  librbd::cls_client::mirror_image_get_start(&op, m_image_ctx->id);
+
+  librados::AioCompletion *aio_comp = create_rados_ack_callback<
+    IsPrimaryRequest<I>, &IsPrimaryRequest<I>::handle_get_mirror_state>(this);
+  int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op,
+                                          &m_out_bl);
+  assert(r == 0);
+  aio_comp->release();
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::handle_get_mirror_state(int r) {
+  dout(20) << ": r=" << r << dendl;
+
+  cls::rbd::MirrorImage mirror_image;
+  if (r == 0) {
+    bufferlist::iterator iter = m_out_bl.begin();
+    r = librbd::cls_client::mirror_image_get_finish(&iter, &mirror_image);
+    if (r == 0) {
+      if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
+        send_is_tag_owner();
+        return;
+      } else if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_DISABLING) {
+        dout(5) << ": image mirroring is being disabled" << dendl;
+        *m_primary = false;
+      } else {
+        derr << ": image mirroring is disabled" << dendl;
+        r = -EINVAL;
+      }
+    } else {
+      derr << ": failed to decode image mirror state: " << cpp_strerror(r)
+           << dendl;
+    }
+  } else {
+    derr << ": failed to retrieve image mirror state: " << cpp_strerror(r)
+         << dendl;
+  }
+
+  finish(r);
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::send_is_tag_owner() {
+  // deduce the class type for the journal to support unit tests
+  using Journal = typename std::decay<
+    typename std::remove_pointer<decltype(std::declval<I>().journal)>
+    ::type>::type;
+
+  dout(20) << dendl;
+
+  Context *ctx = create_context_callback<
+    IsPrimaryRequest<I>, &IsPrimaryRequest<I>::handle_is_tag_owner>(this);
+
+  Journal::is_tag_owner(m_image_ctx, m_primary, ctx);
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::handle_is_tag_owner(int r) {
+  dout(20) << ": r=" << r << dendl;
+
+  if (r < 0) {
+    derr << ": failed to query remote image tag owner: " << cpp_strerror(r)
+         << dendl;
+  }
+
+  finish(r);
+}
+
+template <typename I>
+void IsPrimaryRequest<I>::finish(int r) {
+  dout(20) << ": r=" << r << dendl;
+
+  m_on_finish->complete(r);
+  delete this;
+}
+
+} // namespace image_replayer
+} // namespace mirror
+} // namespace rbd
+
+template class rbd::mirror::image_replayer::IsPrimaryRequest<librbd::ImageCtx>;
diff --git a/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.h b/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.h
new file mode 100644
index 0000000..ddb332c
--- /dev/null
+++ b/src/tools/rbd_mirror/image_replayer/IsPrimaryRequest.h
@@ -0,0 +1,67 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef RBD_MIRROR_IMAGE_REPLAYER_IS_PRIMARY_REQUEST_H
+#define RBD_MIRROR_IMAGE_REPLAYER_IS_PRIMARY_REQUEST_H
+
+#include "include/buffer.h"
+
+class Context;
+class ContextWQ;
+namespace librbd { class ImageCtx; }
+
+namespace rbd {
+namespace mirror {
+namespace image_replayer {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class IsPrimaryRequest {
+public:
+  static IsPrimaryRequest* create(ImageCtxT *image_ctx, bool *primary,
+                                  Context *on_finish) {
+    return new IsPrimaryRequest(image_ctx, primary, on_finish);
+  }
+
+  IsPrimaryRequest(ImageCtxT *image_ctx, bool *primary, Context *on_finish);
+
+  void send();
+
+private:
+  /**
+   * @verbatim
+   *
+   * <start>
+   *    |
+   *    v
+   * GET_MIRROR_STATE * * * * *
+   *    |                     *
+   *    v                     *
+   * IS_TAG_OWNER * * * * * * * (error)
+   *    |                     *
+   *    v                     *
+   * <finish> < * * * * * * * *
+   *
+   * @endverbatim
+   */
+  ImageCtxT *m_image_ctx;
+  bool *m_primary;
+  Context *m_on_finish;
+
+  bufferlist m_out_bl;
+
+  void send_get_mirror_state();
+  void handle_get_mirror_state(int r);
+
+  void send_is_tag_owner();
+  void handle_is_tag_owner(int r);
+
+  void finish(int r);
+};
+
+} // namespace image_replayer
+} // namespace mirror
+} // namespace rbd
+
+extern template class rbd::mirror::image_replayer::IsPrimaryRequest<librbd::ImageCtx>;
+
+#endif // RBD_MIRROR_IMAGE_REPLAYER_IS_PRIMARY_REQUEST_H
diff --git a/src/tools/rbd_mirror/image_replayer/OpenImageRequest.cc b/src/tools/rbd_mirror/image_replayer/OpenImageRequest.cc
index 9cf6af2..32c0724 100644
--- a/src/tools/rbd_mirror/image_replayer/OpenImageRequest.cc
+++ b/src/tools/rbd_mirror/image_replayer/OpenImageRequest.cc
@@ -44,7 +44,7 @@ void OpenImageRequest<I>::send_open_image() {
   Context *ctx = create_context_callback<
     OpenImageRequest<I>, &OpenImageRequest<I>::handle_open_image>(
       this);
-  (*m_image_ctx)->state->open(ctx);
+  (*m_image_ctx)->state->open(false, ctx);
 }
 
 template <typename I>
diff --git a/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc b/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc
index be0b671..9ef103b 100644
--- a/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc
+++ b/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.cc
@@ -1,8 +1,9 @@
 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 
-#include "OpenLocalImageRequest.h"
 #include "CloseImageRequest.h"
+#include "IsPrimaryRequest.h"
+#include "OpenLocalImageRequest.h"
 #include "common/errno.h"
 #include "common/WorkQueue.h"
 #include "librbd/ExclusiveLock.h"
@@ -29,10 +30,15 @@ namespace {
 
 struct MirrorExclusiveLockPolicy : public librbd::exclusive_lock::Policy {
 
-  virtual void lock_requested(bool force) {
+  virtual bool may_auto_request_lock() {
+    return false;
+  }
+
+  virtual int lock_requested(bool force) {
     // TODO: interlock is being requested (e.g. local promotion)
     // Wait for demote event from peer or abort replay on forced
     // promotion.
+    return -EROFS;
   }
 
 };
@@ -94,7 +100,7 @@ void OpenLocalImageRequest<I>::send_open_image() {
   Context *ctx = create_context_callback<
     OpenLocalImageRequest<I>, &OpenLocalImageRequest<I>::handle_open_image>(
       this);
-  (*m_local_image_ctx)->state->open(ctx);
+  (*m_local_image_ctx)->state->open(false, ctx);
 }
 
 template <typename I>
@@ -108,40 +114,54 @@ void OpenLocalImageRequest<I>::handle_open_image(int r) {
     return;
   }
 
-  send_lock_image();
+  send_is_primary();
 }
 
 template <typename I>
-void OpenLocalImageRequest<I>::send_lock_image() {
-  // deduce the class type for the journal to support unit tests
-  typedef typename std::decay<decltype(*I::journal)>::type Journal;
-
+void OpenLocalImageRequest<I>::send_is_primary() {
   dout(20) << dendl;
 
-  RWLock::RLocker owner_locker((*m_local_image_ctx)->owner_lock);
-  if ((*m_local_image_ctx)->exclusive_lock == nullptr) {
-    derr << ": image does not support exclusive lock" << dendl;
-    send_close_image(false, -EINVAL);
-    return;
-  }
+  Context *ctx = create_context_callback<
+    OpenLocalImageRequest<I>, &OpenLocalImageRequest<I>::handle_is_primary>(
+      this);
+  IsPrimaryRequest<I> *request = IsPrimaryRequest<I>::create(*m_local_image_ctx,
+                                                             &m_primary, ctx);
+  request->send();
+}
+
+template <typename I>
+void OpenLocalImageRequest<I>::handle_is_primary(int r) {
+  dout(20) << ": r=" << r << dendl;
 
-  // TODO: make an async version
-  bool tag_owner;
-  int r = Journal::is_tag_owner(*m_local_image_ctx, &tag_owner);
   if (r < 0) {
-    derr << ": failed to query journal: " << cpp_strerror(r) << dendl;
+    derr << ": error querying local image primary status: " << cpp_strerror(r)
+         << dendl;
     send_close_image(false, r);
     return;
   }
 
   // if the local image owns the tag -- don't steal the lock since
   // we aren't going to mirror peer data into this image anyway
-  if (tag_owner) {
+  if (m_primary) {
     dout(10) << ": local image is primary -- skipping image replay" << dendl;
     send_close_image(false, -EREMOTEIO);
     return;
   }
 
+  send_lock_image();
+}
+
+template <typename I>
+void OpenLocalImageRequest<I>::send_lock_image() {
+  dout(20) << dendl;
+
+  RWLock::RLocker owner_locker((*m_local_image_ctx)->owner_lock);
+  if ((*m_local_image_ctx)->exclusive_lock == nullptr) {
+    derr << ": image does not support exclusive lock" << dendl;
+    send_close_image(false, -EINVAL);
+    return;
+  }
+
   // disallow any proxied maintenance operations before grabbing lock
   (*m_local_image_ctx)->exclusive_lock->block_requests(-EROFS);
 
@@ -161,11 +181,16 @@ void OpenLocalImageRequest<I>::handle_lock_image(int r) {
        << cpp_strerror(r) << dendl;
     send_close_image(false, r);
     return;
-  } else if ((*m_local_image_ctx)->exclusive_lock == nullptr ||
-             !(*m_local_image_ctx)->exclusive_lock->is_lock_owner()) {
-    derr << ": image is not locked" << dendl;
-    send_close_image(false, -EBUSY);
-    return;
+  }
+
+  {
+    RWLock::RLocker owner_locker((*m_local_image_ctx)->owner_lock);
+    if ((*m_local_image_ctx)->exclusive_lock == nullptr ||
+	!(*m_local_image_ctx)->exclusive_lock->is_lock_owner()) {
+      derr << ": image is not locked" << dendl;
+      send_close_image(false, -EBUSY);
+      return;
+    }
   }
 
   finish(0);
diff --git a/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h b/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h
index e40b1c2..2a1bbb2 100644
--- a/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h
+++ b/src/tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h
@@ -48,6 +48,9 @@ private:
    *    v
    * OPEN_IMAGE * * * * * * * *
    *    |                     *
+   *    v                     *
+   * IS_PRIMARY * * * * * * * *
+   *    |                     *
    *    v (skip if primary)   v
    * LOCK_IMAGE * * * > CLOSE_IMAGE
    *    |                     |
@@ -63,11 +66,15 @@ private:
   ContextWQ *m_work_queue;
   Context *m_on_finish;
 
+  bool m_primary = false;
   int m_ret_val = 0;
 
   void send_open_image();
   void handle_open_image(int r);
 
+  void send_is_primary();
+  void handle_is_primary(int r);
+
   void send_lock_image();
   void handle_lock_image(int r);
 
diff --git a/src/tools/rbd_mirror/image_sync/ObjectCopyRequest.cc b/src/tools/rbd_mirror/image_sync/ObjectCopyRequest.cc
index 9975b5b..5c02473 100644
--- a/src/tools/rbd_mirror/image_sync/ObjectCopyRequest.cc
+++ b/src/tools/rbd_mirror/image_sync/ObjectCopyRequest.cc
@@ -255,14 +255,11 @@ void ObjectCopyRequest<I>::send_update_object_map() {
            << dendl;
 
   RWLock::WLocker object_map_locker(m_local_image_ctx->object_map_lock);
-  Context *ctx = create_context_callback<
+  bool sent = m_local_image_ctx->object_map->template aio_update<
     ObjectCopyRequest<I>, &ObjectCopyRequest<I>::handle_update_object_map>(
+      snap_object_state.first, m_object_number, snap_object_state.second, {},
       this);
-  m_local_image_ctx->object_map->aio_update(snap_object_state.first,
-                                            m_object_number,
-                                            m_object_number + 1,
-                                            snap_object_state.second,
-                                            boost::none, ctx);
+  assert(sent);
   m_local_image_ctx->snap_lock.put_read();
 }
 
diff --git a/src/tools/rbd_mirror/image_sync/SnapshotCopyRequest.cc b/src/tools/rbd_mirror/image_sync/SnapshotCopyRequest.cc
index 026e72f..2f0946c 100644
--- a/src/tools/rbd_mirror/image_sync/SnapshotCopyRequest.cc
+++ b/src/tools/rbd_mirror/image_sync/SnapshotCopyRequest.cc
@@ -105,7 +105,7 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
     int r = m_local_image_ctx->is_snap_unprotected(local_snap_id,
                                                    &local_unprotected);
     if (r < 0) {
-      derr << "failed to retrieve local snap unprotect status: "
+      derr << ": failed to retrieve local snap unprotect status: "
            << cpp_strerror(r) << dendl;
       m_local_image_ctx->snap_lock.put_read();
       finish(r);
@@ -132,7 +132,7 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
       r = m_remote_image_ctx->is_snap_unprotected(snap_seq_it->first,
                                                   &remote_unprotected);
       if (r < 0) {
-        derr << "failed to retrieve remote snap unprotect status: "
+        derr << ": failed to retrieve remote snap unprotect status: "
              << cpp_strerror(r) << dendl;
         m_remote_image_ctx->snap_lock.put_read();
         finish(r);
@@ -281,7 +281,7 @@ void SnapshotCopyRequest<I>::send_snap_create() {
   auto snap_info_it = m_remote_image_ctx->snap_info.find(m_prev_snap_id);
   if (snap_info_it == m_remote_image_ctx->snap_info.end()) {
     m_remote_image_ctx->snap_lock.put_read();
-    derr << "failed to retrieve remote snap info: " << m_snap_name
+    derr << ": failed to retrieve remote snap info: " << m_snap_name
          << dendl;
     finish(-ENOENT);
     return;
@@ -358,7 +358,7 @@ void SnapshotCopyRequest<I>::send_snap_protect() {
     int r = m_remote_image_ctx->is_snap_protected(remote_snap_id,
                                                   &remote_protected);
     if (r < 0) {
-      derr << "failed to retrieve remote snap protect status: "
+      derr << ": failed to retrieve remote snap protect status: "
            << cpp_strerror(r) << dendl;
       m_remote_image_ctx->snap_lock.put_read();
       finish(r);
@@ -380,7 +380,7 @@ void SnapshotCopyRequest<I>::send_snap_protect() {
     r = m_local_image_ctx->is_snap_protected(snap_seq_it->second,
                                              &local_protected);
     if (r < 0) {
-      derr << "failed to retrieve local snap protect status: "
+      derr << ": failed to retrieve local snap protect status: "
            << cpp_strerror(r) << dendl;
       m_local_image_ctx->snap_lock.put_read();
       finish(r);
diff --git a/src/tools/rbd_mirror/image_sync/SnapshotCreateRequest.cc b/src/tools/rbd_mirror/image_sync/SnapshotCreateRequest.cc
index ee0c80f..7a9a379 100644
--- a/src/tools/rbd_mirror/image_sync/SnapshotCreateRequest.cc
+++ b/src/tools/rbd_mirror/image_sync/SnapshotCreateRequest.cc
@@ -214,7 +214,7 @@ void SnapshotCreateRequest<I>::send_create_object_map() {
   m_local_image_ctx->snap_lock.get_read();
   auto snap_it = m_local_image_ctx->snap_ids.find(m_snap_name);
   if (snap_it == m_local_image_ctx->snap_ids.end()) {
-    derr << "failed to locate snap: " << m_snap_name << dendl;
+    derr << ": failed to locate snap: " << m_snap_name << dendl;
     m_local_image_ctx->snap_lock.put_read();
     finish(-ENOENT);
     return;
@@ -222,7 +222,7 @@ void SnapshotCreateRequest<I>::send_create_object_map() {
   librados::snap_t local_snap_id = snap_it->second;
   m_local_image_ctx->snap_lock.put_read();
 
-  std::string object_map_oid(librbd::ObjectMap::object_map_name(
+  std::string object_map_oid(librbd::ObjectMap<>::object_map_name(
     m_local_image_ctx->id, local_snap_id));
   uint64_t object_count = Striper::get_num_objects(m_local_image_ctx->layout,
                                                    m_size);
diff --git a/src/tools/rbd_nbd/rbd-nbd.cc b/src/tools/rbd_nbd/rbd-nbd.cc
index 49cbeec..9d38cf1 100644
--- a/src/tools/rbd_nbd/rbd-nbd.cc
+++ b/src/tools/rbd_nbd/rbd-nbd.cc
@@ -62,7 +62,8 @@ static void usage()
             << "Options:\n"
             << "  --device <device path>                    Specify nbd device path\n"
             << "  --read-only                               Map readonly\n"
-            << "  --nbds_max <limit>                        Override for module param\n"
+            << "  --nbds_max <limit>                        Override for module param nbds_max\n"
+            << "  --max_part <limit>                        Override for module param max_part\n"
             << std::endl;
   generic_server_usage();
 }
@@ -70,6 +71,7 @@ static void usage()
 static std::string devpath, poolname("rbd"), imgname, snapname;
 static bool readonly = false;
 static int nbds_max = 0;
+static int max_part = 255;
 
 #ifdef CEPH_BIG_ENDIAN
 #define ntohll(a) (a)
@@ -219,7 +221,7 @@ private:
 
       int r = safe_read_exact(fd, &ctx->request, sizeof(struct nbd_request));
       if (r < 0) {
-	derr << "failed to read nbd request header: " << cpp_strerror(errno)
+	derr << "failed to read nbd request header: " << cpp_strerror(r)
 	     << dendl;
 	return;
       }
@@ -251,7 +253,7 @@ private:
 	  r = safe_read_exact(fd, ptr.c_str(), ctx->request.len);
           if (r < 0) {
 	    derr << *ctx << ": failed to read nbd request data: "
-		 << cpp_strerror(errno) << dendl;
+		 << cpp_strerror(r) << dendl;
             return;
 	  }
           ctx->data.push_back(ptr);
@@ -443,14 +445,15 @@ static int open_device(const char* path, bool try_load_moudle = false)
 {
   int nbd = open(path, O_RDWR);
   if (nbd < 0 && try_load_moudle && access("/sys/module/nbd", F_OK) != 0) {
+    ostringstream param;
     int r;
     if (nbds_max) {
-      ostringstream param;
       param << "nbds_max=" << nbds_max;
-      r = module_load("nbd", param.str().c_str());
-    } else {
-      r = module_load("nbd", NULL);
     }
+    if (max_part) {
+        param << " max_part=" << max_part;
+    }
+    r = module_load("nbd", param.str().c_str());
     if (r < 0) {
       cerr << "rbd-nbd: failed to load nbd kernel module: " << cpp_strerror(-r) << std::endl;
       return r;
@@ -582,6 +585,14 @@ static int do_map()
   }
 
   size = info.size;
+
+  if (size > (1UL << 32) * 512) {
+    r = -EFBIG;
+    cerr << "rbd-nbd: image is too large (" << prettybyte_t(size) << ", max is "
+         << prettybyte_t((1UL << 32) * 512) << ")" << std::endl;
+    goto close_nbd;
+  }
+
   r = ioctl(nbd, NBD_SET_SIZE, size);
   if (r < 0) {
     r = -errno;
@@ -755,6 +766,15 @@ static int rbd_nbd(int argc, const char *argv[])
         cerr << "rbd-nbd: Invalid argument for nbds_max!" << std::endl;
         return EXIT_FAILURE;
       }
+    } else if (ceph_argparse_witharg(args, i, &max_part, err, "--max_part", (char *)NULL)) {
+      if (!err.str().empty()) {
+        cerr << err.str() << std::endl;
+        return EXIT_FAILURE;
+      }
+      if ((max_part < 0) || (max_part > 255)) {
+        cerr << "rbd-nbd: Invalid argument for max_part(0~255)!" << std::endl;
+        return EXIT_FAILURE;
+      }
     } else if (ceph_argparse_flag(args, i, "--read-only", (char *)NULL)) {
       readonly = true;
     } else {
diff --git a/src/tracing/librbd.tp b/src/tracing/librbd.tp
index 96b478b..668fed5 100644
--- a/src/tracing/librbd.tp
+++ b/src/tracing/librbd.tp
@@ -1766,6 +1766,84 @@ TRACEPOINT_EVENT(librbd, is_exclusive_lock_owner_exit,
     )
 )
 
+TRACEPOINT_EVENT(librbd, lock_acquire_enter,
+    TP_ARGS(
+        void*, imagectx,
+        int, lock_mode),
+    TP_FIELDS(
+      ctf_integer_hex(void*, imagectx, imagectx)
+      ctf_integer(int, lock_mode, lock_mode)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_acquire_exit,
+    TP_ARGS(
+        void*, imagectx,
+        int, retval),
+    TP_FIELDS(
+      ctf_integer_hex(void*, imagectx, imagectx)
+      ctf_integer(int, retval, retval)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_release_enter,
+    TP_ARGS(
+        void*, imagectx),
+    TP_FIELDS(
+      ctf_integer_hex(void*, imagectx, imagectx)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_release_exit,
+    TP_ARGS(
+        void*, imagectx,
+        int, retval),
+    TP_FIELDS(
+      ctf_integer_hex(void*, imagectx, imagectx)
+      ctf_integer(int, retval, retval)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_get_owners_enter,
+    TP_ARGS(
+        void*, imagectx),
+    TP_FIELDS(
+        ctf_integer_hex(void*, imagectx, imagectx)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_get_owners_exit,
+    TP_ARGS(
+        void*, imagectx,
+        int, retval),
+    TP_FIELDS(
+        ctf_integer_hex(void*, imagectx, imagectx)
+        ctf_integer(int, retval, retval)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_break_enter,
+    TP_ARGS(
+        void*, imagectx,
+        int, lock_mode,
+	const char*, lock_owner),
+    TP_FIELDS(
+        ctf_integer_hex(void*, imagectx, imagectx)
+        ctf_integer(int, lock_mode, lock_mode)
+        ctf_string(lock_owner, lock_owner)
+    )
+)
+
+TRACEPOINT_EVENT(librbd, lock_break_exit,
+    TP_ARGS(
+        void*, imagectx,
+        int, retval),
+    TP_FIELDS(
+        ctf_integer_hex(void*, imagectx, imagectx)
+        ctf_integer(int, retval, retval)
+    )
+)
+
 TRACEPOINT_EVENT(librbd, stat_enter,
     TP_ARGS(
         void*, imagectx,
diff --git a/systemd/ceph-disk@.service b/systemd/ceph-disk@.service
index f13c30b..e85f0df 100644
--- a/systemd/ceph-disk@.service
+++ b/systemd/ceph-disk@.service
@@ -1,8 +1,10 @@
 [Unit]
 Description=Ceph disk activation: %f
+After=local-fs.target
+Wants=local-fs.target
 
 [Service]
 Type=oneshot
 KillMode=none
-ExecStart=/bin/sh -c 'timeout 120 flock /var/lock/ceph-disk /usr/sbin/ceph-disk --verbose --log-stdout trigger --sync %f'
+ExecStart=/bin/sh -c 'timeout 120 flock /var/lock/ceph-disk-$(basename %f) /usr/sbin/ceph-disk --verbose --log-stdout trigger --sync %f'
 TimeoutSec=0
diff --git a/systemd/ceph-mon@.service b/systemd/ceph-mon@.service
index 15e2bf7..5291210 100644
--- a/systemd/ceph-mon@.service
+++ b/systemd/ceph-mon@.service
@@ -24,7 +24,8 @@ PrivateTmp=true
 TasksMax=infinity
 Restart=on-failure
 StartLimitInterval=30min
-StartLimitBurst=3
+StartLimitBurst=5
+RestartSec=10
 
 [Install]
 WantedBy=ceph-mon.target
diff --git a/systemd/ceph-osd@.service b/systemd/ceph-osd@.service
index 592b324..bd5a78b 100644
--- a/systemd/ceph-osd@.service
+++ b/systemd/ceph-osd@.service
@@ -18,7 +18,8 @@ PrivateTmp=true
 TasksMax=infinity
 Restart=on-failure
 StartLimitInterval=30min
-StartLimitBurst=3
+StartLimitBurst=30
+RestartSec=20s
 
 [Install]
 WantedBy=ceph-osd.target

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-ceph/ceph.git