[Pkg-ganeti-devel] [ganeti] 16/165: Make snapshotting for exports optional
Apollon Oikonomopoulos
apoikos at moszumanska.debian.org
Tue Aug 11 13:53:08 UTC 2015
This is an automated email from the git hooks/post-receive script.
apoikos pushed a commit to branch master
in repository ganeti.
commit 01a9df2e40c304b5d25e1e0a8158879c4f55ce66
Author: Aaron Karper <akarper at google.com>
Date: Wed Oct 29 12:49:32 2014 +0100
Make snapshotting for exports optional
Snapshots are used only if the instance is not down; the reboot (if
any) is delayed until after the export.
Signed-off-by: Aaron Karper <akarper at google.com>
Reviewed-by: Hrvoje Ribicic <riba at google.com>
---
lib/backend.py | 19 ++++++---------
lib/cmdlib/backup.py | 63 +++++++++++++++++++++++++++++--------------------
lib/masterd/instance.py | 27 +++++++++++++--------
tools/move-instance | 3 ---
4 files changed, 62 insertions(+), 50 deletions(-)
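For orientation, the decision logic this change introduces can be summarised as
follows. This is a minimal sketch using plain booleans in place of the real
opcode/config objects; every function and parameter name in it is an
illustrative stand-in, not Ganeti API.

def instance_down(admin_up, shutdown_requested):
    # Mirrors LUBackupExport.InstanceDown(): the disks stay quiescent during
    # the transfer, either because the instance is already stopped or because
    # the export shuts it down.
    return (not admin_up) or shutdown_requested

def do_reboot(admin_up, shutdown_requested, remove_instance):
    # Mirrors LUBackupExport.DoReboot(): only restart an instance that the
    # export itself stopped and that is not being removed afterwards.
    return shutdown_requested and admin_up and not remove_instance

def plan_export(admin_up, shutdown_requested, remove_instance):
    # Follows the order of checks in the patched Exec(): snapshot unless the
    # instance is down for the transfer, restart before the transfer when a
    # snapshot exists and after it otherwise.
    use_snapshot = not instance_down(admin_up, shutdown_requested)
    reboot = do_reboot(admin_up, shutdown_requested, remove_instance)
    return (use_snapshot, reboot and use_snapshot, reboot and not use_snapshot)

# Example: a running instance exported with shutdown and kept afterwards is
# streamed without a snapshot and only started again once the transfer is done.
assert plan_export(True, True, False) == (False, False, True)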
diff --git a/lib/backend.py b/lib/backend.py
index 1e14635..da98b5a 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -4854,21 +4854,11 @@ def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
elif ieio == constants.IEIO_RAW_DISK:
(disk, ) = ieargs
- real_disk = _OpenRealBD(disk)
-
if mode == constants.IEM_IMPORT:
- # we use nocreat to fail if the device is not already there or we pass a
- # wrong path; we use notrunc to no attempt truncate on an LV device
- suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
- real_disk.dev_path,
- str(constants.DD_BLOCK_SIZE)) # 1 MB
+ suffix = utils.BuildShellCmd("| %s", disk.Import())
elif mode == constants.IEM_EXPORT:
- # the block size on the read dd is 1MiB to match our units
- prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
- real_disk.dev_path,
- str(constants.DD_BLOCK_SIZE), # 1 MB
- str(disk.size))
+ prefix = utils.BuildShellCmd("%s |", disk.Export())
exp_size = disk.size
elif ieio == constants.IEIO_SCRIPT:
@@ -4937,6 +4927,11 @@ def StartImportExportDaemon(mode, opts, host, port, instance, component,
@param ieioargs: Input/output arguments
"""
+
+ # Use Import/Export over socat.
+ #
+ # Export() gives a command that produces a flat stream.
+ # Import() gives a command that reads a flat stream to a disk template.
if mode == constants.IEM_IMPORT:
prefix = "import"
diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py
index b5fd26d..9183838 100644
--- a/lib/cmdlib/backup.py
+++ b/lib/cmdlib/backup.py
@@ -278,13 +278,6 @@ class LUBackupExport(LogicalUnit):
raise errors.ProgrammerError("Unhandled export mode %r" %
self.op.mode)
- # instance disk type verification
- # TODO: Implement export support for file-based disks
- for disk in self.cfg.GetInstanceDisks(self.instance.uuid):
- if disk.dev_type in constants.DTS_FILEBASED:
- raise errors.OpPrereqError("Export not supported for instances with"
- " file-based disks", errors.ECODE_INVAL)
-
# Check prerequisites for zeroing
if self.op.zero_free_space:
# Check that user shutdown detection has been enabled
@@ -423,6 +416,35 @@ class LUBackupExport(LogicalUnit):
feedback_fn("Zeroing completed!")
+ def StartInstance(self, feedback_fn, src_node_uuid):
+ """Send the node instructions to start the instance.
+
+ @raise errors.OpExecError: If the instance didn't start up.
+
+ """
+ assert self.instance.disks_active
+ feedback_fn("Starting instance %s" % self.instance.name)
+ result = self.rpc.call_instance_start(src_node_uuid,
+ (self.instance, None, None),
+ False, self.op.reason)
+ msg = result.fail_msg
+ if msg:
+ feedback_fn("Failed to start instance: %s" % msg)
+ ShutdownInstanceDisks(self, self.instance)
+ raise errors.OpExecError("Could not start instance: %s" % msg)
+
+ def InstanceDown(self):
+ """Returns true iff the instance is shut down during transfer."""
+ return (self.instance.admin_state != constants.ADMINST_UP or
+ self.op.shutdown)
+
+ def DoReboot(self):
+ """Returns true iff the instance needs to be started after transfer."""
+
+ return (self.op.shutdown and
+ self.instance.admin_state == constants.ADMINST_UP and
+ not self.op.remove_instance)
+
def Exec(self, feedback_fn):
"""Export an instance to an image in the cluster.
@@ -454,28 +476,16 @@ class LUBackupExport(LogicalUnit):
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
try:
+ snapshot = not self.InstanceDown()
helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
- self.instance)
+ self.instance, snapshot)
- will_be_shut_down = (self.instance.admin_state != constants.ADMINST_UP or
- self.op.shutdown)
- if (not will_be_shut_down or self.op.mode == constants.EXPORT_MODE_LOCAL):
+ if snapshot:
helper.CreateSnapshots()
- try:
- if (self.op.shutdown and
- self.instance.admin_state == constants.ADMINST_UP and
- not self.op.remove_instance):
- assert self.instance.disks_active
- feedback_fn("Starting instance %s" % self.instance.name)
- result = self.rpc.call_instance_start(src_node_uuid,
- (self.instance, None, None),
- False, self.op.reason)
- msg = result.fail_msg
- if msg:
- feedback_fn("Failed to start instance: %s" % msg)
- ShutdownInstanceDisks(self, self.instance)
- raise errors.OpExecError("Could not start instance: %s" % msg)
+ try:
+ if self.DoReboot() and snapshot:
+ self.StartInstance(feedback_fn, src_node_uuid)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
(fin_resu, dresults) = helper.LocalExport(self.dst_node,
self.op.compress)
@@ -493,6 +503,9 @@ class LUBackupExport(LogicalUnit):
key_name, dest_ca_pem,
self.op.compress,
timeouts)
+
+ if self.DoReboot() and not snapshot:
+ self.StartInstance(feedback_fn, src_node_uuid)
finally:
helper.Cleanup()
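The restart code that used to live inline in Exec() is factored into
StartInstance(); the behaviour it keeps is "try to start, and on failure
release the disks before failing the job". A standalone sketch of that
pattern, with dummy callables standing in for the RPC layer and the feedback
function:

class StartError(Exception):
    pass

def start_instance_or_cleanup(start_rpc, shutdown_disks, feedback_fn, name):
    # Try to start the instance; on failure, release its disks and fail loudly.
    feedback_fn("Starting instance %s" % name)
    err = start_rpc()  # stand-in for rpc.call_instance_start; returns an
                       # error message or None
    if err:
        feedback_fn("Failed to start instance: %s" % err)
        shutdown_disks()  # stand-in for ShutdownInstanceDisks()
        raise StartError("Could not start instance: %s" % err)

# Example with dummy callables: a failing start triggers the cleanup path.
try:
    start_instance_or_cleanup(lambda: "hypervisor error", lambda: None,
                              print, "instance1.example.com")
except StartError as exc:
    print(exc)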
diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py
index 7ae3716..4bfc514 100644
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1149,20 +1149,23 @@ class _RemoteExportCb(ImportExportCbBase):
class ExportInstanceHelper(object):
- def __init__(self, lu, feedback_fn, instance):
+ def __init__(self, lu, feedback_fn, instance, snapshot):
"""Initializes this class.
@param lu: Logical unit instance
@param feedback_fn: Feedback function
@type instance: L{objects.Instance}
@param instance: Instance object
+ @type snapshot: bool
+ @param snapshot: whether the export should use snapshotting
"""
self._lu = lu
self._feedback_fn = feedback_fn
self._instance = instance
+ self._snapshot = snapshot
- self._snap_disks = {}
+ self._disks_to_transfer = {}
self._removed_snaps = [False] * len(instance.disks)
def CreateSnapshots(self):
@@ -1171,7 +1174,7 @@ class ExportInstanceHelper(object):
Currently support drbd, plain and ext disk templates.
"""
- assert not self._snap_disks
+ assert not self._disks_to_transfer
instance = self._instance
src_node = instance.primary_node
@@ -1209,10 +1212,10 @@ class ExportInstanceHelper(object):
logical_id=disk_id, iv_name=disk.iv_name,
params=disk_params)
- assert idx not in self._snap_disks
- self._snap_disks[idx] = new_dev
+ assert idx not in self._disks_to_transfer
+ self._disks_to_transfer[idx] = new_dev
- assert len(self._snap_disks) == len(instance.disks)
+ assert len(self._disks_to_transfer) == len(instance.disks)
assert len(self._removed_snaps) == len(instance.disks)
def _RemoveSnapshot(self, disk_index):
@@ -1222,7 +1225,7 @@ class ExportInstanceHelper(object):
@param disk_index: Index of the snapshot to be removed
"""
- disk = self._snap_disks.get(disk_index)
+ disk = self._disks_to_transfer.get(disk_index)
if disk and not self._removed_snaps[disk_index]:
src_node = self._instance.primary_node
src_node_name = self._lu.cfg.GetNodeName(src_node)
@@ -1251,11 +1254,15 @@ class ExportInstanceHelper(object):
instance = self._instance
src_node_uuid = instance.primary_node
- assert len(self._snap_disks) == len(instance.disks)
+ if not self._snapshot:
+ disks = self._lu.cfg.GetInstanceDisks(instance.uuid)
+ self._disks_to_transfer = dict((i, disk) for i, disk in enumerate(disks))
+
+ assert len(self._disks_to_transfer) == len(instance.disks)
transfers = []
- for idx, dev in self._snap_disks.items():
+ for idx, dev in self._disks_to_transfer.items():
if not dev:
transfers.append(None)
continue
@@ -1290,7 +1297,7 @@ class ExportInstanceHelper(object):
self._feedback_fn("Finalizing export on %s" % dest_node.name)
result = self._lu.rpc.call_finalize_export(dest_node.uuid, instance,
- self._snap_disks)
+ self._disks_to_transfer.values())
msg = result.fail_msg
fin_resu = not msg
if msg:
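With the rename from _snap_disks to _disks_to_transfer, the helper keeps a
single index-to-disk mapping that is filled either by CreateSnapshots() or,
when snapshotting is disabled, straight from the instance's configured disks.
A minimal sketch of that selection, with placeholder values standing in for
the real Disk objects:

def disks_to_transfer(snapshot_disks, instance_disks, use_snapshot):
    # With snapshotting, the mapping filled by CreateSnapshots() is streamed;
    # without it, the instance's own disks are transferred directly (which is
    # why the instance must stay down until the transfer finishes).
    if use_snapshot:
        return dict(snapshot_disks)
    return dict(enumerate(instance_disks))

# Without a snapshot, disks 0 and 1 of the instance are exported as-is.
print(disks_to_transfer({}, ["disk0", "disk1"], use_snapshot=False))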
diff --git a/tools/move-instance b/tools/move-instance
index ba8f686..925e5a8 100755
--- a/tools/move-instance
+++ b/tools/move-instance
@@ -703,9 +703,6 @@ class MoveSourceExecutor(object):
logging.info("Retrieving instance information from source cluster")
instinfo = self._GetInstanceInfo(src_client, mrt.PollJob,
mrt.move.src_instance_name)
- if instinfo["disk_template"] in constants.DTS_FILEBASED:
- raise Error("Inter-cluster move of file-based instances is not"
- " supported.")
logging.info("Preparing export on source cluster")
expinfo = self._PrepareExport(src_client, mrt.PollJob,
--