[Pkg-ganeti-devel] [ganeti] 04/05: Add missing QA files

Apollon Oikonomopoulos apoikos at moszumanska.debian.org
Sat Jan 2 13:13:12 UTC 2016


This is an automated email from the git hooks/post-receive script.

apoikos pushed a commit to branch debian/stable/jessie
in repository ganeti.

commit cfb1112ca02abd78bdd03d1f03aeebcf7224adb3
Author: Apollon Oikonomopoulos <apoikos at debian.org>
Date:   Wed Dec 23 11:19:51 2015 +0200

    Add missing QA files
    
    These files are not present in the upstream tarball, so import them from
    git under debian/tests. Note that this is needed only on source
    versions using upstream tarballs; after switching to upstream git, all
    QA files are present in the Debian sources.
---
 debian/copyright                  |   4 +
 debian/tests/qa/colors.py         | 114 +++++++
 debian/tests/qa/patch/order       |   0
 debian/tests/qa/qa-patch.json     |   1 +
 debian/tests/qa/qa_iptables.py    | 116 +++++++
 debian/tests/qa/qa_network.py     | 122 ++++++++
 debian/tests/qa/qa_performance.py | 625 ++++++++++++++++++++++++++++++++++++++
 debian/tests/vcluster-qa          |   1 +
 8 files changed, 983 insertions(+)

diff --git a/debian/copyright b/debian/copyright
index a887d46..8d1e436 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -11,6 +11,10 @@ Copyright: Copyright (c) 2007 Leonardo Rodrigues de Mello <l at lmello.eu.org>
            Copyright (c) 2007-2015 Debian Ganeti Team <pkg-ganeti at lists.alioth.debian.org>
 License: GPL-2+
 
+Files: debian/tests/qa/*
+Copyright: Copyright (c) 2013-2014 Google Inc.
+License: BSD-2-Clause
+
 Files: doc/html/_static/basic.css
        doc/html/_static/default.css
        doc/html/_static/doctools.js
diff --git a/debian/tests/qa/colors.py b/debian/tests/qa/colors.py
new file mode 100644
index 0000000..4e9b4b6
--- /dev/null
+++ b/debian/tests/qa/colors.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python -u
+#
+
+# Copyright (C) 2013 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Script for adding colorized output to Ganeti.
+
+Colors are enabled only if the standard output is a proper terminal.
+(Or call check_for_colors() to make a thorough test using "tput".)
+
+See http://en.wikipedia.org/wiki/ANSI_escape_code for more possible additions.
+"""
+
+import os
+import subprocess
+import sys
+
+# SGR (Select Graphic Rendition) attribute codes.
+DEFAULT = "0"
+BOLD = "1"
+UNDERLINE = "4"
+REVERSE = "7"
+
+# Foreground color codes (30-37).
+BLACK = "30"
+RED = "31"
+GREEN = "32"
+YELLOW = "33"
+BLUE = "34"
+MAGENTA = "35"
+CYAN = "36"
+WHITE = "37"
+
+# Background color codes (40-47).
+BG_BLACK = "40"
+BG_RED = "41"
+BG_GREEN = "42"
+BG_YELLOW = "43"
+BG_BLUE = "44"
+BG_MAGENTA = "45"
+BG_CYAN = "46"
+BG_WHITE = "47"
+
+# Colors are emitted only when stdout is a terminal; check_for_colors() may
+# refine this decision later using "tput".
+_enabled = sys.stdout.isatty()
+
+
+def _escape_one(code):
+  return "\033[" + code + "m" if code else ""
+
+
def _escape(codes):
  """Return the escape sequence for one SGR code or an iterable of codes."""
  if not hasattr(codes, "__iter__"):
    return _escape_one(codes)
  return _escape_one(";".join(codes))
+
+
def _reset():
  """Return the escape sequence that resets all display attributes."""
  return _escape([DEFAULT])
+
+
def colorize(line, color=None):
  """Wraps a given string into ANSI color codes corresponding to given
  color(s).

  @param line: a string
  @param color: a color or a list of colors selected from this module's
    constants

  If coloring is globally disabled, or no color was requested, the string is
  returned unchanged.
  """
  if not (_enabled and color):
    return line
  return _escape(color) + line + _reset()
+
+
+def check_for_colors():
+  """Try calling 'tput' to reliably determine whether the terminal has colors.
+
+  This function is meant to be run once at the program's start. If not
+  invoked, colors are enabled iff standard output is a terminal.
+  """
+  colors = 0
+  if sys.stdout.isatty():
+    try:
+      # "tput colors" prints the number of colors the terminal supports.
+      p = subprocess.Popen(["tput", "colors"], stdout=subprocess.PIPE)
+      output = p.communicate()[0]
+      if p.returncode == 0:
+        # NOTE(review): under Python 3 'output' is bytes and int(bytes)
+        # raises TypeError, which is not caught below -- presumably this
+        # file targets Python 2 (see shebang); confirm before porting.
+        colors = int(output)
+    except (OSError, ValueError):
+      pass
+  # Require at least two colors before enabling colored output module-wide.
+  global _enabled
+  _enabled = (colors >= 2)
diff --git a/debian/tests/qa/patch/order b/debian/tests/qa/patch/order
new file mode 100644
index 0000000..e69de29
diff --git a/debian/tests/qa/qa-patch.json b/debian/tests/qa/qa-patch.json
new file mode 100644
index 0000000..fe51488
--- /dev/null
+++ b/debian/tests/qa/qa-patch.json
@@ -0,0 +1 @@
+[]
diff --git a/debian/tests/qa/qa_iptables.py b/debian/tests/qa/qa_iptables.py
new file mode 100644
index 0000000..6379091
--- /dev/null
+++ b/debian/tests/qa/qa_iptables.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python -u
+#
+
+# Copyright (C) 2013 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Manipulates nodes using `iptables` to simulate non-standard network
+conditions.
+
+"""
+
+import uuid
+
+import qa_config
+import qa_utils
+
+from qa_utils import AssertCommand
+
+# Comment string attached (via "-m comment --comment") to every iptables rule
+# created by this module, so the rules can later be found and removed.
+IPTABLES_COMMENT_MARKER = "ganeti_qa_script"
+
+
+class RulesContext(object):
+  """Context manager tracking iptables rules added on a set of nodes.
+
+  On exit, every rule tagged with this context's unique marker is removed
+  from all nodes that had rules appended via the returned L{Rules} object.
+  """
+
+  def __init__(self, nodes):
+    # NOTE(review): the 'nodes' argument is ignored; the node set is filled
+    # lazily through _AddNode() as rules are appended -- confirm intended.
+    self._nodes = set()
+
+  def __enter__(self):
+    # Unique per-context marker so only rules created inside this context are
+    # cleaned up on exit.
+    self._marker = IPTABLES_COMMENT_MARKER + "_" + str(uuid.uuid4())
+    return Rules(self)
+
+  def __exit__(self, ext_type, exc_val, exc_tb):
+    # Clean up unconditionally, whether or not the body raised.
+    CleanRules(self._nodes, self._marker)
+
+  def _AddNode(self, node):
+    self._nodes.add(node)
+
+
class Rules(object):
  """Allows to introduce iptable rules and dispose them at the end of a block.

  Don't instantiate this class directly. Use `with RulesContext() as r` instead.
  """

  def __init__(self, ctx=None):
    self._ctx = ctx
    # Without an owning context, fall back to the shared default marker.
    self._marker = IPTABLES_COMMENT_MARKER if ctx is None else ctx._marker

  def _AddNode(self, node):
    # Record the node on the owning context (if any) so its rules get
    # cleaned up when the context exits.
    if self._ctx is not None:
      self._ctx._AddNode(node)

  def AppendRule(self, node, chain, rule, table="filter"):
    """Appends an `iptables` rule to a given node
    """
    cmd = ["iptables", "-t", table, "-A", chain]
    cmd.extend(rule)
    # Tag the rule with our marker so CleanRules() can find it later.
    cmd.extend(["-m", "comment", "--comment", self._marker])
    AssertCommand(cmd, node=node)
    self._AddNode(node)

  def RedirectPort(self, node, host, port, new_port):
    """Adds a rule to a master node that makes a destination host+port visible
    under a different port number.

    """
    dnat_rule = ["--protocol", "tcp",
                 "--destination", host, "--dport", str(port),
                 "--jump", "DNAT",
                 "--to-destination", ":" + str(new_port)]
    self.AppendRule(node, "OUTPUT", dnat_rule, table="nat")


# Shared rule set using the default marker; cleaned by CleanRules() defaults.
GLOBAL_RULES = Rules()
+
+
+def CleanRules(nodes, marker=IPTABLES_COMMENT_MARKER):
+  """Removes all QA `iptables` rules matching a given marker from given nodes.
+
+  If no marker is given, the global default is used, which cleans all custom
+  markers.
+
+  @param nodes: a single node or an iterable of nodes to clean
+  @param marker: comment string identifying the rules to remove
+  """
+  if not hasattr(nodes, '__iter__'):
+    # A single node was passed in; wrap it. NOTE(review): this relies on
+    # Python 2 strings lacking __iter__ -- under Python 3 a node-name string
+    # would instead be iterated per character; confirm Python 2 only.
+    nodes = [nodes]
+  for node in nodes:
+    # Drop matching rules by round-tripping through iptables-save/-restore.
+    AssertCommand(("iptables-save | grep -v '%s' | iptables-restore" %
+                    (marker, )),
+                  node=node)
diff --git a/debian/tests/qa/qa_network.py b/debian/tests/qa/qa_network.py
new file mode 100644
index 0000000..0169d77
--- /dev/null
+++ b/debian/tests/qa/qa_network.py
@@ -0,0 +1,122 @@
+#
+#
+
+# Copyright (C) 2013 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""QA tests for networks.
+
+"""
+
+import qa_config
+import qa_tags
+import qa_utils
+
+from ganeti import query
+
+from qa_utils import AssertCommand
+
+
def TestNetworkList():
  """gnt-network list"""
  fields = query.NETWORK_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-network", fields)
+
+
def TestNetworkListFields():
  """gnt-network list-fields"""
  fields = query.NETWORK_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-network", fields)
+
+
+def GetNonexistentNetworks(count):
+  """Gets network names which shouldn't exist on the cluster.
+
+  @param count: Number of networks to get
+  @return: the generated names, as produced by
+    L{qa_utils.GetNonexistentEntityNames}; callers unpack the result,
+    e.g. C{(net1, net2) = GetNonexistentNetworks(2)}
+
+  """
+  return qa_utils.GetNonexistentEntityNames(count, "networks", "network")
+
+
def TestNetworkAddRemove():
  """gnt-network add/remove"""
  (network1, network2) = GetNonexistentNetworks(2)

  # Add some networks of different sizes.
  # Note: Using RFC5737 addresses.
  AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/30", network1])
  AssertCommand(["gnt-network", "add", "--network", "198.51.100.0/24",
                 network2])
  # Try to add a network with an existing name; this must fail regardless of
  # the subnet. Fixed: 203.0.133.0/24 is not an RFC5737 documentation range,
  # use TEST-NET-3 (203.0.113.0/24) instead.
  AssertCommand(["gnt-network", "add", "--network", "203.0.113.0/24", network2],
                fail=True)

  TestNetworkList()
  TestNetworkListFields()

  AssertCommand(["gnt-network", "remove", network1])
  AssertCommand(["gnt-network", "remove", network2])

  TestNetworkList()
+
+
def TestNetworkTags():
  """gnt-network tags"""
  # Create a throwaway network, run the generic tag tests on it, remove it.
  (net, ) = GetNonexistentNetworks(1)
  AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/30", net])
  qa_tags.TestNetworkTags(net)
  AssertCommand(["gnt-network", "remove", net])
+
+
def TestNetworkConnect():
  """gnt-network connect/disconnect"""
  (group1, ) = qa_utils.GetNonexistentGroups(1)
  (network1, ) = GetNonexistentNetworks(1)

  # Fall back to bridged/xen-br0 unless the QA config overrides them.
  mode = "bridged"
  link = "xen-br0"
  cfg_nicparams = qa_config.get("default-nicparams")
  if cfg_nicparams:
    mode = cfg_nicparams.get("mode", mode)
    link = cfg_nicparams.get("link", link)

  nicparams = "mode=%s,link=%s" % (mode, link)

  AssertCommand(["gnt-group", "add", group1])
  AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/24", network1])

  AssertCommand(["gnt-network", "connect", "--nic-parameters", nicparams,
                network1, group1])

  TestNetworkList()

  AssertCommand(["gnt-network", "disconnect", network1, group1])

  # Tear down in reverse order of creation.
  AssertCommand(["gnt-group", "remove", group1])
  AssertCommand(["gnt-network", "remove", network1])
diff --git a/debian/tests/qa/qa_performance.py b/debian/tests/qa/qa_performance.py
new file mode 100644
index 0000000..d6eea0a
--- /dev/null
+++ b/debian/tests/qa/qa_performance.py
@@ -0,0 +1,625 @@
+#
+#
+
+# Copyright (C) 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Performance testing QA tests.
+
+"""
+
+import datetime
+import functools
+import itertools
+import threading
+import time
+
+from ganeti import constants
+
+import qa_config
+import qa_error
+from qa_instance_utils import GetGenericAddParameters
+import qa_job_utils
+import qa_logging
+import qa_utils
+
+
+# Maximum acceptable wall-clock time, in seconds, for submitting a single
+# job; _ExecuteJobSubmittingCmd warns when a submission takes longer.
+MAX_JOB_SUBMISSION_DURATION = 15.0
+
+
+class _JobQueueDriver(object):
+  """This class handles polling of jobs and reacting on status changes.
+
+  Jobs are added via the L{AddJob} method, and can have callback functions
+  assigned to them. Those are called as soon as the job enters the appropriate
+  state. Callback functions can add new jobs to the driver as needed.
+
+  A call to L{WaitForCompletion} finally polls Ganeti until all jobs have
+  succeeded.
+
+  """
+
+  # Placeholder status for jobs whose real status has not been fetched yet.
+  _UNKNOWN_STATUS = "unknown"
+
+  class _JobEntry(object):
+    """Internal class representing a job entry.
+
+    """
+    def __init__(self, job_id, running_fn, success_fn):
+      self.job_id = job_id
+      self.running_fn = running_fn
+      self.success_fn = success_fn
+
+    def __str__(self):
+      return str(self.job_id)
+
+  def __init__(self):
+    # Maps job id -> _JobEntry for every job not yet completed successfully.
+    self._jobs = {}
+    # Job ids for which running_fn has already been invoked.
+    self._running_notified = set()
+    # Maps status -> list of _JobEntry; holds raw job ids (not entries) for
+    # jobs added since the last _FetchJobStatuses call, under _UNKNOWN_STATUS.
+    self._jobs_per_status = {}
+    self._lock = threading.RLock()
+
+  def AddJob(self, job_id, running_fn=None, success_fn=None):
+    """Add a job to the driver.
+
+    @type job_id: int
+    @param job_id: job id to add to the driver
+    @type running_fn: function taking a L{_JobQueueDriver} and an int
+    @param running_fn: function called once when a job changes to running state
+                       (or success state, if the running state was too short)
+    @type success_fn: function taking a L{_JobQueueDriver} and an int
+    @param success_fn: function called for each successful job id
+
+    """
+    with self._lock:
+      self._jobs[job_id] = _JobQueueDriver._JobEntry(job_id,
+                                                     running_fn,
+                                                     success_fn)
+      # the status will be updated on the next call to _FetchJobStatuses
+      self._jobs_per_status.setdefault(self._UNKNOWN_STATUS, []).append(job_id)
+
+  def _FetchJobStatuses(self):
+    """Retrieves status information of the given jobs.
+
+    """
+    job_statuses = qa_job_utils.GetJobStatuses(self._GetJobIds())
+
+    # Rebuild the status map from scratch from the reported statuses; this
+    # also replaces any raw job ids left by AddJob with real _JobEntry values.
+    new_statuses = {}
+    for job_id, status in job_statuses.items():
+      new_statuses.setdefault(status, []).append(self._jobs[int(job_id)])
+    self._jobs_per_status = new_statuses
+
+  def _GetJobIds(self):
+    return list(self._jobs.keys())
+
+  def _GetJobsInStatuses(self, statuses):
+    """Returns a list of L{_JobEntry} of all jobs in the given statuses.
+
+    @type statuses: iterable of strings
+    @param statuses: jobs in those statuses are returned
+    @rtype: list of L{_JobEntry}
+    @return: list of job entries in the requested statuses
+
+    """
+    ret = []
+    for state in statuses:
+      ret.extend(self._jobs_per_status.get(state, []))
+    return ret
+
+  def _UpdateJobStatuses(self):
+    """Retrieves job statuses from the cluster and updates internal state.
+
+    """
+    self._FetchJobStatuses()
+    # Fail fast if any tracked job ended up in error state.
+    error_jobs = self._GetJobsInStatuses([constants.JOB_STATUS_ERROR])
+    if error_jobs:
+      raise qa_error.Error(
+        "Jobs %s are in error state!" % [job.job_id for job in error_jobs])
+
+    # Fire running_fn exactly once per job; a job that went straight to
+    # success still gets its running notification here.
+    for job in self._GetJobsInStatuses([constants.JOB_STATUS_RUNNING,
+                                        constants.JOB_STATUS_SUCCESS]):
+      if job.job_id not in self._running_notified:
+        if job.running_fn is not None:
+          job.running_fn(self, job.job_id)
+        self._running_notified.add(job.job_id)
+
+    for job in self._GetJobsInStatuses([constants.JOB_STATUS_SUCCESS]):
+      if job.success_fn is not None:
+        job.success_fn(self, job.job_id)
+
+      # we're done with this job
+      del self._jobs[job.job_id]
+
+  def _HasPendingJobs(self):
+    """Checks if there are still jobs pending.
+
+    @rtype: bool
+    @return: C{True} if there are still jobs which have not succeeded
+
+    """
+    with self._lock:
+      self._UpdateJobStatuses()
+      uncompleted_jobs = self._GetJobsInStatuses(
+        constants.JOB_STATUS_ALL - constants.JOBS_FINALIZED)
+      unknown_jobs = self._GetJobsInStatuses([self._UNKNOWN_STATUS])
+      return len(uncompleted_jobs) > 0 or len(unknown_jobs) > 0
+
+  def WaitForCompletion(self):
+    """Wait for the completion of all registered jobs.
+
+    """
+    # Poll every two seconds until nothing is pending or status-unknown.
+    while self._HasPendingJobs():
+      time.sleep(2)
+
+    with self._lock:
+      if self._jobs:
+        raise qa_error.Error(
+          "Jobs %s didn't finish in success state!" % self._GetJobIds())
+
+
def _AcquireAllInstances():
  """Generator for acquiring all instances in the QA config.

  Stops silently once the config runs out of instances.
  """
  while True:
    try:
      yield qa_config.AcquireInstance()
    except qa_error.OutOfInstancesError:
      return
+
+
def _AcquireAllNodes():
  """Generator for acquiring all nodes in the QA config.

  Stops silently once the config runs out of nodes.
  """
  taken = []
  while True:
    try:
      node = qa_config.AcquireNode(exclude=taken)
    except qa_error.OutOfNodesError:
      return
    # Remember what we already hold so the next acquisition skips it.
    taken.append(node)
    yield node
+
+
def _ExecuteJobSubmittingCmd(cmd):
  """Executes a job submitting command and returns the resulting job ID.

  A warning is printed if submission takes longer than
  L{MAX_JOB_SUBMISSION_DURATION}.

  @type cmd: list of string or string
  @param cmd: the job producing command to execute on the cluster
  @rtype: int
  @return: job-id

  """
  start = datetime.datetime.now()
  job_id = qa_job_utils.ExecuteJobProducingCommand(cmd)
  elapsed = qa_utils.TimedeltaToTotalSeconds(datetime.datetime.now() - start)
  if elapsed > MAX_JOB_SUBMISSION_DURATION:
    warning = ("Executing '%s' took %f seconds, a maximum of %f was expected" %
               (cmd, elapsed, MAX_JOB_SUBMISSION_DURATION))
    print(qa_logging.FormatWarning(warning))
  return job_id
+
+
def _SubmitInstanceCreationJob(instance, disk_template=None):
  """Submit an instance creation job.

  @type instance: L{qa_config._QaInstance}
  @param instance: instance to submit a create command for
  @type disk_template: string
  @param disk_template: disk template for the new instance or C{None} which
                        causes the default disk template to be used
  @rtype: int
  @return: job id of the submitted creation job

  """
  if disk_template is None:
    disk_template = qa_config.GetDefaultDiskTemplate()
  try:
    cmd = ["gnt-instance", "add", "--submit", "--opportunistic-locking",
           "--os-type=%s" % qa_config.get("os"),
           "--disk-template=%s" % disk_template]
    cmd.extend(GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)

    instance.SetDiskTemplate(disk_template)

    return _ExecuteJobSubmittingCmd(cmd)
  except:
    # Give the config slot back on any failure, then re-raise unchanged.
    instance.Release()
    raise
+
+
def _SubmitInstanceRemoveJob(instance):
  """Submit an instance remove job.

  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to remove
  @rtype: int
  @return: job id of the submitted remove job

  """
  try:
    cmd = ["gnt-instance", "remove", "--submit", "-f", instance.name]
    return _ExecuteJobSubmittingCmd(cmd)
  finally:
    # The config slot is released as soon as the job has been submitted.
    instance.Release()
+
+
+def _TestParallelInstanceCreationAndRemoval(max_instances=None,
+                                            disk_template=None,
+                                            custom_job_driver=None):
+  """Tests parallel creation and immediate removal of instances.
+
+  @type max_instances: int
+  @param max_instances: maximum number of instances to create
+  @type disk_template: string
+  @param disk_template: disk template for the new instances or C{None} which
+                        causes the default disk template to be used
+  @type custom_job_driver: _JobQueueDriver
+  @param custom_job_driver: a custom L{_JobQueueDriver} to use if not L{None}.
+                            If one is specified, C{WaitForCompletion} is _not_
+                            called on it.
+
+  """
+  job_driver = custom_job_driver or _JobQueueDriver()
+
+  # As soon as a creation job succeeds, immediately submit the matching
+  # removal job and track it on the same driver.
+  def _CreateSuccessFn(instance, job_driver, _):
+    job_id = _SubmitInstanceRemoveJob(instance)
+    job_driver.AddJob(job_id)
+
+  instance_generator = _AcquireAllInstances()
+  if max_instances is not None:
+    # Cap the number of instances taken from the QA config.
+    instance_generator = itertools.islice(instance_generator, max_instances)
+
+  for instance in instance_generator:
+    job_id = _SubmitInstanceCreationJob(instance, disk_template=disk_template)
+    # Bind the instance into the success callback; the driver supplies the
+    # remaining (driver, job_id) arguments when the job succeeds.
+    job_driver.AddJob(
+      job_id, success_fn=functools.partial(_CreateSuccessFn, instance))
+
+  if custom_job_driver is None:
+    job_driver.WaitForCompletion()
+
+
def TestParallelMaxInstanceCreationPerformance():
  """PERFORMANCE: Parallel instance creation (instance count = max).

  """
  # No limit given: create as many instances as the QA config provides.
  _TestParallelInstanceCreationAndRemoval()
+
+
def TestParallelNodeCountInstanceCreationPerformance():
  """PERFORMANCE: Parallel instance creation (instance count = node count).

  """
  acquired_nodes = list(_AcquireAllNodes())
  _TestParallelInstanceCreationAndRemoval(max_instances=len(acquired_nodes))
  qa_config.ReleaseManyNodes(acquired_nodes)
+
+
def CreateAllInstances():
  """Create all instances configured in QA config in the cluster.

  @rtype: list of L{qa_config._QaInstance}
  @return: list of instances created in the cluster

  """
  driver = _JobQueueDriver()
  instances = list(_AcquireAllInstances())
  for inst in instances:
    driver.AddJob(_SubmitInstanceCreationJob(inst))

  driver.WaitForCompletion()
  return instances
+
+
def RemoveAllInstances(instances):
  """Removes all given instances from the cluster.

  @type instances: list of L{qa_config._QaInstance}
  @param instances: instances to remove

  """
  driver = _JobQueueDriver()
  for inst in instances:
    driver.AddJob(_SubmitInstanceRemoveJob(inst))

  driver.WaitForCompletion()
+
+
def TestParallelModify(instances):
  """PERFORMANCE: Parallel instance modify.

  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue modify commands against

  """
  job_driver = _JobQueueDriver()
  # Set the minimum memory to the configured maximum memory.
  new_min_mem = qa_config.get(constants.BE_MAXMEM)
  be_opt = "%s=%s" % (constants.BE_MINMEM, new_min_mem)
  os_opt = "fake_os_param=fake_value"

  # Three modify variants per instance: backend-only, OS-only, and both.
  option_sets = [
    ["-B", be_opt],
    ["-O", os_opt],
    ["-O", os_opt, "-B", be_opt],
  ]
  for instance in instances:
    for options in option_sets:
      cmd = ["gnt-instance", "modify", "--submit"] + options + [instance.name]
      job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))

  job_driver.WaitForCompletion()
+
+
+def TestParallelInstanceOSOperations(instances):
+  """PERFORMANCE: Parallel instance OS operations.
+
+  Note: This test leaves the instances either running or stopped, there's no
+  guarantee on the actual status.
+
+  @type instances: list of L{qa_config._QaInstance}
+  @param instances: list of instances to issue lifecycle commands against
+
+  """
+  OPS = ["start", "shutdown", "reboot", "reinstall"]
+  job_driver = _JobQueueDriver()
+
+  # Runs each operation in OPS once per instance, chaining the next operation
+  # as a callback of the previous one; 'start' rotates the starting index so
+  # different instances execute the operations in different orders.
+  def _SubmitNextOperation(instance, start, idx, job_driver, _):
+    if idx == len(OPS):
+      return
+    op_idx = (start + idx) % len(OPS)
+
+    next_fn = functools.partial(_SubmitNextOperation, instance, start, idx + 1)
+
+    if OPS[op_idx] == "reinstall" and \
+        instance.disk_template == constants.DT_DISKLESS:
+      # no reinstall possible with diskless instances
+      next_fn(job_driver, None)
+      return
+    elif OPS[op_idx] == "reinstall":
+      # the instance has to be shut down for reinstall to work
+      shutdown_cmd = ["gnt-instance", "shutdown", "--submit", instance.name]
+      cmd = ["gnt-instance", "reinstall", "--submit", "-f", instance.name]
+
+      # Chain: shutdown -> reinstall -> next operation, each triggered when
+      # the previous job starts running.
+      job_driver.AddJob(_ExecuteJobSubmittingCmd(shutdown_cmd),
+                        running_fn=lambda _, __: job_driver.AddJob(
+                          _ExecuteJobSubmittingCmd(cmd),
+                          running_fn=next_fn))
+    else:
+      cmd = ["gnt-instance", OPS[op_idx], "--submit"]
+      if OPS[op_idx] == "reinstall":
+        cmd.append("-f")
+      cmd.append(instance.name)
+
+      job_id = _ExecuteJobSubmittingCmd(cmd)
+      job_driver.AddJob(job_id, running_fn=next_fn)
+
+  for start, instance in enumerate(instances):
+    _SubmitNextOperation(instance, start % len(OPS), 0, job_driver, None)
+
+  job_driver.WaitForCompletion()
+
+
def TestParallelInstanceQueries(instances):
  """PERFORMANCE: Parallel instance queries.

  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue queries against

  """
  group = qa_job_utils.QAThreadGroup()
  for instance in instances:
    # For each instance, run one targeted "info" query plus one full "list".
    info_cmd = ["gnt-instance", "info", instance.name]
    group.Start(qa_job_utils.QAThread(qa_utils.AssertCommand, [info_cmd], {}))

    list_cmd = ["gnt-instance", "list"]
    group.Start(qa_job_utils.QAThread(qa_utils.AssertCommand, [list_cmd], {}))

  group.JoinAndReraise()
+
+
+def TestJobQueueSubmissionPerformance():
+  """PERFORMANCE: Job queue submission performance.
+
+  This test exercises the job queue and verifies that the job submission time
+  does not increase as more jobs are added.
+
+  """
+  MAX_CLUSTER_INFO_SECONDS = 15.0
+  job_driver = _JobQueueDriver()
+  # Durations of the first few submissions; used as the baseline average.
+  submission_durations = []
+
+  # Collects the first ten durations as a baseline, then warns whenever a
+  # later submission takes more than 1.5x the baseline average.
+  def _VerifySubmissionDuration(duration_seconds):
+    # only start to verify the submission duration once we got data from the
+    # first 10 job submissions
+    if len(submission_durations) >= 10:
+      avg_duration = sum(submission_durations) / len(submission_durations)
+      max_duration = avg_duration * 1.5
+      if duration_seconds > max_duration:
+        print(qa_logging.FormatWarning(
+          "Submitting a delay job took %f seconds, max %f expected" %
+          (duration_seconds, max_duration)))
+    else:
+      submission_durations.append(duration_seconds)
+
+  # Submits 'count' short delay jobs, timing each individual submission.
+  def _SubmitDelayJob(count):
+    for _ in range(count):
+      cmd = ["gnt-debug", "delay", "--submit", "0.1"]
+
+      start = datetime.datetime.now()
+      job_id = _ExecuteJobSubmittingCmd(cmd)
+      duration_seconds = \
+        qa_utils.TimedeltaToTotalSeconds(datetime.datetime.now() - start)
+      _VerifySubmissionDuration(duration_seconds)
+
+      job_driver.AddJob(job_id)
+
+  # Ten threads submitting twenty jobs each: 200 jobs in total.
+  threads = qa_job_utils.QAThreadGroup()
+  for i in range(10):
+    thread = qa_job_utils.QAThread(_SubmitDelayJob, [20], {})
+    threads.Start(thread)
+
+  threads.JoinAndReraise()
+
+  # The master daemon should still answer queries quickly under this load.
+  qa_utils.AssertCommand(["gnt-cluster", "info"],
+                         max_seconds=MAX_CLUSTER_INFO_SECONDS)
+
+  job_driver.WaitForCompletion()
+
+
def TestParallelDRBDInstanceCreationPerformance():
  """PERFORMANCE: Parallel DRBD backed instance creation.

  """
  assert qa_config.IsTemplateSupported(constants.DT_DRBD8)

  acquired_nodes = list(_AcquireAllNodes())
  instance_count = 2 * len(acquired_nodes)
  _TestParallelInstanceCreationAndRemoval(max_instances=instance_count,
                                          disk_template=constants.DT_DRBD8)
  qa_config.ReleaseManyNodes(acquired_nodes)
+
+
def TestParallelPlainInstanceCreationPerformance():
  """PERFORMANCE: Parallel plain backed instance creation.

  """
  assert qa_config.IsTemplateSupported(constants.DT_PLAIN)

  acquired_nodes = list(_AcquireAllNodes())
  instance_count = 2 * len(acquired_nodes)
  _TestParallelInstanceCreationAndRemoval(max_instances=instance_count,
                                          disk_template=constants.DT_PLAIN)
  qa_config.ReleaseManyNodes(acquired_nodes)
+
+
+def _TestInstanceOperationInParallelToInstanceCreation(*cmds):
+  """Run the given test command in parallel to an instance creation.
+
+  @type cmds: list of list of strings
+  @param cmds: commands to execute in parallel to an instance creation. Each
+               command in the list is executed once the previous job has
+               completed successfully.
+
+  """
+  # Submits cmds[cmd_idx] and chains submission of the following command as a
+  # success callback of the resulting job.
+  def _SubmitNextCommand(cmd_idx, job_driver, _):
+    if cmd_idx >= len(cmds):
+      return
+    job_id = _ExecuteJobSubmittingCmd(cmds[cmd_idx])
+    job_driver.AddJob(
+      job_id, success_fn=functools.partial(_SubmitNextCommand, cmd_idx + 1))
+
+  assert qa_config.IsTemplateSupported(constants.DT_DRBD8)
+  assert len(cmds) > 0
+
+  job_driver = _JobQueueDriver()
+  # Kick off the command chain before starting the instance creation.
+  _SubmitNextCommand(0, job_driver, None)
+
+  # Create (and remove) a single DRBD instance while the chain runs, sharing
+  # the job driver so all jobs are awaited together below.
+  _TestParallelInstanceCreationAndRemoval(max_instances=1,
+                                          disk_template=constants.DT_DRBD8,
+                                          custom_job_driver=job_driver)
+
+  job_driver.WaitForCompletion()
+
+
def TestParallelInstanceFailover(instance):
  """PERFORMANCE: Instance failover with parallel instance creation.

  """
  failover_cmd = ["gnt-instance", "failover", "--submit", "-f",
                  "--shutdown-timeout=0", instance.name]
  _TestInstanceOperationInParallelToInstanceCreation(failover_cmd)
+
+
def TestParallelInstanceMigration(instance):
  """PERFORMANCE: Instance migration with parallel instance creation.

  """
  migrate_cmd = ["gnt-instance", "migrate", "--submit", "-f", instance.name]
  _TestInstanceOperationInParallelToInstanceCreation(migrate_cmd)
+
+
def TestParallelInstanceReplaceDisks(instance):
  """PERFORMANCE: Instance replace-disks with parallel instance creation.

  """
  replace_cmd = ["gnt-instance", "replace-disks", "--submit",
                 "--early-release", "-p", instance.name]
  _TestInstanceOperationInParallelToInstanceCreation(replace_cmd)
+
+
def TestParallelInstanceReboot(instance):
  """PERFORMANCE: Instance reboot with parallel instance creation.

  """
  reboot_cmd = ["gnt-instance", "reboot", "--submit", instance.name]
  _TestInstanceOperationInParallelToInstanceCreation(reboot_cmd)
+
+
def TestParallelInstanceReinstall(instance):
  """PERFORMANCE: Instance reinstall with parallel instance creation.

  """
  # Reinstall requires the instance to be stopped; restart it afterwards.
  qa_utils.AssertCommand(["gnt-instance", "stop", instance.name])

  reinstall_cmd = ["gnt-instance", "reinstall", "--submit", "-f",
                   instance.name]
  _TestInstanceOperationInParallelToInstanceCreation(reinstall_cmd)

  qa_utils.AssertCommand(["gnt-instance", "start", instance.name])
+
+
def TestParallelInstanceRename(instance):
  """PERFORMANCE: Instance rename with parallel instance creation.

  """
  # Rename requires the instance to be stopped; restart it afterwards.
  qa_utils.AssertCommand(["gnt-instance", "stop", instance.name])

  other = qa_config.AcquireInstance()
  try:
    # Rename back and forth so the cluster ends up unchanged.
    _TestInstanceOperationInParallelToInstanceCreation(
      ["gnt-instance", "rename", "--submit", instance.name, other.name],
      ["gnt-instance", "rename", "--submit", other.name, instance.name])
  finally:
    other.Release()

  qa_utils.AssertCommand(["gnt-instance", "start", instance.name])
diff --git a/debian/tests/vcluster-qa b/debian/tests/vcluster-qa
index 341996c..44d237e 100755
--- a/debian/tests/vcluster-qa
+++ b/debian/tests/vcluster-qa
@@ -28,4 +28,5 @@ export PYTHONPATH="/usr/share/ganeti/default"
 export _SYSTEMCTL_SKIP_REDIRECT=1
 
 # Run the QA suite
+cp -r debian/tests/qa/* qa/
 ./qa/ganeti-qa.py --yes-do-it debian/tests/vcluster-qa.json

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-ganeti/ganeti.git



More information about the Pkg-ganeti-devel mailing list