[SCM] WebKit Debian packaging branch, debian/experimental, updated. upstream/1.3.3-10851-g50815da

yutak at chromium.org
Wed Dec 22 17:57:47 UTC 2010


The following commit has been merged in the debian/experimental branch:
commit f62918640646815782cf6f4f580d7adf1d5429ed
Author: yutak at chromium.org <yutak at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Fri Dec 3 11:34:28 2010 +0000

    2010-12-03  Sheriff Bot  <webkit.review.bot at gmail.com>
    
            Unreviewed, rolling out r73211.
            http://trac.webkit.org/changeset/73211
            https://bugs.webkit.org/show_bug.cgi?id=50450
    
            Broke Chromium's "Webkit Win (dbg)(2)" bot. (Requested by
            yutak on #webkit).
    
            * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@73260 268f45cc-cd09-0410-ab3c-d52691b4dbfc

diff --git a/WebKitTools/ChangeLog b/WebKitTools/ChangeLog
index fbba1ec..cf94f76 100644
--- a/WebKitTools/ChangeLog
+++ b/WebKitTools/ChangeLog
@@ -1,5 +1,16 @@
 2010-12-03  Sheriff Bot  <webkit.review.bot at gmail.com>
 
+        Unreviewed, rolling out r73211.
+        http://trac.webkit.org/changeset/73211
+        https://bugs.webkit.org/show_bug.cgi?id=50450
+
+        Broke Chromium's "Webkit Win (dbg)(2)" bot. (Requested by
+        yutak on #webkit).
+
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+
+2010-12-03  Sheriff Bot  <webkit.review.bot at gmail.com>
+
         Unreviewed, rolling out r73222.
         http://trac.webkit.org/changeset/73222
         https://bugs.webkit.org/show_bug.cgi?id=50449
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 65d0b09..fdb8da6 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -62,6 +62,150 @@ _log = logging.getLogger("webkitpy.layout_tests.layout_package."
                          "dump_render_tree_thread")
 
 
+def _expected_test_output(port, filename):
+    """Returns an expected TestOutput object."""
+    return test_output.TestOutput(port.expected_text(filename),
+                                  port.expected_image(filename),
+                                  port.expected_checksum(filename))
+
+def _process_output(port, options, test_input, test_types, test_args,
+                    test_output, worker_name):
+    """Receives the output from a DumpRenderTree process, subjects it to a
+    number of tests, and returns a list of failure types the test produced.
+
+    Args:
+      port: port-specific hooks
+      options: command line options argument from optparse
+      test_input: Object containing the test filename and timeout
+      test_types: list of test types to subject the output to
+      test_args: arguments to be passed to each test
+      test_output: a TestOutput object containing the output of the test
+      worker_name: worker name for logging
+
+    Returns: a TestResult object
+    """
+    failures = []
+
+    if test_output.crash:
+        failures.append(test_failures.FailureCrash())
+    if test_output.timeout:
+        failures.append(test_failures.FailureTimeout())
+
+    test_name = port.relative_test_filename(test_input.filename)
+    if test_output.crash:
+        _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name,
+                                                  test_output.error))
+        filename = os.path.join(options.results_directory, test_name)
+        filename = os.path.splitext(filename)[0] + "-stack.txt"
+        port.maybe_make_directory(os.path.split(filename)[0])
+        with codecs.open(filename, "wb", "utf-8") as file:
+            file.write(test_output.error)
+    elif test_output.error:
+        _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name,
+                                                       test_output.error))
+
+    expected_test_output = _expected_test_output(port, test_input.filename)
+
+    # Check the output and save the results.
+    start_time = time.time()
+    time_for_diffs = {}
+    for test_type in test_types:
+        start_diff_time = time.time()
+        new_failures = test_type.compare_output(port, test_input.filename,
+                                                test_args, test_output,
+                                                expected_test_output)
+        # Don't add any more failures if we already have a crash, so we don't
+        # double-report those tests. We do double-report for timeouts since
+        # we still want to see the text and image output.
+        if not test_output.crash:
+            failures.extend(new_failures)
+        time_for_diffs[test_type.__class__.__name__] = (
+            time.time() - start_diff_time)
+
+    total_time_for_all_diffs = time.time() - start_time
+    return test_results.TestResult(test_input.filename, failures, test_output.test_time,
+                                   total_time_for_all_diffs, time_for_diffs)
+
+
+def _pad_timeout(timeout):
+    """Returns a safe multiple of the per-test timeout value to use
+    to detect hung test threads.
+
+    """
+    # When we're running one test per DumpRenderTree process, we can
+    # enforce a hard timeout.  The DumpRenderTree watchdog uses 2.5x
+    # the timeout; we want to be larger than that.
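+    # For example, a 10000 msec per-test timeout means the DumpRenderTree
+    # watchdog fires after 25 seconds, while this thread waits 30 seconds.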
+    return timeout * 3
+
+
+def _milliseconds_to_seconds(msecs):
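+    """Convert a timeout in milliseconds (TestInput.timeout is a string
+    value in msecs) to a float number of seconds."""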
+    return float(msecs) / 1000.0
+
+
+def _should_fetch_expected_checksum(options):
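+    """Return True if the expected image checksum should be fetched.
+
+    We skip the fetch when generating new baselines so that the image
+    hash stays blank and a fresh image dump is produced (see the
+    comment in _run_single_test)."""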
+    return options.pixel_tests and not (options.new_baseline or options.reset_results)
+
+
+def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name):
+    # FIXME: Pull this into TestShellThread._run().
+
+    # The image hash is used to avoid doing an image dump if the
+    # checksums match, so it should be set to a blank value if we
+    # are generating a new baseline.  (Otherwise, an image from a
+    # previous run will be copied into the baseline.)
+    if _should_fetch_expected_checksum(options):
+        test_input.image_hash = port.expected_checksum(test_input.filename)
+    test_output = driver.run_test(test_input)
+    return _process_output(port, options, test_input, test_types, test_args,
+                           test_output, worker_name)
+
+
+class SingleTestThread(threading.Thread):
+    """Thread wrapper for running a single test file."""
+
+    def __init__(self, port, options, worker_number, worker_name,
+                 test_input, test_types, test_args):
+        """
+        Args:
+          port: object implementing port-specific hooks
+          options: command line argument object from optparse
+          worker_number: worker number for tests
+          worker_name: for logging
+          test_input: Object containing the test filename and timeout
+          test_types: A list of TestType objects to run the test output
+              against.
+          test_args: A TestArguments object to pass to each TestType.
+        """
+
+        threading.Thread.__init__(self)
+        self._port = port
+        self._options = options
+        self._test_input = test_input
+        self._test_types = test_types
+        self._test_args = test_args
+        self._driver = None
+        self._worker_number = worker_number
+        self._name = worker_name
+
+    def run(self):
+        self._covered_run()
+
+    def _covered_run(self):
+        # FIXME: this is a separate routine to work around a bug
+        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
+        self._driver = self._port.create_driver(self._worker_number)
+        self._driver.start()
+        self._test_result = _run_single_test(self._port, self._options,
+                                             self._test_input, self._test_types,
+                                             self._test_args, self._driver,
+                                             self._name)
+        self._driver.stop()
+
+    def get_test_result(self):
+        return self._test_result
+
+
 class WatchableThread(threading.Thread):
     """This class abstracts an interface used by
     run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread
@@ -119,19 +263,14 @@ class TestShellThread(WatchableThread):
         self._name = worker_name
         self._filename_list_queue = filename_list_queue
         self._result_queue = result_queue
-
-        self._batch_count = 0
-        self._batch_size = self._options.batch_size
+        self._filename_list = []
         self._driver = None
-        self._have_http_lock = False
-
-        self._test_runner = None
-        self._result_summary = None
-        self._test_list_timing_stats = {}
+        self._test_group_timing_stats = {}
         self._test_results = []
         self._num_tests = 0
         self._start_time = 0
         self._stop_time = 0
+        self._have_http_lock = False
         self._http_lock_wait_begin = 0
         self._http_lock_wait_end = 0
 
@@ -141,14 +280,12 @@ class TestShellThread(WatchableThread):
                                         self._options.results_directory))
         self._test_args = self._get_test_args(worker_number)
 
-        # Append tests we're running to the existing tests_run.txt file.
-        # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
-        tests_run_filename = os.path.join(self._options.results_directory,
-                                          "tests_run.txt")
-        self._tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
-
-    def __del__(self):
-        self._cleanup()
+        # Current group of tests we're running.
+        self._current_group = None
+        # Number of tests in self._current_group.
+        self._num_tests_in_current_group = None
+        # Time at which we started running tests from self._current_group.
+        self._current_group_start_time = None
 
     def _get_test_args(self, worker_number):
         """Returns the tuple of arguments for tests and for DumpRenderTree."""
@@ -167,7 +304,7 @@ class TestShellThread(WatchableThread):
     def get_test_group_timing_stats(self):
         """Returns a dictionary mapping test group to a tuple of
         (number of tests in that group, time to run the tests)"""
-        return self._test_list_timing_stats
+        return self._test_group_timing_stats
 
     def get_test_results(self):
         """Return the list of all tests run on this thread.
@@ -184,12 +321,6 @@ class TestShellThread(WatchableThread):
     def get_num_tests(self):
         return self._num_tests
 
-    def next_timeout(self):
-        """Return the time the test is supposed to finish by."""
-        if self._next_timeout:
-            return self._next_timeout + self._http_lock_wait_time()
-        return self._next_timeout
-
     def run(self):
         """Delegate main work to a helper method and watch for uncaught
         exceptions."""
@@ -200,18 +331,20 @@ class TestShellThread(WatchableThread):
         # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         self._thread_id = thread.get_ident()
         self._start_time = time.time()
+        self._num_tests = 0
         try:
-            _log.debug('%s starting' % (self._name))
+            _log.debug('%s starting' % (self.getName()))
             self._run(test_runner=None, result_summary=None)
-            _log.debug('%s done (%d tests)' % (self._name, self._num_tests))
+            _log.debug('%s done (%d tests)' % (self.getName(),
+                       self.get_num_tests()))
         except KeyboardInterrupt:
             self._exception_info = sys.exc_info()
-            _log.debug("%s interrupted" % self._name)
+            _log.debug("%s interrupted" % self.getName())
         except:
             # Save the exception for our caller to see.
             self._exception_info = sys.exc_info()
             self._stop_time = time.time()
-            _log.error('%s dying, exception raised' % self._name)
+            _log.error('%s dying, exception raised' % self.getName())
 
         self._stop_time = time.time()
 
@@ -222,97 +355,108 @@ class TestShellThread(WatchableThread):
         do multi-threaded debugging."""
         self._run(test_runner, result_summary)
 
+    def cancel(self):
+        """Clean up http lock and set a flag telling this thread to quit."""
+        self._stop_servers_with_lock()
+        WatchableThread.cancel(self)
+
+    def next_timeout(self):
+        """Return the time the test is supposed to finish by."""
+        if self._next_timeout:
+            return self._next_timeout + self._http_lock_wait_time()
+        return self._next_timeout
+
+    def _http_lock_wait_time(self):
+        """Return the time what http locking takes."""
+        if self._http_lock_wait_begin == 0:
+            return 0
+        if self._http_lock_wait_end == 0:
+            return time.time() - self._http_lock_wait_begin
+        return self._http_lock_wait_end - self._http_lock_wait_begin
+
     def _run(self, test_runner, result_summary):
         """Main work entry point of the thread. Basically we pull urls from the
         filename queue and run the tests until we run out of urls.
 
         If test_runner is not None, then we call test_runner.update_summary()
-        with the results of each test during _tear_down_test(), below."""
-        self._test_runner = test_runner
-        self._result_summary = result_summary
-
-        while not self._canceled:
-            try:
-                current_group, filename_list = \
-                    self._filename_list_queue.get_nowait()
-                self.handle_test_list(current_group, filename_list)
-            except Queue.Empty:
-                break
-
-        if self._canceled:
-            _log.debug('Testing canceled')
+        with the results of each test."""
+        batch_size = self._options.batch_size
+        batch_count = 0
 
-        self._cleanup()
+        # Append tests we're running to the existing tests_run.txt file.
+        # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
+        tests_run_filename = os.path.join(self._options.results_directory,
+                                          "tests_run.txt")
+        tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
 
-    def _cleanup(self):
-        self._kill_dump_render_tree()
-        if self._have_http_lock:
-            self._stop_servers_with_lock()
-        if self._tests_run_file:
-            self._tests_run_file.close()
-            self._tests_run_file = None
-
-    def handle_test_list(self, list_name, test_list):
-        if list_name == "tests_to_http_lock":
-            self._start_servers_with_lock()
-
-        start_time = time.time()
-        num_tests = 0
-        for test_input in test_list:
-            self._run_test(test_input)
+        while True:
             if self._canceled:
-                break
-            num_tests += 1
+                _log.debug('Testing canceled')
+                tests_run_file.close()
+                return
+
+            if not self._filename_list:
+                if self._current_group is not None:
+                    self._test_group_timing_stats[self._current_group] = \
+                        (self._num_tests_in_current_group,
+                         time.time() - self._current_group_start_time)
+
+                try:
+                    self._current_group, self._filename_list = \
+                        self._filename_list_queue.get_nowait()
+                except Queue.Empty:
+                    self._stop_servers_with_lock()
+                    self._kill_dump_render_tree()
+                    tests_run_file.close()
+                    return
+
+                if self._current_group == "tests_to_http_lock":
+                    self._start_servers_with_lock()
+                elif self._have_http_lock:
+                    self._stop_servers_with_lock()
+
+                self._num_tests_in_current_group = len(self._filename_list)
+                self._current_group_start_time = time.time()
+
+            test_input = self._filename_list.pop()
+
+            # We have a test; run it.
+            batch_count += 1
+            self._num_tests += 1
+            if self._options.run_singly:
+                result = self._run_test_in_another_thread(test_input)
+            else:
+                result = self._run_test_in_this_thread(test_input)
+
+            filename = test_input.filename
+            tests_run_file.write(filename + "\n")
+            if result.failures:
+                # Check and kill DumpRenderTree if we need to.
+                if any(f.should_kill_dump_render_tree()
+                       for f in result.failures):
+                    self._kill_dump_render_tree()
+                    # Reset the batch count since the shell just bounced.
+                    batch_count = 0
+                # Print the error message(s).
+                error_str = '\n'.join(['  ' + f.message() for
+                                       f in result.failures])
+                _log.debug("%s %s failed:\n%s" % (self.getName(),
+                           self._port.relative_test_filename(filename),
+                           error_str))
+            else:
+                _log.debug("%s %s passed" % (self.getName(),
+                           self._port.relative_test_filename(filename)))
+            self._result_queue.put(result.dumps())
+
+            if batch_size > 0 and batch_count >= batch_size:
+                # Bounce the shell and reset count.
+                self._kill_dump_render_tree()
+                batch_count = 0
 
-        elapsed_time = time.time() - start_time
+            if test_runner:
+                test_runner.update_summary(result_summary)
 
-        if self._have_http_lock:
-            self._stop_servers_with_lock()
-
-        self._test_list_timing_stats[list_name] = \
-           (num_tests, elapsed_time)
-
-    def _run_test(self, test_input):
-        self._set_up_test(test_input)
-
-        # We calculate how long we expect the test to take.
-        #
-        # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
-        # larger than that. We also add a little more padding if we're
-        # running tests in a separate thread.
-        #
-        # Note that we need to convert the test timeout from a
-        # string value in milliseconds to a float for Python.
-        driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
-        thread_padding_sec = 1.0
-        thread_timeout_sec = driver_timeout_sec + thread_padding_sec
-        if self._options.run_singly:
-            test_timeout_sec = thread_timeout_sec
-        else:
-            test_timeout_sec = driver_timeout_sec
-
-        start = time.time()
-        self._next_timeout = start + test_timeout_sec
-
-        if self._options.run_singly:
-            result = self._run_test_in_another_thread(test_input,
-                                                      thread_timeout_sec)
-        else:
-            result = self._run_test_in_this_thread(test_input)
-
-        self._tear_down_test(test_input, result)
-
-    def _set_up_test(self, test_input):
-        test_input.uri = self._port.filename_to_uri(test_input.filename)
-        if self._should_fetch_expected_checksum():
-            test_input.image_checksum = self._port.expected_checksum(
-                test_input.filename)
-
-    def _should_fetch_expected_checksum(self):
-        return (self._options.pixel_tests and not
-                (self._options.new_baseline or self._options.reset_results))
-
-    def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+    def _run_test_in_another_thread(self, test_input):
         """Run a test in a separate thread, enforcing a hard time limit.
 
         Since we can only detect the termination of a thread, not any internal
@@ -321,24 +465,25 @@ class TestShellThread(WatchableThread):
 
         Args:
           test_input: Object containing the test filename and timeout
-          thread_timeout_sec: time to wait before killing the driver process.
+
         Returns:
           A TestResult
         """
-        worker = self
-        result = None
-
-        driver = worker._port.create_driver(worker._worker_number)
-        driver.start()
-
-        class SingleTestThread(threading.Thread):
-            def run(self):
-                result = worker._run_single_test(driver, test_input)
-
-        thread = SingleTestThread()
-        thread.start()
-        thread.join(thread_timeout_sec)
-        if thread.isAlive():
+        worker = SingleTestThread(self._port,
+                                  self._options,
+                                  self._worker_number,
+                                  self._name,
+                                  test_input,
+                                  self._test_types,
+                                  self._test_args)
+
+        worker.start()
+
+        thread_timeout = _milliseconds_to_seconds(
+            _pad_timeout(int(test_input.timeout)))
+        self._next_timeout = time.time() + thread_timeout
+        worker.join(thread_timeout)
+        if worker.isAlive():
             # If join() returned with the thread still running, the
             # DumpRenderTree is completely hung and there's nothing
             # more we can do with it.  We have to kill all the
@@ -348,10 +493,16 @@ class TestShellThread(WatchableThread):
             # that tradeoff in order to avoid losing the rest of this
             # thread's results.
             _log.error('Test thread hung: killing all DumpRenderTrees')
+            if worker._driver:
+                worker._driver.stop()
 
-        driver.stop()
-
-        if not result:
+        try:
+            result = worker.get_test_result()
+        except AttributeError:
+            # This gets raised if the worker thread has already exited
+            # without producing a result.
+            _log.error('Cannot get results of test: %s' %
+                       test_input.filename)
             result = test_results.TestResult(test_input.filename, failures=[],
                 test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
 
@@ -365,123 +516,33 @@ class TestShellThread(WatchableThread):
 
         Returns: a TestResult object.
         """
-        # poll() is not threadsafe and can throw OSError due to:
-        # http://bugs.python.org/issue1731717
-        if not self._driver or self._driver.poll() is not None:
-            self._driver = self._port.create_driver(self._worker_number)
-            self._driver.start()
-
-        test_result = self._run_single_test(test_input, self._driver)
+        self._ensure_dump_render_tree_is_running()
+        thread_timeout = _milliseconds_to_seconds(
+             _pad_timeout(int(test_input.timeout)))
+        self._next_timeout = time.time() + thread_timeout
+        test_result = _run_single_test(self._port, self._options, test_input,
+                                       self._test_types, self._test_args,
+                                       self._driver, self._name)
         self._test_results.append(test_result)
         return test_result
 
-    def _run_single_test(self, test_input, driver):
-        # The image hash is used to avoid doing an image dump if the
-        # checksums match, so it should be set to a blank value if we
-        # are generating a new baseline.  (Otherwise, an image from a
-        # previous run will be copied into the baseline."""
-        if self._should_fetch_expected_checksum():
-            test_input.image_hash = self._port.expected_checksum(
-                test_input.filename)
-        test_output = driver.run_test(test_input)
-        return self._process_output(test_input.filename, test_output)
-
-    def _process_output(self, test_filename, test_output):
-        """Receives the output from a DumpRenderTree process, subjects it to a
-        number of tests, and returns a list of failure types the test produced.
+    def _ensure_dump_render_tree_is_running(self):
+        """Start the shared DumpRenderTree, if it's not running.
 
-        Args:
-        test_filename: full path to the test in question.
-        test_output: a TestOutput object containing the output of the test
+        This is not for use when running tests singly, since those each start
+        a separate DumpRenderTree in their own thread.
 
-        Returns: a TestResult object
         """
-        failures = []
-
-        if test_output.crash:
-            failures.append(test_failures.FailureCrash())
-        if test_output.timeout:
-            failures.append(test_failures.FailureTimeout())
-
-        test_name = self._port.relative_test_filename(test_filename)
-        if test_output.crash:
-            _log.debug("%s Stacktrace for %s:\n%s" %
-                       (self._name, test_name, test_output.error))
-            filename = os.path.join(self._options.results_directory, test_name)
-            filename = os.path.splitext(filename)[0] + "-stack.txt"
-            self._port.maybe_make_directory(os.path.split(filename)[0])
-            with codecs.open(filename, "wb", "utf-8") as file:
-                file.write(test_output.error)
-        elif test_output.error:
-            _log.debug("%s %s output stderr lines:\n%s" %
-                       (self._name, test_name, test_output.error))
-
-        expected_test_output = self._expected_test_output(test_filename)
-
-        # Check the output and save the results.
-        start_time = time.time()
-        time_for_diffs = {}
-        for test_type in self._test_types:
-            start_diff_time = time.time()
-            new_failures = test_type.compare_output(self._port,
-                                                    test_filename,
-                                                    self._test_args,
-                                                    test_output,
-                                                    expected_test_output)
-            # Don't add any more failures if we already have a crash, so we
-            # don't double-report those tests. We do double-report for timeouts
-            # since we still want to see the text and image output.
-            if not test_output.crash:
-                failures.extend(new_failures)
-            time_for_diffs[test_type.__class__.__name__] = (
-                time.time() - start_diff_time)
-
-        total_time_for_all_diffs = time.time() - start_diff_time
-        return test_results.TestResult(test_filename,
-                                       failures,
-                                       test_output.test_time,
-                                       total_time_for_all_diffs,
-                                       time_for_diffs)
-
-    def _expected_test_output(self, filename):
-        """Returns an expected TestOutput object."""
-        return test_output.TestOutput(self._port.expected_text(filename),
-                                    self._port.expected_image(filename),
-                                    self._port.expected_checksum(filename))
-
-    def _tear_down_test(self, test_input, result):
-        self._num_tests += 1
-        self._batch_count += 1
-        self._tests_run_file.write(test_input.filename + "\n")
-        test_name = self._port.relative_test_filename(test_input.filename)
-
-        if result.failures:
-            # Check and kill DumpRenderTree if we need to.
-            if any([f.should_kill_dump_render_tree() for f in result.failures]):
-                self._kill_dump_render_tree()
-                # Reset the batch count since the shell just bounced.
-                self._batch_count = 0
-
-            # Print the error message(s).
-            _log.debug("%s %s failed:" % (self._name, test_name))
-            for f in result.failures:
-                _log.debug("%s  %s" % (self._name, f.message()))
-        else:
-            _log.debug("%s %s passed" % (self._name, test_name))
-
-        self._result_queue.put(result.dumps())
-
-        if self._batch_size > 0 and self._batch_count >= self._batch_size:
-            # Bounce the shell and reset count.
-            self._kill_dump_render_tree()
-            self._batch_count = 0
-
-        if self._test_runner:
-            self._test_runner.update_summary(self._result_summary)
+        # poll() is not threadsafe and can throw OSError due to:
+        # http://bugs.python.org/issue1731717
+        if not self._driver or self._driver.poll() is not None:
+            self._driver = self._port.create_driver(self._worker_number)
+            self._driver.start()
 
     def _start_servers_with_lock(self):
+        """Acquire http lock and start the servers."""
         self._http_lock_wait_begin = time.time()
-        _log.debug('Acquiring http lock ...')
+        _log.debug('Acquire http lock ...')
         self._port.acquire_http_lock()
         _log.debug('Starting HTTP server ...')
         self._port.start_http_server()
@@ -490,14 +551,6 @@ class TestShellThread(WatchableThread):
         self._http_lock_wait_end = time.time()
         self._have_http_lock = True
 
-    def _http_lock_wait_time(self):
-        """Return the time what http locking takes."""
-        if self._http_lock_wait_begin == 0:
-            return 0
-        if self._http_lock_wait_end == 0:
-            return time.time() - self._http_lock_wait_begin
-        return self._http_lock_wait_end - self._http_lock_wait_begin
-
     def _stop_servers_with_lock(self):
         """Stop the servers and release http lock."""
         if self._have_http_lock:
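
A note on the hard-timeout pattern that _run_test_in_another_thread
restores: Python offers no way to kill a hung thread, so the code joins
the worker with a timeout and then checks isAlive(); if the thread is
still running, the only remedy is to kill the child DumpRenderTree
processes. Below is a minimal standalone sketch of that
join-with-timeout pattern (run_with_hard_timeout and slow_operation are
illustrative names, not part of webkitpy):

import threading
import time

def run_with_hard_timeout(target, timeout_sec):
    """Run target() on a worker thread, giving up after timeout_sec."""
    result = {}

    def _worker():
        result['value'] = target()

    worker = threading.Thread(target=_worker)
    worker.start()
    # join() returns when the thread finishes or the timeout expires,
    # whichever comes first; it does not stop the thread.
    worker.join(timeout_sec)
    if worker.is_alive():  # spelled isAlive() in the 2010 codebase
        # Hung: abandon the thread; the caller must kill whatever
        # child process it is blocked on.
        return None
    return result.get('value')

def slow_operation():
    time.sleep(0.1)
    return 'ok'

print(run_with_hard_timeout(slow_operation, 1.0))         # -> 'ok'
print(run_with_hard_timeout(lambda: time.sleep(5), 0.2))  # -> None

In the real code the join timeout comes from _pad_timeout(), i.e. three
times the per-test timeout, so this thread-level limit only fires after
DumpRenderTree's own 2.5x watchdog has had its chance.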

-- 
WebKit Debian packaging


