[SCM] WebKit Debian packaging branch, debian/experimental, updated. upstream/1.3.3-9427-gc2be6fc
dpranke at chromium.org
Wed Dec 22 12:55:50 UTC 2010
The following commit has been merged into the debian/experimental branch:
commit 7e68734e6e2d80db49c1a0e694ee56c89f0da2e0
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date: Thu Sep 2 04:47:42 2010 +0000
2010-09-01 Dirk Pranke <dpranke at chromium.org>
Reviewed by Tony Chang.
Land a patched version of r66542 - change TestRunner to have a
separate cleanup method, fix the ordering of cleanup between
TestRunner and the printing module, and then wrap everything in a
try/finally block to ensure reliable cleanup without relying on
work done in TestRunner's destructor.
Also refactor run_webkit_tests.run() to be much smaller and cleaner
by creating a number of helper methods and moving more logic into
the TestRunner class.
This fixes the crash at the end of the Linux test run of
new-run-webkit-tests (and undoes the rollout in r66547).
https://bugs.webkit.org/show_bug.cgi?id=44902
* Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Added.
* Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Added.
* Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Added.
* Scripts/webkitpy/layout_tests/data/passes/error.html: Added.
* Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
* Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
* Scripts/webkitpy/layout_tests/layout_package/printing.py:
* Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
* Scripts/webkitpy/layout_tests/port/base.py:
* Scripts/webkitpy/layout_tests/port/test.py:
* Scripts/webkitpy/layout_tests/run_webkit_tests.py:
* Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@66640 268f45cc-cd09-0410-ab3c-d52691b4dbfc
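
The cleanup strategy described in the commit message above is the standard acquire/try/finally shape: setup returns a handle to whatever it configured, and the matching teardown runs even when the test run raises, instead of depending on a destructor firing. A minimal stand-alone sketch of that shape (run_tests and work are illustrative names, not the patch's API):

    import logging

    def configure_logging(stream):
        # Attach a handler we can later remove; returning it lets the
        # caller restore exactly the state it changed.
        handler = logging.StreamHandler(stream)
        logging.getLogger().addHandler(handler)
        return handler

    def run_tests(stream, work):
        handler = configure_logging(stream)
        try:
            # Anything in here may raise; the finally clause still
            # runs, so the root logger never keeps a stale handler.
            return work()
        finally:
            logging.getLogger().removeHandler(handler)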
diff --git a/WebKitTools/ChangeLog b/WebKitTools/ChangeLog
index 4fefdef..e465ca2 100644
--- a/WebKitTools/ChangeLog
+++ b/WebKitTools/ChangeLog
@@ -1,3 +1,35 @@
+2010-09-01 Dirk Pranke <dpranke at chromium.org>
+
+ Reviewed by Tony Chang.
+
+ Land a patched version of r66542 - change TestRunner to have a
+ separate cleanup method, fix the ordering of cleanup between
+ TestRunner and the printing module, and then wrap everything in a
+ try/finally block to ensure reliable cleanup without relying on
+ work done in TestRunner's destructor.
+
+ Also refactor run_webkit_tests.run() to be much smaller and cleaner
+ by creating a number of helper methods and moving more logic into
+ the TestRunner class.
+
+ This fixes the crash at the end of the Linux test run of
+ new-run-webkit-tests (and undoes the rollout in r66547).
+
+ https://bugs.webkit.org/show_bug.cgi?id=44902
+
+ * Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Added.
+ * Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Added.
+ * Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/passes/error.html: Added.
+ * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
+ * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+ * Scripts/webkitpy/layout_tests/layout_package/printing.py:
+ * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+ * Scripts/webkitpy/layout_tests/port/base.py:
+ * Scripts/webkitpy/layout_tests/port/test.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
2010-09-01 Mark Rowe <mrowe at apple.com>
Reviewed by Adam Roben.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html
new file mode 100644
index 0000000..38c54e3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html
@@ -0,0 +1 @@
+exception
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html
new file mode 100644
index 0000000..c253983
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html
@@ -0,0 +1 @@
+keyboard
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt
new file mode 100644
index 0000000..9427269
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt
@@ -0,0 +1 @@
+error-txt
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html
new file mode 100644
index 0000000..8276753
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html
@@ -0,0 +1 @@
+error
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt b/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
index 6e66caa..16556e3 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
@@ -8,3 +8,5 @@ WONTFIX : failures/expected/missing_image.html = MISSING PASS
WONTFIX : failures/expected/missing_text.html = MISSING PASS
WONTFIX : failures/expected/text.html = TEXT
WONTFIX : failures/expected/timeout.html = TIMEOUT
+WONTFIX SKIP : failures/expected/keyboard.html = CRASH
+WONTFIX SKIP : failures/expected/exception.html = CRASH
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 6343400..ec33086 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -169,6 +169,11 @@ class SingleTestThread(threading.Thread):
self._output_dir = output_dir
def run(self):
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
test_info = self._test_info
driver = self._port.create_driver(self._image_path, self._shell_args)
driver.start()
@@ -287,6 +292,11 @@ class TestShellThread(WatchableThread):
def run(self):
"""Delegate main work to a helper method and watch for uncaught
exceptions."""
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
self._thread_id = thread.get_ident()
self._start_time = time.time()
self._num_tests = 0
@@ -303,9 +313,9 @@ class TestShellThread(WatchableThread):
self._exception_info = sys.exc_info()
self._stop_time = time.time()
# Re-raise it and die.
- _log.error('%s dying: %s' % (self.getName(),
+ _log.error('%s dying, exception raised: %s' % (self.getName(),
self._exception_info))
- raise
+
self._stop_time = time.time()
def run_in_main_thread(self, test_runner, result_summary):
@@ -321,14 +331,8 @@ class TestShellThread(WatchableThread):
If test_runner is not None, then we call test_runner.UpdateSummary()
with the results of each test."""
- batch_size = 0
+ batch_size = self._options.batch_size
batch_count = 0
- if self._options.batch_size:
- try:
- batch_size = int(self._options.batch_size)
- except:
- _log.info("Ignoring invalid batch size '%s'" %
- self._options.batch_size)
# Append tests we're running to the existing tests_run.txt file.
# This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
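
The _covered_run() indirection added above exists only to keep coverage measurement honest: per the linked coveragepy issue 85, lines executed directly inside a Thread subclass's run() could go unrecorded, while the same lines in a method run() delegates to are traced normally. The shape, reduced to a sketch:

    import threading

    class WorkerThread(threading.Thread):
        def run(self):
            # Keep run() to a single delegating call; see
            # http://bitbucket.org/ned/coveragepy/issue/85.
            self._covered_run()

        def _covered_run(self):
            # The real thread body lives here, where the coverage
            # tracer sees it.
            self.result = sum(range(10))

The same hunk also drops the hand-rolled batch-size parsing, since --batch-size is now declared as an integer option (see the optparse note further down).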
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
index a9c6d5b..9421ff8 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
@@ -34,7 +34,6 @@ import optparse
import pdb
import sys
import unittest
-import logging
from webkitpy.common.array_stream import ArrayStream
from webkitpy.layout_tests.layout_package import metered_stream
@@ -97,13 +96,19 @@ class TestMeteredStream(unittest.TestCase):
m.write("foo")
self.assertEquals(a.get(), ['foo'])
+ import logging
+ b = ArrayStream()
+ logger = logging.getLogger()
+ handler = logging.StreamHandler(b)
+ logger.addHandler(handler)
m.update("bar")
- # FIXME: figure out how to test that this went to the logger. Is this
- # good enough?
+ logger.handlers.remove(handler)
self.assertEquals(a.get(), ['foo'])
+ self.assertEquals(b.get(), ['bar\n'])
m.progress("dropped")
self.assertEquals(a.get(), ['foo'])
+ self.assertEquals(b.get(), ['bar\n'])
if __name__ == '__main__':
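
The rewritten metered_stream test above replaces the old FIXME with a direct assertion: attach a StreamHandler over a capturable stream to the root logger, run the code under test, detach, and check what arrived. The same pattern with only the standard library (io.StringIO standing in for webkitpy's ArrayStream):

    import io
    import logging
    import unittest

    class LogCaptureTest(unittest.TestCase):
        def test_message_reaches_logger(self):
            buf = io.StringIO()
            handler = logging.StreamHandler(buf)
            root = logging.getLogger()
            old_level = root.level
            root.addHandler(handler)
            root.setLevel(logging.INFO)
            try:
                logging.info("bar")
            finally:
                # Detach and restore, or later tests inherit the
                # handler and see duplicated output.
                root.removeHandler(handler)
                root.setLevel(old_level)
            self.assertEqual(buf.getvalue(), "bar\n")

    if __name__ == '__main__':
        unittest.main()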
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
index a9e015f..d420631 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -126,19 +126,6 @@ def print_options():
]
-def configure_logging(options, meter):
- """Configures the logging system."""
- log_fmt = '%(message)s'
- log_datefmt = '%y%m%d %H:%M:%S'
- log_level = logging.INFO
- if options.verbose:
- log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
- '%(message)s')
- log_level = logging.DEBUG
-
- logging.basicConfig(level=log_level, format=log_fmt,
- datefmt=log_datefmt, stream=meter)
-
def parse_print_options(print_options, verbose, child_processes,
is_fully_parallel):
@@ -190,6 +177,28 @@ def parse_print_options(print_options, verbose, child_processes,
return switches
+def _configure_logging(stream, verbose):
+ log_fmt = '%(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if verbose:
+ log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+ '%(message)s')
+ log_level = logging.DEBUG
+
+ root = logging.getLogger()
+ handler = logging.StreamHandler(stream)
+ handler.setFormatter(logging.Formatter(log_fmt, None))
+ root.addHandler(handler)
+ root.setLevel(log_level)
+ return handler
+
+
+def _restore_logging(handler_to_remove):
+ root = logging.getLogger()
+ root.handlers.remove(handler_to_remove)
+
+
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests.
@@ -237,12 +246,22 @@ class Printer(object):
self._meter = metered_stream.MeteredStream(options.verbose,
regular_output)
- configure_logging(self._options, self._meter)
+ self._logging_handler = _configure_logging(self._meter,
+ options.verbose)
self.switches = parse_print_options(options.print_options,
options.verbose, child_processes, is_fully_parallel)
- # These two routines just hide the implmentation of the switches.
+ def cleanup(self):
+ """Restore logging configuration to its initial settings."""
+ if self._logging_handler:
+ _restore_logging(self._logging_handler)
+ self._logging_handler = None
+
+ def __del__(self):
+ self.cleanup()
+
+ # These two routines just hide the implementation of the switches.
def disabled(self, option):
return not option in self.switches
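
The new cleanup() above is deliberately idempotent: the guard plus clearing self._logging_handler means a second call, or the one __del__ makes, is a no-op (exactly what the new test_configure_and_cleanup exercises). Reduced to its core, the pattern looks like this sketch (Printerish is an illustrative stand-in, not the patch's class):

    import logging

    class Printerish(object):
        def __init__(self, stream):
            self._handler = logging.StreamHandler(stream)
            logging.getLogger().addHandler(self._handler)

        def cleanup(self):
            # Idempotent: the first call removes the handler and
            # clears the attribute; repeated calls are no-ops.
            if self._handler:
                logging.getLogger().removeHandler(self._handler)
                self._handler = None

        def __del__(self):
            # Backstop only; destructor timing is unreliable (the
            # point of this whole patch), so callers should still
            # call cleanup() explicitly.
            self.cleanup()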
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
index 40c691f..29139d0 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -37,6 +37,7 @@ import unittest
import logging
from webkitpy.common import array_stream
+from webkitpy.common.system import logtesting
from webkitpy.layout_tests import port
from webkitpy.layout_tests.layout_package import printing
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -53,25 +54,24 @@ def get_options(args):
class TestUtilityFunctions(unittest.TestCase):
def test_configure_logging(self):
- # FIXME: We need to figure out how to reset the basic logger.
- # FIXME: If other testing classes call logging.basicConfig() then
- # FIXME: these calls become no-ops and we can't control the
- # FIXME: configuration to test things properly.
options, args = get_options([])
stream = array_stream.ArrayStream()
- printing.configure_logging(options, stream)
+ handler = printing._configure_logging(stream, options.verbose)
logging.info("this should be logged")
- # self.assertFalse(stream.empty())
+ self.assertFalse(stream.empty())
stream.reset()
logging.debug("this should not be logged")
- # self.assertTrue(stream.empty())
+ self.assertTrue(stream.empty())
+
+ printing._restore_logging(handler)
stream.reset()
options, args = get_options(['--verbose'])
- printing.configure_logging(options, stream)
+ handler = printing._configure_logging(stream, options.verbose)
logging.debug("this should be logged")
- # self.assertFalse(stream.empty())
+ self.assertFalse(stream.empty())
+ printing._restore_logging(handler)
def test_print_options(self):
options, args = get_options([])
@@ -190,6 +190,14 @@ class Testprinter(unittest.TestCase):
do_helper(method_name, switch, 'hello', exp_err, exp_bot)
do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
+ def test_configure_and_cleanup(self):
+ # This test verifies that calling cleanup repeatedly and deleting
+ # the object is safe.
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.cleanup()
+ printer.cleanup()
+ printer = None
+
def test_print_actual(self):
# Actual results need to be logged to the buildbot's stream.
self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
@@ -421,11 +429,12 @@ class Testprinter(unittest.TestCase):
self.assertFalse(err.empty())
self.assertTrue(out.empty())
- def test_write(self):
+ def test_write_nothing(self):
printer, err, out = self.get_printer(['--print', 'nothing'])
printer.write("foo")
self.assertTrue(err.empty())
+ def test_write_misc(self):
printer, err, out = self.get_printer(['--print', 'misc'])
printer.write("foo")
self.assertFalse(err.empty())
@@ -433,6 +442,7 @@ class Testprinter(unittest.TestCase):
printer.write("foo", "config")
self.assertTrue(err.empty())
+ def test_write_everything(self):
printer, err, out = self.get_printer(['--print', 'everything'])
printer.write("foo")
self.assertFalse(err.empty())
@@ -440,11 +450,10 @@ class Testprinter(unittest.TestCase):
printer.write("foo", "config")
self.assertFalse(err.empty())
- # FIXME: this should be logged somewhere, but it actually
- # disappears into the ether in the logging subsystem.
+ def test_write_verbose(self):
printer, err, out = self.get_printer(['--verbose'])
printer.write("foo")
- self.assertTrue(err.empty())
+ self.assertTrue(not err.empty() and "foo" in err.get()[0])
self.assertTrue(out.empty())
def test_print_unexpected_results(self):
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
index e4c6b7e..26eb18d 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -100,7 +100,7 @@ class Base(unittest.TestCase):
return """
BUG_TEST : failures/expected/text.html = TEXT
BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
-BUG_TEST REBASELINE : failure/expected/missing_image.html = MISSING
+BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
"""
@@ -253,6 +253,13 @@ BUG_TEST : failures/expected/text.html = IMAGE""")
BUG_TEST : failures/expected/text.html = TEXT
BUG_TEST : failures/expected/text.html = IMAGE""")
+ def test_semantic_missing_file(self):
+ # This should log a non-fatal error.
+ self.parse_exp('BUG_TEST : missing_file.html = TEXT')
+ self.assertEqual(
+ len(self._exp._expected_failures.get_non_fatal_errors()), 1)
+
+
def test_overrides(self):
self.parse_exp(self.get_basic_expectations(), """
BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
index af1af93..0dda774 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
@@ -327,7 +327,6 @@ class Port(object):
if not self._webkit_base_dir:
abspath = os.path.abspath(__file__)
self._webkit_base_dir = abspath[0:abspath.find('WebKitTools')]
- _log.debug("Using WebKit root: %s" % self._webkit_base_dir)
return os.path.join(self._webkit_base_dir, *comps)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
index d36b540..e309334 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
@@ -151,7 +151,10 @@ class TestDriver(base.Driver):
def run_test(self, uri, timeoutms, image_hash):
basename = uri[(uri.rfind("/") + 1):uri.rfind(".html")]
- error = ''
+ if 'error' in basename:
+ error = basename + "_error\n"
+ else:
+ error = ''
checksum = None
# There are four currently supported types of tests: text, image,
# image hash (checksum), and stderr output. The fake output
@@ -170,10 +173,13 @@ class TestDriver(base.Driver):
# will allow us to see if any results get crossed by the rest of the
# program.
if 'failures' in uri:
+ if 'keyboard' in basename:
+ raise KeyboardInterrupt
+ if 'exception' in basename:
+ raise ValueError('exception from ' + basename)
+
crash = 'crash' in basename
timeout = 'timeout' in basename
- if 'error' in basename:
- error = basename + "_error\n"
if 'text' in basename:
output = basename + '_failed-txt\n'
else:
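
The TestDriver changes above make the fake port fail on demand: a test whose name contains 'keyboard' simulates the user interrupting the run, and one containing 'exception' simulates a crash inside the driver, so the unit tests can drive the runner's error paths without a real DumpRenderTree. The dispatch is plain string matching on the basename; roughly:

    def fake_run_test(uri):
        # Mirrors the patch's basename slicing.
        basename = uri[(uri.rfind("/") + 1):uri.rfind(".html")]
        if 'failures' in uri:
            if 'keyboard' in basename:
                # Simulates Ctrl-C arriving mid-run.
                raise KeyboardInterrupt
            if 'exception' in basename:
                # Simulates an unexpected failure in the driver.
                raise ValueError('exception from ' + basename)
        return basename + '\n'

The matching test_expectations.txt lines mark both tests WONTFIX SKIP, so ordinary runs never touch them; test_run_force and test_keyboard_interrupt reach them by passing --force or naming the test explicitly.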
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 7163e1b..4132260 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -75,7 +75,6 @@ from test_types import image_diff
from test_types import text_diff
from test_types import test_type_base
-from webkitpy.common.system.executive import Executive
from webkitpy.thirdparty import simplejson
import port
@@ -106,12 +105,11 @@ class TestInfo:
self._image_hash = None
def _read_image_hash(self):
- try:
- with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
- return hash_file.read()
- except IOError, e:
- if errno.ENOENT != e.errno:
- raise
+ if not os.path.exists(self._expected_hash_path):
+ return None
+
+ with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
+ return hash_file.read()
def image_hash(self):
# Read the image_hash lazily to reduce startup time.
@@ -273,35 +271,42 @@ class TestRunner:
# options.results_directory, use_tls=True, port=9323)
# a list of TestType objects
- self._test_types = []
+ self._test_types = [text_diff.TestTextDiff]
+ if options.pixel_tests:
+ self._test_types.append(image_diff.ImageDiff)
# a set of test files, and the same tests as a list
self._test_files = set()
self._test_files_list = None
self._result_queue = Queue.Queue()
-
self._retrying = False
- # Hack for dumping threads on the bots
- self._last_thread_dump = None
-
- def __del__(self):
- _log.debug("flushing stdout")
- sys.stdout.flush()
- _log.debug("flushing stderr")
- sys.stderr.flush()
- _log.debug("stopping http server")
- self._port.stop_http_server()
- _log.debug("stopping websocket server")
- self._port.stop_websocket_server()
-
- def gather_file_paths(self, paths):
+ def collect_tests(self, args, last_unexpected_results):
"""Find all the files to test.
Args:
- paths: a list of globs to use instead of the defaults."""
+ args: list of test arguments from the command line
+ last_unexpected_results: list of unexpected results to retest, if any
+
+ """
+ paths = [arg for arg in args if arg and arg != '']
+ paths += last_unexpected_results
+ if self._options.test_list:
+ paths += read_test_files(self._options.test_list)
self._test_files = test_files.gather_test_files(self._port, paths)
+ def lint(self):
+ # Creating the expectations for each platform/configuration pair does
+ # all the test list parsing and ensures it's correct syntax (e.g. no
+ # dupes).
+ for platform_name in self._port.test_platform_names():
+ self.parse_expectations(platform_name, is_debug_mode=True)
+ self.parse_expectations(platform_name, is_debug_mode=False)
+ self._printer.write("")
+ _log.info("If there are no fail messages, errors or exceptions, "
+ "then the lint succeeded.")
+ return 0
+
def parse_expectations(self, test_platform_name, is_debug_mode):
"""Parse the expectations from the test_list files and return a data
structure holding them. Throws an error if the test_list files have
@@ -336,8 +341,8 @@ class TestRunner:
self._printer.print_expected("Found: %d tests" %
(len(self._test_files)))
if not num_all_test_files:
- _log.critical("No tests to run.")
- sys.exit(1)
+ _log.critical('No tests to run.')
+ return None
skipped = set()
if num_all_test_files > 1 and not self._options.force:
@@ -468,10 +473,6 @@ class TestRunner:
return result_summary
- def add_test_type(self, test_type):
- """Add a TestType to the TestRunner."""
- self._test_types.append(test_type)
-
def _get_dir_for_test_file(self, test_file):
"""Returns the highest-level directory by which to shard the given
test file."""
@@ -643,7 +644,7 @@ class TestRunner:
"""
# FIXME: We should use webkitpy.tool.grammar.pluralize here.
plural = ""
- if self._options.child_processes > 1:
+ if not self._is_single_threaded():
plural = "s"
self._printer.print_update('Starting %s%s ...' %
(self._port.driver_name(), plural))
@@ -714,6 +715,49 @@ class TestRunner:
"""Returns whether the test runner needs an HTTP server."""
return self._contains_tests(self.HTTP_SUBDIR)
+ def set_up_run(self):
+ """Configures the system to be ready to run tests.
+
+ Returns a ResultSummary object if we should continue to run tests,
+ or None if we should abort.
+
+ """
+ # This must be started before we check the system dependencies,
+ # since the helper may do things to make the setup correct.
+ self._printer.print_update("Starting helper ...")
+ self._port.start_helper()
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ self._printer.print_update("Checking system dependencies ...")
+ if not self._port.check_sys_deps(self.needs_http()):
+ self._port.stop_helper()
+ return None
+
+ if self._options.clobber_old_results:
+ self._clobber_old_results()
+
+ # Create the output directory if it doesn't already exist.
+ self._port.maybe_make_directory(self._options.results_directory)
+
+ self._port.setup_test_run()
+
+ self._printer.print_update("Preparing tests ...")
+ result_summary = self.prepare_lists_and_print_output()
+ if not result_summary:
+ return None
+
+ if self.needs_http():
+ self._printer.print_update('Starting HTTP server ...')
+ self._port.start_http_server()
+
+ if self._contains_tests(self.WEBSOCKET_SUBDIR):
+ self._printer.print_update('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ # self._websocket_secure_server.Start()
+
+ return result_summary
+
def run(self, result_summary):
"""Run all our tests on all our test files.
@@ -726,19 +770,12 @@ class TestRunner:
Return:
The number of unexpected results (0 == success)
"""
- if not self._test_files:
- return 0
- start_time = time.time()
+ # gather_test_files() must have been called first to initialize us.
+ # If we didn't find any files to test, we've errored out already in
+ # prepare_lists_and_print_output().
+ assert(len(self._test_files))
- if self.needs_http():
- self._printer.print_update('Starting HTTP server ...')
-
- self._port.start_http_server()
-
- if self._contains_tests(self.WEBSOCKET_SUBDIR):
- self._printer.print_update('Starting WebSocket server ...')
- self._port.start_websocket_server()
- # self._websocket_secure_server.Start()
+ start_time = time.time()
keyboard_interrupted, thread_timings, test_timings, \
individual_test_timings = (
@@ -801,6 +838,20 @@ class TestRunner:
# bot red for those.
return unexpected_results['num_regressions']
+ def clean_up_run(self):
+ """Restores the system after we're done running tests."""
+
+ _log.debug("flushing stdout")
+ sys.stdout.flush()
+ _log.debug("flushing stderr")
+ sys.stderr.flush()
+ _log.debug("stopping http server")
+ self._port.stop_http_server()
+ _log.debug("stopping websocket server")
+ self._port.stop_websocket_server()
+ _log.debug("stopping helper")
+ self._port.stop_helper()
+
def update_summary(self, result_summary):
"""Update the summary and print results with any completed tests."""
while True:
@@ -819,6 +870,20 @@ class TestRunner:
self._printer.print_progress(result_summary, self._retrying,
self._test_files_list)
+ def _clobber_old_results(self):
+ # Just clobber the actual test results directories since the other
+ # files in the results directory are explicitly used for cross-run
+ # tracking.
+ self._printer.print_update("Clobbering old results in %s" %
+ self._options.results_directory)
+ layout_tests_dir = self._port.layout_tests_dir()
+ possible_dirs = os.listdir(layout_tests_dir)
+ for dirname in possible_dirs:
+ if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
+ shutil.rmtree(os.path.join(self._options.results_directory,
+ dirname),
+ ignore_errors=True)
+
def _get_failures(self, result_summary, include_crashes):
"""Filters a dict of results and returns only the failures.
@@ -911,6 +976,33 @@ class TestRunner:
_log.info("JSON files uploaded.")
+ def _print_config(self):
+ """Prints the configuration for the test run."""
+ p = self._printer
+ p.print_config("Using port '%s'" % self._port.name())
+ p.print_config("Placing test results in %s" %
+ self._options.results_directory)
+ if self._options.new_baseline:
+ p.print_config("Placing new baselines in %s" %
+ self._port.baseline_path())
+ p.print_config("Using %s build" % self._options.configuration)
+ if self._options.pixel_tests:
+ p.print_config("Pixel tests enabled")
+ else:
+ p.print_config("Pixel tests disabled")
+
+ p.print_config("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms,
+ self._options.slow_time_out_ms))
+
+ if self._is_single_threaded():
+ p.print_config("Running one %s" % self._port.driver_name())
+ else:
+ p.print_config("Running %s %ss in parallel" %
+ (self._options.child_processes,
+ self._port.driver_name()))
+ p.print_config("")
+
def _print_expected_results_of_type(self, result_summary,
result_type, result_type_str):
"""Print the number of the tests in a given result class.
@@ -1266,12 +1358,12 @@ def read_test_files(files):
return tests
-def run(port_obj, options, args, regular_output=sys.stderr,
+def run(port, options, args, regular_output=sys.stderr,
buildbot_output=sys.stdout):
"""Run the tests.
Args:
- port_obj: Port object for port-specific behavior
+ port: Port object for port-specific behavior
options: a dictionary of command line options
args: a list of sub directories or files to test
regular_output: a stream-like object that we can send logging/debug
@@ -1281,24 +1373,61 @@ def run(port_obj, options, args, regular_output=sys.stderr,
Returns:
the number of unexpected results that occurred, or -1 if there is an
error.
- """
- # Configure the printing subsystem for printing output, logging debug
- # info, and tracing tests.
-
- if not options.child_processes:
- # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
- options.child_processes = port_obj.default_child_processes()
+ """
+ _set_up_derived_options(port, options)
- printer = printing.Printer(port_obj, options, regular_output=regular_output,
- buildbot_output=buildbot_output,
- child_processes=int(options.child_processes),
- is_fully_parallel=options.experimental_fully_parallel)
+ printer = printing.Printer(port, options, regular_output, buildbot_output,
+ int(options.child_processes), options.experimental_fully_parallel)
if options.help_printing:
printer.help_printing()
+ printer.cleanup()
return 0
- executive = Executive()
+ last_unexpected_results = _gather_unexpected_results(options)
+ if options.print_last_failures:
+ printer.write("\n".join(last_unexpected_results) + "\n")
+ printer.cleanup()
+ return 0
+
+ # We wrap any parts of the run that are slow or likely to raise exceptions
+ # in a try/finally to ensure that we clean up the logging configuration.
+ num_unexpected_results = -1
+ try:
+ test_runner = TestRunner(port, options, printer)
+ test_runner._print_config()
+
+ printer.print_update("Collecting tests ...")
+ test_runner.collect_tests(args, last_unexpected_results)
+
+ printer.print_update("Parsing expectations ...")
+ if options.lint_test_files:
+ return test_runner.lint()
+ test_runner.parse_expectations(port.test_platform_name(),
+ options.configuration == 'Debug')
+
+ printer.print_update("Checking build ...")
+ if not port.check_build(test_runner.needs_http()):
+ return -1
+
+ result_summary = test_runner.set_up_run()
+ if result_summary:
+ num_unexpected_results = test_runner.run(result_summary)
+ test_runner.clean_up_run()
+ _log.debug("Testing completed, Exit status: %d" %
+ num_unexpected_results)
+ finally:
+ printer.cleanup()
+
+ return num_unexpected_results
+
+
+def _set_up_derived_options(port_obj, options):
+ """Sets the options values that depend on other options values."""
+
+ if not options.child_processes:
+ # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
+ options.child_processes = str(port_obj.default_child_processes())
if not options.configuration:
options.configuration = port_obj.default_configuration()
@@ -1318,30 +1447,6 @@ def run(port_obj, options, args, regular_output=sys.stderr,
# Debug or Release.
options.results_directory = port_obj.results_directory()
- last_unexpected_results = []
- if options.print_last_failures or options.retest_last_failures:
- unexpected_results_filename = os.path.join(
- options.results_directory, "unexpected_results.json")
- with codecs.open(unexpected_results_filename, "r", "utf-8") as file:
- results = simplejson.load(file)
- last_unexpected_results = results['tests'].keys()
- if options.print_last_failures:
- printer.write("\n".join(last_unexpected_results) + "\n")
- return 0
-
- if options.clobber_old_results:
- # Just clobber the actual test results directories since the other
- # files in the results directory are explicitly used for cross-run
- # tracking.
- printer.print_update("Clobbering old results in %s" %
- options.results_directory)
- layout_tests_dir = port_obj.layout_tests_dir()
- possible_dirs = os.listdir(layout_tests_dir)
- for dirname in possible_dirs:
- if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
- shutil.rmtree(os.path.join(options.results_directory, dirname),
- ignore_errors=True)
-
if not options.time_out_ms:
if options.configuration == "Debug":
options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
@@ -1349,92 +1454,18 @@ def run(port_obj, options, args, regular_output=sys.stderr,
options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
- printer.print_config("Regular timeout: %s, slow test timeout: %s" %
- (options.time_out_ms, options.slow_time_out_ms))
-
- if int(options.child_processes) == 1:
- printer.print_config("Running one %s" % port_obj.driver_name())
- else:
- printer.print_config("Running %s %ss in parallel" % (
- options.child_processes, port_obj.driver_name()))
-
- # Include all tests if none are specified.
- new_args = []
- for arg in args:
- if arg and arg != '':
- new_args.append(arg)
-
- paths = new_args
- if not paths:
- paths = []
- paths += last_unexpected_results
- if options.test_list:
- paths += read_test_files(options.test_list)
-
- # Create the output directory if it doesn't already exist.
- port_obj.maybe_make_directory(options.results_directory)
- printer.print_update("Collecting tests ...")
-
- test_runner = TestRunner(port_obj, options, printer)
- test_runner.gather_file_paths(paths)
-
- if options.lint_test_files:
- # Creating the expecations for each platform/configuration pair does
- # all the test list parsing and ensures it's correct syntax (e.g. no
- # dupes).
- for platform_name in port_obj.test_platform_names():
- test_runner.parse_expectations(platform_name, is_debug_mode=True)
- test_runner.parse_expectations(platform_name, is_debug_mode=False)
- printer.write("")
- _log.info("If there are no fail messages, errors or exceptions, "
- "then the lint succeeded.")
- return 0
-
- printer.print_config("Using port '%s'" % port_obj.name())
- printer.print_config("Placing test results in %s" %
- options.results_directory)
- if options.new_baseline:
- printer.print_config("Placing new baselines in %s" %
- port_obj.baseline_path())
- printer.print_config("Using %s build" % options.configuration)
- if options.pixel_tests:
- printer.print_config("Pixel tests enabled")
- else:
- printer.print_config("Pixel tests disabled")
- printer.print_config("")
-
- printer.print_update("Parsing expectations ...")
- test_runner.parse_expectations(port_obj.test_platform_name(),
- options.configuration == 'Debug')
-
- printer.print_update("Checking build ...")
- if not port_obj.check_build(test_runner.needs_http()):
- return -1
-
- printer.print_update("Starting helper ...")
- port_obj.start_helper()
-
- # Check that the system dependencies (themes, fonts, ...) are correct.
- if not options.nocheck_sys_deps:
- printer.print_update("Checking system dependencies ...")
- if not port_obj.check_sys_deps(test_runner.needs_http()):
- return -1
-
- printer.print_update("Preparing tests ...")
- result_summary = test_runner.prepare_lists_and_print_output()
-
- port_obj.setup_test_run()
-
- test_runner.add_test_type(text_diff.TestTextDiff)
- if options.pixel_tests:
- test_runner.add_test_type(image_diff.ImageDiff)
- num_unexpected_results = test_runner.run(result_summary)
- port_obj.stop_helper()
-
- _log.debug("Exit status: %d" % num_unexpected_results)
- return num_unexpected_results
+def _gather_unexpected_results(options):
+ """Returns the unexpected results from the previous run, if any."""
+ last_unexpected_results = []
+ if options.print_last_failures or options.retest_last_failures:
+ unexpected_results_filename = os.path.join(
+ options.results_directory, "unexpected_results.json")
+ with codecs.open(unexpected_results_filename, "r", "utf-8") as file:
+ results = simplejson.load(file)
+ last_unexpected_results = results['tests'].keys()
+ return last_unexpected_results
def _compat_shim_callback(option, opt_str, value, parser):
@@ -1597,7 +1628,7 @@ def parse_args(args=None):
# Restart DumpRenderTree every n tests (default: 1000)
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
- "DumpRenderTree is relaunched.")),
+ "DumpRenderTree is relaunched."), type="int", default=0),
# old-run-webkit-tests calls --run-singly: -1|--singly
# Isolate each test case run (implies --nthly 1 --verbose)
optparse.make_option("--run-singly", action="store_true",
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index 3a3b14e..4cbfdfc 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -41,6 +41,7 @@ import threading
import unittest
from webkitpy.common import array_stream
+from webkitpy.common.system import outputcapture
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -48,75 +49,139 @@ from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.thirdparty.mock import Mock
-def passing_run(args, port_obj=None, record_results=False,
+def passing_run(args=[], port_obj=None, record_results=False,
tests_included=False):
- args.extend(['--print', 'nothing'])
+ new_args = ['--print', 'nothing']
+ if not '--platform' in args:
+ new_args.extend(['--platform', 'test'])
+ if not record_results:
+ new_args.append('--no-record-results')
+ new_args.extend(args)
if not tests_included:
# We use the glob to test that globbing works.
- args.extend(['passes', 'failures/expected/*'])
- if not record_results:
- args.append('--no-record-results')
- options, args = run_webkit_tests.parse_args(args)
+ new_args.extend(['passes', 'failures/expected/*'])
+ options, parsed_args = run_webkit_tests.parse_args(new_args)
if port_obj is None:
port_obj = port.get(options.platform, options)
- res = run_webkit_tests.run(port_obj, options, args)
+ res = run_webkit_tests.run(port_obj, options, parsed_args)
return res == 0
-def logging_run(args, tests_included=False):
- args.extend(['--no-record-results'])
+def logging_run(args=[], tests_included=False):
+ new_args = ['--no-record-results']
+ if not '--platform' in args:
+ new_args.extend(['--platform', 'test'])
+ if args:
+ new_args.extend(args)
if not tests_included:
- args.extend(['passes', 'failures/expected/*'])
- options, args = run_webkit_tests.parse_args(args)
+ new_args.extend(['passes', 'failures/expected/*'])
+ options, parsed_args = run_webkit_tests.parse_args(new_args)
port_obj = port.get(options.platform, options)
buildbot_output = array_stream.ArrayStream()
regular_output = array_stream.ArrayStream()
- res = run_webkit_tests.run(port_obj, options, args,
+ res = run_webkit_tests.run(port_obj, options, parsed_args,
buildbot_output=buildbot_output,
regular_output=regular_output)
return (res, buildbot_output, regular_output)
class MainTest(unittest.TestCase):
- def test_fast(self):
- self.assertTrue(passing_run(['--platform', 'test']))
- self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
- self.assertTrue(passing_run(['--platform', 'test',
- 'passes/text.html'], tests_included=True))
+ def test_basic(self):
+ self.assertTrue(passing_run())
- def test_unexpected_failures(self):
- # Run tests including the unexpected failures.
- self.assertFalse(passing_run(['--platform', 'test'],
- tests_included=True))
+ def test_batch_size(self):
+ # FIXME: verify # of tests run
+ self.assertTrue(passing_run(['--batch-size', '2']))
- def test_one_child_process(self):
+ def test_child_process_1(self):
(res, buildbot_output, regular_output) = logging_run(
- ['--platform', 'test', '--print', 'config', '--child-processes',
- '1'])
+ ['--print', 'config', '--child-processes', '1'])
self.assertTrue('Running one DumpRenderTree\n'
in regular_output.get())
- def test_two_child_processes(self):
+ def test_child_processes_2(self):
(res, buildbot_output, regular_output) = logging_run(
- ['--platform', 'test', '--print', 'config', '--child-processes',
- '2'])
+ ['--print', 'config', '--child-processes', '2'])
self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
in regular_output.get())
+ def test_exception_raised(self):
+ self.assertRaises(ValueError, logging_run,
+ ['failures/expected/exception.html'], tests_included=True)
+
+ def test_full_results_html(self):
+ # FIXME: verify html?
+ self.assertTrue(passing_run(['--full-results-html']))
+
+ def test_help_printing(self):
+ res, out, err = logging_run(['--help-printing'])
+ self.assertEqual(res, 0)
+ self.assertTrue(out.empty())
+ self.assertFalse(err.empty())
+
+ def test_keyboard_interrupt(self):
+ # Note that this also tests running a test marked as SKIP if
+ # you specify it explicitly.
+ self.assertRaises(KeyboardInterrupt, passing_run,
+ ['failures/expected/keyboard.html'], tests_included=True)
+
def test_last_results(self):
- passing_run(['--platform', 'test'], record_results=True)
+ passing_run(['--clobber-old-results'], record_results=True)
(res, buildbot_output, regular_output) = logging_run(
- ['--platform', 'test', '--print-last-failures'])
+ ['--print-last-failures'])
self.assertEqual(regular_output.get(), ['\n\n'])
self.assertEqual(buildbot_output.get(), [])
+ def test_lint_test_files(self):
+ # FIXME: add errors?
+ res, out, err = logging_run(['--lint-test-files'], tests_included=True)
+ self.assertEqual(res, 0)
+ self.assertTrue(out.empty())
+ self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))
+
def test_no_tests_found(self):
- self.assertRaises(SystemExit, logging_run,
- ['--platform', 'test', 'resources'],
- tests_included=True)
- self.assertRaises(SystemExit, logging_run,
- ['--platform', 'test', 'foo'],
- tests_included=True)
+ res, out, err = logging_run(['resources'], tests_included=True)
+ self.assertEqual(res, -1)
+ self.assertTrue(out.empty())
+ self.assertTrue('No tests to run.\n' in err.get())
+
+ def test_no_tests_found_2(self):
+ res, out, err = logging_run(['foo'], tests_included=True)
+ self.assertEqual(res, -1)
+ self.assertTrue(out.empty())
+ self.assertTrue('No tests to run.\n' in err.get())
+
+ def test_randomize_order(self):
+ # FIXME: verify order was shuffled
+ self.assertTrue(passing_run(['--randomize-order']))
+
+ def test_run_chunk(self):
+ # FIXME: verify # of tests run
+ self.assertTrue(passing_run(['--run-chunk', '1:4']))
+
+ def test_run_force(self):
+ # This raises an exception because we run
+ # failures/expected/exception.html, which is normally SKIPped.
+ self.assertRaises(ValueError, logging_run, ['--force'])
+
+ def test_run_part(self):
+ # FIXME: verify # of tests run
+ self.assertTrue(passing_run(['--run-part', '1:2']))
+
+ def test_run_singly(self):
+ self.assertTrue(passing_run(['--run-singly']))
+
+ def test_single_file(self):
+ # FIXME: verify # of tests run
+ self.assertTrue(passing_run(['passes/text.html'], tests_included=True))
+
+ def test_unexpected_failures(self):
+ # Run tests including the unexpected failures.
+ res, out, err = logging_run(tests_included=True)
+ self.assertEqual(res, 1)
+ self.assertFalse(out.empty())
+ self.assertFalse(err.empty())
+
def _mocked_open(original_open, file_list):
def _wrapper(name, mode, encoding):
@@ -144,7 +209,7 @@ class RebaselineTest(unittest.TestCase):
# is missing, update the expected generic location.
file_list = []
codecs.open = _mocked_open(original_open, file_list)
- passing_run(['--platform', 'test', '--pixel-tests',
+ passing_run(['--pixel-tests',
'--reset-results',
'passes/image.html',
'failures/expected/missing_image.html'],
@@ -165,7 +230,7 @@ class RebaselineTest(unittest.TestCase):
# is mssing, then create a new expectation in the platform dir.
file_list = []
codecs.open = _mocked_open(original_open, file_list)
- passing_run(['--platform', 'test', '--pixel-tests',
+ passing_run(['--pixel-tests',
'--new-baseline',
'passes/image.html',
'failures/expected/missing_image.html'],
@@ -208,6 +273,7 @@ class DryrunTest(unittest.TestCase):
if sys.platform != "darwin":
return
+ self.assertTrue(passing_run(['--platform', 'test']))
self.assertTrue(passing_run(['--platform', 'dryrun',
'fast/html']))
self.assertTrue(passing_run(['--platform', 'dryrun-mac',
@@ -223,6 +289,11 @@ class TestThread(dump_render_tree_thread.WatchableThread):
self._timeout_queue = Queue.Queue()
def run(self):
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
self._thread_id = thread.get_ident()
try:
self._started_queue.put('')
@@ -284,8 +355,11 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
self.assertTrue(interrupted)
def test_timeout(self):
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
interrupted = self.run_one_thread('Timeout')
self.assertFalse(interrupted)
+ oc.restore_output()
def test_exception(self):
self.assertRaises(ValueError, self.run_one_thread, 'Exception')
@@ -293,6 +367,8 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
class StandaloneFunctionsTest(unittest.TestCase):
def test_log_wedged_thread(self):
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
logger = run_webkit_tests._log
astream = array_stream.ArrayStream()
handler = TestHandler(astream)
@@ -310,6 +386,7 @@ class StandaloneFunctionsTest(unittest.TestCase):
self.assertFalse(astream.empty())
self.assertFalse(child_thread.isAlive())
+ oc.restore_output()
def test_find_thread_stack(self):
id, stack = sys._current_frames().items()[0]
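
The OutputCapture calls threaded through the tests above keep deliberately noisy paths (the wedged-thread dump, the timeout case) from writing to the real console during a unit-test run. webkitpy's helper is used here as a capture_output()/restore_output() pair; a minimal stand-in with the same shape (whether the real helper returns the captured text is an assumption) would be:

    import io
    import sys

    class OutputCaptureSketch(object):
        def capture_output(self):
            # Swap the process-wide streams for in-memory buffers.
            self._saved = (sys.stdout, sys.stderr)
            sys.stdout, sys.stderr = io.StringIO(), io.StringIO()

        def restore_output(self):
            # Put the real streams back and hand over the noise.
            out = sys.stdout.getvalue()
            err = sys.stderr.getvalue()
            sys.stdout, sys.stderr = self._saved
            return out, err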