[SCM] WebKit Debian packaging branch, debian/experimental, updated. upstream/1.3.3-9427-gc2be6fc

dpranke at chromium.org
Wed Dec 22 12:52:21 UTC 2010


The following commit has been merged in the debian/experimental branch:
commit e0afd20b9a079034772ac5d033da2a69e639f7db
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Tue Aug 31 21:53:52 2010 +0000

    2010-08-31  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Ojan Vafai.
    
            new-run-webkit-tests: add more unit tests
    
            Add more unit tests for new-run-webkit-tests; we now cover all but
            the most obscure code paths in the generic code. We still need to
            add coverage for the http server and web socket paths, and add better
            coverage of the platform-specific logic. Note that the rebaselining
            tool is still not well tested.
    
            Also clean up some of the configuration logic for the printing
            module and the way it interacts with the Python logging module; that
            is a crufty interface, to be certain.
    
            https://bugs.webkit.org/show_bug.cgi?id=44902
    
            * Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Added.
            * Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Added.
            * Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Added.
            * Scripts/webkitpy/layout_tests/data/passes/error.html: Added.
            * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
            * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
            * Scripts/webkitpy/layout_tests/layout_package/printing.py:
            * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
            * Scripts/webkitpy/layout_tests/port/base.py:
            * Scripts/webkitpy/layout_tests/port/test.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@66542 268f45cc-cd09-0410-ab3c-d52691b4dbfc
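
    The printing.py changes below replace the old logging.basicConfig() call with
    an explicit swap of the root logger's handler so that the configuration can be
    undone later (see _configure_logging()/_restore_logging() in the diff). A
    minimal sketch of that handler-swap pattern, using hypothetical names rather
    than the actual webkitpy functions:

        import logging

        def _swap_root_handler(stream, level=logging.INFO, fmt='%(message)s'):
            # Install a StreamHandler that writes to `stream` on the root logger
            # and return whatever handler (if any) it displaced, so the caller
            # can put it back later.
            root = logging.getLogger()
            handler = logging.StreamHandler(stream)
            handler.setFormatter(logging.Formatter(fmt))
            if root.handlers:
                old_handler = root.handlers[0]
                root.handlers[0] = handler
            else:
                old_handler = None
                root.addHandler(handler)
            root.setLevel(level)
            return old_handler

        def _restore_root_handler(old_handler):
            # Undo _swap_root_handler(); a no-op if there was nothing to restore.
            root = logging.getLogger()
            if old_handler is not None and root.handlers:
                root.handlers[0] = old_handler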

diff --git a/WebKitTools/ChangeLog b/WebKitTools/ChangeLog
index e0cd80a..d10fd41 100644
--- a/WebKitTools/ChangeLog
+++ b/WebKitTools/ChangeLog
@@ -1,3 +1,34 @@
+2010-08-31  Dirk Pranke  <dpranke at chromium.org>
+
+        Reviewed by Ojan Vafai.
+
+        new-run-webkit-tests: add more unit tests
+
+        Add more unit tests for new-run-webkit-tests; we now cover all but
+        the most obscure code paths in the generic code. We still need to
+        add coverage for the http server and web socket paths, and add better
+        coverage of the platform-specific logic. Note that the rebaselining
+        tool is still not well tested.
+
+        Also clean up some of the configuration logic for the printing
+        module and the way it interacts with the Python logging module; that
+        is a crufty interface, to be certain.
+        
+        https://bugs.webkit.org/show_bug.cgi?id=44902
+
+        * Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Added.
+        * Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Added.
+        * Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Added.
+        * Scripts/webkitpy/layout_tests/data/passes/error.html: Added.
+        * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+        * Scripts/webkitpy/layout_tests/layout_package/printing.py:
+        * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+        * Scripts/webkitpy/layout_tests/port/base.py:
+        * Scripts/webkitpy/layout_tests/port/test.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
 2010-08-31  Darin Adler  <darin at apple.com>
 
         Reviewed by Anders Carlsson.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html
new file mode 100644
index 0000000..38c54e3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html
@@ -0,0 +1 @@
+exception
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html
new file mode 100644
index 0000000..c253983
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html
@@ -0,0 +1 @@
+keyboard
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt
new file mode 100644
index 0000000..9427269
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt
@@ -0,0 +1 @@
+error-txt
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html
new file mode 100644
index 0000000..8276753
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html
@@ -0,0 +1 @@
+error
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt b/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
index 6e66caa..16556e3 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
@@ -8,3 +8,5 @@ WONTFIX : failures/expected/missing_image.html = MISSING PASS
 WONTFIX : failures/expected/missing_text.html = MISSING PASS
 WONTFIX : failures/expected/text.html = TEXT
 WONTFIX : failures/expected/timeout.html = TIMEOUT
+WONTFIX SKIP : failures/expected/keyboard.html = CRASH
+WONTFIX SKIP : failures/expected/exception.html = CRASH
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 6343400..ec33086 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -169,6 +169,11 @@ class SingleTestThread(threading.Thread):
         self._output_dir = output_dir
 
     def run(self):
+        self._covered_run()
+
+    def _covered_run(self):
+        # FIXME: this is a separate routine to work around a bug
+        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         test_info = self._test_info
         driver = self._port.create_driver(self._image_path, self._shell_args)
         driver.start()
@@ -287,6 +292,11 @@ class TestShellThread(WatchableThread):
     def run(self):
         """Delegate main work to a helper method and watch for uncaught
         exceptions."""
+        self._covered_run()
+
+    def _covered_run(self):
+        # FIXME: this is a separate routine to work around a bug
+        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         self._thread_id = thread.get_ident()
         self._start_time = time.time()
         self._num_tests = 0
@@ -303,9 +313,9 @@ class TestShellThread(WatchableThread):
             self._exception_info = sys.exc_info()
             self._stop_time = time.time()
             # Re-raise it and die.
-            _log.error('%s dying: %s' % (self.getName(),
+            _log.error('%s dying, exception raised: %s' % (self.getName(),
                        self._exception_info))
-            raise
+
         self._stop_time = time.time()
 
     def run_in_main_thread(self, test_runner, result_summary):
@@ -321,14 +331,8 @@ class TestShellThread(WatchableThread):
 
         If test_runner is not None, then we call test_runner.UpdateSummary()
         with the results of each test."""
-        batch_size = 0
+        batch_size = self._options.batch_size
         batch_count = 0
-        if self._options.batch_size:
-            try:
-                batch_size = int(self._options.batch_size)
-            except:
-                _log.info("Ignoring invalid batch size '%s'" %
-                          self._options.batch_size)
 
         # Append tests we're running to the existing tests_run.txt file.
         # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
index a9e015f..1632a0a 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -126,8 +126,8 @@ def print_options():
     ]
 
 
-def configure_logging(options, meter):
-    """Configures the logging system."""
+def _configure_logging(options, meter):
+    """Configures the logging system. Return the previous handler, if any."""
     log_fmt = '%(message)s'
     log_datefmt = '%y%m%d %H:%M:%S'
     log_level = logging.INFO
@@ -136,9 +136,23 @@ def configure_logging(options, meter):
                    '%(message)s')
         log_level = logging.DEBUG
 
-    logging.basicConfig(level=log_level, format=log_fmt,
-                        datefmt=log_datefmt, stream=meter)
+    root = logging.getLogger()
+    handler = logging.StreamHandler(meter)
+    handler.setFormatter(logging.Formatter(log_fmt, None))
+    if not root.handlers:
+        old_handler = None
+        root.addHandler(handler)
+    else:
+        old_handler = root.handlers[0]
+        root.handlers[0] = handler
+    root.setLevel(log_level)
+    return old_handler
+
 
+def _restore_logging(handler):
+    root = logging.getLogger()
+    if root and root.handlers[0]:
+        root.handlers[0] = handler
 
 def parse_print_options(print_options, verbose, child_processes,
                         is_fully_parallel):
@@ -237,11 +251,20 @@ class Printer(object):
 
         self._meter = metered_stream.MeteredStream(options.verbose,
                                                    regular_output)
-        configure_logging(self._options, self._meter)
+        self._old_handler = _configure_logging(self._options, self._meter)
 
         self.switches = parse_print_options(options.print_options,
             options.verbose, child_processes, is_fully_parallel)
 
+    def cleanup(self):
+        """Restore logging configuration to its initial settings."""
+        _restore_logging(self._old_handler)
+        self._old_handler = None
+
+    def __del__(self):
+        if self._old_handler:
+            _restore_logging(self._old_handler)
+
     # These two routines just hide the implementation of the switches.
     def disabled(self, option):
         return not option in self.switches
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
index 40c691f..2ba9d7a 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -37,6 +37,7 @@ import unittest
 import logging
 
 from webkitpy.common import array_stream
+from webkitpy.common.system import logtesting
 from webkitpy.layout_tests import port
 from webkitpy.layout_tests.layout_package import printing
 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -53,25 +54,24 @@ def get_options(args):
 
 class TestUtilityFunctions(unittest.TestCase):
     def test_configure_logging(self):
-        # FIXME: We need to figure out how to reset the basic logger.
-        # FIXME: If other testing classes call logging.basicConfig() then
-        # FIXME: these calls become no-ops and we can't control the
-        # FIXME: configuration to test things properly.
         options, args = get_options([])
         stream = array_stream.ArrayStream()
-        printing.configure_logging(options, stream)
+        handler = printing._configure_logging(options, stream)
         logging.info("this should be logged")
-        # self.assertFalse(stream.empty())
+        self.assertFalse(stream.empty())
 
         stream.reset()
         logging.debug("this should not be logged")
-        # self.assertTrue(stream.empty())
+        self.assertTrue(stream.empty())
+
+        printing._restore_logging(handler)
 
         stream.reset()
         options, args = get_options(['--verbose'])
-        printing.configure_logging(options, stream)
+        handler = printing._configure_logging(options, stream)
         logging.debug("this should be logged")
-        # self.assertFalse(stream.empty())
+        self.assertFalse(stream.empty())
+        printing._restore_logging(handler)
 
     def test_print_options(self):
         options, args = get_options([])
@@ -421,11 +421,12 @@ class  Testprinter(unittest.TestCase):
         self.assertFalse(err.empty())
         self.assertTrue(out.empty())
 
-    def test_write(self):
+    def test_write_nothing(self):
         printer, err, out = self.get_printer(['--print', 'nothing'])
         printer.write("foo")
         self.assertTrue(err.empty())
 
+    def test_write_misc(self):
         printer, err, out = self.get_printer(['--print', 'misc'])
         printer.write("foo")
         self.assertFalse(err.empty())
@@ -433,6 +434,7 @@ class  Testprinter(unittest.TestCase):
         printer.write("foo", "config")
         self.assertTrue(err.empty())
 
+    def test_write_everything(self):
         printer, err, out = self.get_printer(['--print', 'everything'])
         printer.write("foo")
         self.assertFalse(err.empty())
@@ -440,11 +442,10 @@ class  Testprinter(unittest.TestCase):
         printer.write("foo", "config")
         self.assertFalse(err.empty())
 
-        # FIXME: this should be logged somewhere, but it actually
-        # disappears into the ether in the logging subsystem.
+    def test_write_verbose(self):
         printer, err, out = self.get_printer(['--verbose'])
         printer.write("foo")
-        self.assertTrue(err.empty())
+        self.assertTrue(not err.empty() and "foo" in err.get()[0])
         self.assertTrue(out.empty())
 
     def test_print_unexpected_results(self):
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
index af1af93..0dda774 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
@@ -327,7 +327,6 @@ class Port(object):
         if not self._webkit_base_dir:
             abspath = os.path.abspath(__file__)
             self._webkit_base_dir = abspath[0:abspath.find('WebKitTools')]
-            _log.debug("Using WebKit root: %s" % self._webkit_base_dir)
 
         return os.path.join(self._webkit_base_dir, *comps)
 
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
index d36b540..e309334 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
@@ -151,7 +151,10 @@ class TestDriver(base.Driver):
     def run_test(self, uri, timeoutms, image_hash):
         basename = uri[(uri.rfind("/") + 1):uri.rfind(".html")]
 
-        error = ''
+        if 'error' in basename:
+            error = basename + "_error\n"
+        else:
+            error = ''
         checksum = None
         # There are four currently supported types of tests: text, image,
         # image hash (checksum), and stderr output. The fake output
@@ -170,10 +173,13 @@ class TestDriver(base.Driver):
         # will allow us to see if any results get crossed by the rest of the
         # program.
         if 'failures' in uri:
+            if 'keyboard' in basename:
+                raise KeyboardInterrupt
+            if 'exception' in basename:
+                raise ValueError('exception from ' + basename)
+
             crash = 'crash' in basename
             timeout = 'timeout' in basename
-            if 'error' in basename:
-                error = basename + "_error\n"
             if 'text' in basename:
                 output = basename + '_failed-txt\n'
             else:
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 7163e1b..b1e1f6c 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -106,12 +106,11 @@ class TestInfo:
         self._image_hash = None
 
     def _read_image_hash(self):
-        try:
-            with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
-                return hash_file.read()
-        except IOError, e:
-            if errno.ENOENT != e.errno:
-                raise
+        if not os.path.exists(self._expected_hash_path):
+            return None
+
+        with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
+            return hash_file.read()
 
     def image_hash(self):
         # Read the image_hash lazily to reduce startup time.
@@ -336,8 +335,8 @@ class TestRunner:
         self._printer.print_expected("Found:  %d tests" %
                                      (len(self._test_files)))
         if not num_all_test_files:
-            _log.critical("No tests to run.")
-            sys.exit(1)
+            _log.critical('No tests to run.')
+            return None
 
         skipped = set()
         if num_all_test_files > 1 and not self._options.force:
@@ -726,8 +725,11 @@ class TestRunner:
         Return:
           The number of unexpected results (0 == success)
         """
-        if not self._test_files:
-            return 0
+        # gather_test_files() must have been called first to initialize us.
+        # If we didn't find any files to test, we've errored out already in
+        # prepare_lists_and_print_output().
+        assert(len(self._test_files))
+
         start_time = time.time()
 
         if self.needs_http():
@@ -1422,6 +1424,8 @@ def run(port_obj, options, args, regular_output=sys.stderr,
 
     printer.print_update("Preparing tests ...")
     result_summary = test_runner.prepare_lists_and_print_output()
+    if not result_summary:
+        return -1
 
     port_obj.setup_test_run()
 
@@ -1433,6 +1437,8 @@ def run(port_obj, options, args, regular_output=sys.stderr,
 
     port_obj.stop_helper()
 
+    printer.cleanup()
+
     _log.debug("Exit status: %d" % num_unexpected_results)
     return num_unexpected_results
 
@@ -1597,7 +1603,7 @@ def parse_args(args=None):
         #   Restart DumpRenderTree every n tests (default: 1000)
         optparse.make_option("--batch-size",
             help=("Run the tests in batches (n), after every n tests, "
-                  "DumpRenderTree is relaunched.")),
+                  "DumpRenderTree is relaunched."), type="int", default=0),
         # old-run-webkit-tests calls --run-singly: -1|--singly
         # Isolate each test case run (implies --nthly 1 --verbose)
         optparse.make_option("--run-singly", action="store_true",
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index 3a3b14e..4cbfdfc 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -41,6 +41,7 @@ import threading
 import unittest
 
 from webkitpy.common import array_stream
+from webkitpy.common.system import outputcapture
 from webkitpy.layout_tests import port
 from webkitpy.layout_tests import run_webkit_tests
 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -48,75 +49,139 @@ from webkitpy.layout_tests.layout_package import dump_render_tree_thread
 from webkitpy.thirdparty.mock import Mock
 
 
-def passing_run(args, port_obj=None, record_results=False,
+def passing_run(args=[], port_obj=None, record_results=False,
                 tests_included=False):
-    args.extend(['--print', 'nothing'])
+    new_args = ['--print', 'nothing']
+    if not '--platform' in args:
+        new_args.extend(['--platform', 'test'])
+    if not record_results:
+        new_args.append('--no-record-results')
+    new_args.extend(args)
     if not tests_included:
         # We use the glob to test that globbing works.
-        args.extend(['passes', 'failures/expected/*'])
-    if not record_results:
-        args.append('--no-record-results')
-    options, args = run_webkit_tests.parse_args(args)
+        new_args.extend(['passes', 'failures/expected/*'])
+    options, parsed_args = run_webkit_tests.parse_args(new_args)
     if port_obj is None:
         port_obj = port.get(options.platform, options)
-    res = run_webkit_tests.run(port_obj, options, args)
+    res = run_webkit_tests.run(port_obj, options, parsed_args)
     return res == 0
 
 
-def logging_run(args, tests_included=False):
-    args.extend(['--no-record-results'])
+def logging_run(args=[], tests_included=False):
+    new_args = ['--no-record-results']
+    if not '--platform' in args:
+        new_args.extend(['--platform', 'test'])
+    if args:
+        new_args.extend(args)
     if not tests_included:
-        args.extend(['passes', 'failures/expected/*'])
-    options, args = run_webkit_tests.parse_args(args)
+        new_args.extend(['passes', 'failures/expected/*'])
+    options, parsed_args = run_webkit_tests.parse_args(new_args)
     port_obj = port.get(options.platform, options)
     buildbot_output = array_stream.ArrayStream()
     regular_output = array_stream.ArrayStream()
-    res = run_webkit_tests.run(port_obj, options, args,
+    res = run_webkit_tests.run(port_obj, options, parsed_args,
                                buildbot_output=buildbot_output,
                                regular_output=regular_output)
     return (res, buildbot_output, regular_output)
 
 
 class MainTest(unittest.TestCase):
-    def test_fast(self):
-        self.assertTrue(passing_run(['--platform', 'test']))
-        self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
-        self.assertTrue(passing_run(['--platform', 'test',
-                                     'passes/text.html'], tests_included=True))
+    def test_basic(self):
+        self.assertTrue(passing_run())
 
-    def test_unexpected_failures(self):
-        # Run tests including the unexpected failures.
-        self.assertFalse(passing_run(['--platform', 'test'],
-                         tests_included=True))
+    def test_batch_size(self):
+        # FIXME: verify # of tests run
+        self.assertTrue(passing_run(['--batch-size', '2']))
 
-    def test_one_child_process(self):
+    def test_child_process_1(self):
         (res, buildbot_output, regular_output) = logging_run(
-             ['--platform', 'test', '--print', 'config', '--child-processes',
-              '1'])
+             ['--print', 'config', '--child-processes', '1'])
         self.assertTrue('Running one DumpRenderTree\n'
                         in regular_output.get())
 
-    def test_two_child_processes(self):
+    def test_child_processes_2(self):
         (res, buildbot_output, regular_output) = logging_run(
-             ['--platform', 'test', '--print', 'config', '--child-processes',
-              '2'])
+             ['--print', 'config', '--child-processes', '2'])
         self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                         in regular_output.get())
 
+    def test_exception_raised(self):
+        self.assertRaises(ValueError, logging_run,
+            ['failures/expected/exception.html'], tests_included=True)
+
+    def test_full_results_html(self):
+        # FIXME: verify html?
+        self.assertTrue(passing_run(['--full-results-html']))
+
+    def test_help_printing(self):
+        res, out, err = logging_run(['--help-printing'])
+        self.assertEqual(res, 0)
+        self.assertTrue(out.empty())
+        self.assertFalse(err.empty())
+
+    def test_keyboard_interrupt(self):
+        # Note that this also tests running a test marked as SKIP if
+        # you specify it explicitly.
+        self.assertRaises(KeyboardInterrupt, passing_run,
+            ['failures/expected/keyboard.html'], tests_included=True)
+
     def test_last_results(self):
-        passing_run(['--platform', 'test'], record_results=True)
+        passing_run(['--clobber-old-results'], record_results=True)
         (res, buildbot_output, regular_output) = logging_run(
-            ['--platform', 'test', '--print-last-failures'])
+            ['--print-last-failures'])
         self.assertEqual(regular_output.get(), ['\n\n'])
         self.assertEqual(buildbot_output.get(), [])
 
+    def test_lint_test_files(self):
+        # FIXME:  add errors?
+        res, out, err = logging_run(['--lint-test-files'], tests_included=True)
+        self.assertEqual(res, 0)
+        self.assertTrue(out.empty())
+        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))
+
     def test_no_tests_found(self):
-        self.assertRaises(SystemExit, logging_run,
-                          ['--platform', 'test', 'resources'],
-                          tests_included=True)
-        self.assertRaises(SystemExit, logging_run,
-                          ['--platform', 'test', 'foo'],
-                          tests_included=True)
+        res, out, err = logging_run(['resources'], tests_included=True)
+        self.assertEqual(res, -1)
+        self.assertTrue(out.empty())
+        self.assertTrue('No tests to run.\n' in err.get())
+
+    def test_no_tests_found_2(self):
+        res, out, err = logging_run(['foo'], tests_included=True)
+        self.assertEqual(res, -1)
+        self.assertTrue(out.empty())
+        self.assertTrue('No tests to run.\n' in err.get())
+
+    def test_randomize_order(self):
+        # FIXME: verify order was shuffled
+        self.assertTrue(passing_run(['--randomize-order']))
+
+    def test_run_chunk(self):
+        # FIXME: verify # of tests run
+        self.assertTrue(passing_run(['--run-chunk', '1:4']))
+
+    def test_run_force(self):
+        # This raises an exception because we run
+        # failures/expected/exception.html, which is normally SKIPped.
+        self.assertRaises(ValueError, logging_run, ['--force'])
+
+    def test_run_part(self):
+        # FIXME: verify # of tests run
+        self.assertTrue(passing_run(['--run-part', '1:2']))
+
+    def test_run_singly(self):
+        self.assertTrue(passing_run(['--run-singly']))
+
+    def test_single_file(self):
+        # FIXME: verify # of tests run
+        self.assertTrue(passing_run(['passes/text.html'], tests_included=True))
+
+    def test_unexpected_failures(self):
+        # Run tests including the unexpected failures.
+        res, out, err = logging_run(tests_included=True)
+        self.assertEqual(res, 1)
+        self.assertFalse(out.empty())
+        self.assertFalse(err.empty())
+
 
 def _mocked_open(original_open, file_list):
     def _wrapper(name, mode, encoding):
@@ -144,7 +209,7 @@ class RebaselineTest(unittest.TestCase):
             # is missing, update the expected generic location.
             file_list = []
             codecs.open = _mocked_open(original_open, file_list)
-            passing_run(['--platform', 'test', '--pixel-tests',
+            passing_run(['--pixel-tests',
                          '--reset-results',
                          'passes/image.html',
                          'failures/expected/missing_image.html'],
@@ -165,7 +230,7 @@ class RebaselineTest(unittest.TestCase):
             # is missing, then create a new expectation in the platform dir.
             file_list = []
             codecs.open = _mocked_open(original_open, file_list)
-            passing_run(['--platform', 'test', '--pixel-tests',
+            passing_run(['--pixel-tests',
                          '--new-baseline',
                          'passes/image.html',
                          'failures/expected/missing_image.html'],
@@ -208,6 +273,7 @@ class DryrunTest(unittest.TestCase):
         if sys.platform != "darwin":
             return
 
+        self.assertTrue(passing_run(['--platform', 'test']))
         self.assertTrue(passing_run(['--platform', 'dryrun',
                                      'fast/html']))
         self.assertTrue(passing_run(['--platform', 'dryrun-mac',
@@ -223,6 +289,11 @@ class TestThread(dump_render_tree_thread.WatchableThread):
         self._timeout_queue = Queue.Queue()
 
     def run(self):
+        self._covered_run()
+
+    def _covered_run(self):
+        # FIXME: this is a separate routine to work around a bug
+        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         self._thread_id = thread.get_ident()
         try:
             self._started_queue.put('')
@@ -284,8 +355,11 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
         self.assertTrue(interrupted)
 
     def test_timeout(self):
+        oc = outputcapture.OutputCapture()
+        oc.capture_output()
         interrupted = self.run_one_thread('Timeout')
         self.assertFalse(interrupted)
+        oc.restore_output()
 
     def test_exception(self):
         self.assertRaises(ValueError, self.run_one_thread, 'Exception')
@@ -293,6 +367,8 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
 
 class StandaloneFunctionsTest(unittest.TestCase):
     def test_log_wedged_thread(self):
+        oc = outputcapture.OutputCapture()
+        oc.capture_output()
         logger = run_webkit_tests._log
         astream = array_stream.ArrayStream()
         handler = TestHandler(astream)
@@ -310,6 +386,7 @@ class StandaloneFunctionsTest(unittest.TestCase):
 
         self.assertFalse(astream.empty())
         self.assertFalse(child_thread.isAlive())
+        oc.restore_output()
 
     def test_find_thread_stack(self):
         id, stack = sys._current_frames().items()[0]
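
The run()/_covered_run() split that appears in several hunks above is a generic
workaround for the coverage.py issue referenced in the FIXME comments
(http://bitbucket.org/ned/coveragepy/issue/85): code executed directly in a
thread's run() method can be missed by coverage, so the real body is moved into
a helper method that coverage does trace. A minimal, illustrative sketch of the
pattern (CoveredThread is a hypothetical class, not part of webkitpy):

    import threading

    class CoveredThread(threading.Thread):
        def run(self):
            # Delegate immediately; per the FIXMEs above, coverage.py can
            # miss lines executed directly in run(), but it traces the
            # frames of the functions that run() calls.
            self._covered_run()

        def _covered_run(self):
            # All of the real work lives here so it shows up in the
            # coverage report.
            print("doing the actual work")

    if __name__ == '__main__':
        t = CoveredThread()
        t.start()
        t.join()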

-- 
WebKit Debian packaging


