[SCM] WebKit Debian packaging branch, debian/experimental, updated. debian/1.3.8-1-1049-g2e11a8e

dpranke at chromium.org dpranke at chromium.org
Fri Jan 21 14:36:18 UTC 2011


The following commit has been merged in the debian/experimental branch:
commit ac8a7cfb5d6802addfa4add58039dfe1d91d3a28
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Thu Dec 23 01:26:29 2010 +0000

    2010-12-22  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Ojan Vafai.
    
            This change splits out the TestRunner class into its own file.
            When we fork the code for message passing, we will create a new
            TestRunner, and this change will allow that to be more modular.
    
            While we're at it, split out TestInput and ResultSummary into
            their own files, to reduce the size of run_webkit_tests to
            something more manageable.
    
            https://bugs.webkit.org/show_bug.cgi?id=51092
    
            * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
            * Scripts/webkitpy/layout_tests/layout_package/result_summary.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_input.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
    
     2010-12-22  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Ojan Vafai.
    
            nrwt multiprocessing - start over, prepare to fork the code
    
            This code cleans up the signatures and implementation of the
            TestRunner class so we can easily fork it to run either the
            stable implementation or the new, unstable message-passing
            implementation. The two variants will have different
            implementations of the run_tests() method. We will switch
            between the two based on the setting for the '--worker-model'
            switch. We rename the two currently valid values to 'old-inline'
            and 'old-threads'.
    
            https://bugs.webkit.org/show_bug.cgi?id=51081
    
            * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@74525 268f45cc-cd09-0410-ab3c-d52691b4dbfc
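
For readers tracking the rename, here is a minimal sketch of the import mapping implied by the diff below. The surrounding objects (expectations, test_files, port, retry_summary) are placeholders for values callers already have; only the module paths and function names are taken from the patch.

    # Old locations (before this change):
    #     from webkitpy.layout_tests import run_webkit_tests
    #     rs = run_webkit_tests.ResultSummary(expectations, test_files)
    #     run_webkit_tests.summarize_unexpected_results(port, expectations, rs, retry_summary)
    #
    # New locations (after this change), as exercised in printing_unittest.py below:
    from webkitpy.layout_tests.layout_package import result_summary
    from webkitpy.layout_tests.layout_package import test_runner

    rs = result_summary.ResultSummary(expectations, test_files)
    unexpected = test_runner.summarize_unexpected_results(
        port, expectations, rs, retry_summary)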

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 898a301..65aaa70 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -2,6 +2,28 @@
 
         Reviewed by Ojan Vafai.
 
+        This change splits out the TestRunner class into its own file.
+        When we fork the code for message passing, we will create a new
+        TestRunner, and this change will allow that to be more modular.
+
+        While we're at it, split out TestInput and ResultSummary into
+        their own files, to reduce the size of run_webkit_tests to
+        something more manageable.
+
+        https://bugs.webkit.org/show_bug.cgi?id=51092
+
+        * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+        * Scripts/webkitpy/layout_tests/layout_package/result_summary.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_input.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
+ 2010-12-22  Dirk Pranke  <dpranke at chromium.org>
+
+        Reviewed by Ojan Vafai.
+
         nrwt multiprocessing - start over, prepare to fork the code
  
         This code cleans up the signatures and implementation of the
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
index 27a6a29..0e478c8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -39,11 +39,13 @@ import logging
 from webkitpy.common import array_stream
 from webkitpy.common.system import logtesting
 from webkitpy.layout_tests import port
+
 from webkitpy.layout_tests.layout_package import printing
-from webkitpy.layout_tests.layout_package import test_results
+from webkitpy.layout_tests.layout_package import result_summary
 from webkitpy.layout_tests.layout_package import test_expectations
 from webkitpy.layout_tests.layout_package import test_failures
-from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.layout_package import test_results
+from webkitpy.layout_tests.layout_package import test_runner
 
 
 def get_options(args):
@@ -149,7 +151,7 @@ class  Testprinter(unittest.TestCase):
             self._port.test_platform_name(), is_debug_mode=False,
             is_lint_mode=False)
 
-        rs = run_webkit_tests.ResultSummary(expectations, test_paths)
+        rs = result_summary.ResultSummary(expectations, test_paths)
         return test_paths, rs, expectations
 
     def test_help_printer(self):
@@ -515,7 +517,7 @@ class  Testprinter(unittest.TestCase):
                 retry.add(self.get_result('passes/text.html'), True)
                 retry.add(self.get_result('failures/expected/timeout.html'), True)
                 retry.add(self.get_result('failures/expected/crash.html'), True)
-            unexpected_results = run_webkit_tests.summarize_unexpected_results(
+            unexpected_results = test_runner.summarize_unexpected_results(
                 self._port, exp, rs, retry)
             return unexpected_results
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py
new file mode 100644
index 0000000..80fd6ac
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor at inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run layout tests."""
+
+import logging
+
+import test_expectations
+
+_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+class ResultSummary(object):
+    """A class for partitioning the test results we get into buckets.
+
+    This class is basically a glorified struct, so we don't bother with any
+    information hiding."""
+
+    def __init__(self, expectations, test_files):
+        self.total = len(test_files)
+        self.remaining = self.total
+        self.expectations = expectations
+        self.expected = 0
+        self.unexpected = 0
+        self.unexpected_failures = 0
+        self.unexpected_crashes_or_timeouts = 0
+        self.tests_by_expectation = {}
+        self.tests_by_timeline = {}
+        self.results = {}
+        self.unexpected_results = {}
+        self.failures = {}
+        self.tests_by_expectation[test_expectations.SKIP] = set()
+        for expectation in TestExpectationsFile.EXPECTATIONS.values():
+            self.tests_by_expectation[expectation] = set()
+        for timeline in TestExpectationsFile.TIMELINES.values():
+            self.tests_by_timeline[timeline] = (
+                expectations.get_tests_with_timeline(timeline))
+
+    def add(self, result, expected):
+        """Add a TestResult into the appropriate bin.
+
+        Args:
+          result: TestResult
+          expected: whether the result was what we expected it to be.
+        """
+
+        self.tests_by_expectation[result.type].add(result.filename)
+        self.results[result.filename] = result
+        self.remaining -= 1
+        if len(result.failures):
+            self.failures[result.filename] = result.failures
+        if expected:
+            self.expected += 1
+        else:
+            self.unexpected_results[result.filename] = result.type
+            self.unexpected += 1
+            if len(result.failures):
+                self.unexpected_failures += 1
+            if result.type == test_expectations.CRASH or result.type == test_expectations.TIMEOUT:
+                self.unexpected_crashes_or_timeouts += 1
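
To make the bookkeeping above concrete, here is a small self-contained sketch of the same bucketing idea. The FakeResult class and the numeric constants are hand-rolled stand-ins, not the real webkitpy TestResult or test_expectations values; only the counting logic mirrors ResultSummary.add().

    # Stand-ins for the real TestResult and expectation constants (illustration only).
    CRASH, TIMEOUT, TEXT, PASS = range(4)

    class FakeResult(object):
        def __init__(self, filename, result_type, failures=()):
            self.filename = filename
            self.type = result_type
            self.failures = list(failures)

    class MiniSummary(object):
        """Counts results the same way ResultSummary.add() does."""
        def __init__(self, total):
            self.total = total
            self.remaining = total
            self.expected = 0
            self.unexpected = 0
            self.unexpected_failures = 0
            self.unexpected_crashes_or_timeouts = 0
            self.unexpected_results = {}

        def add(self, result, expected):
            self.remaining -= 1
            if expected:
                self.expected += 1
            else:
                self.unexpected_results[result.filename] = result.type
                self.unexpected += 1
                if result.failures:
                    self.unexpected_failures += 1
                if result.type in (CRASH, TIMEOUT):
                    self.unexpected_crashes_or_timeouts += 1

    summary = MiniSummary(total=2)
    summary.add(FakeResult('fast/a.html', PASS), expected=True)
    summary.add(FakeResult('fast/b.html', CRASH, failures=['crash']), expected=False)
    assert (summary.expected, summary.unexpected,
            summary.unexpected_crashes_or_timeouts) == (1, 1, 1)
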
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
new file mode 100644
index 0000000..4b027c0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor at inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestInput:
+    """Groups information about a test for easy passing of data."""
+
+    def __init__(self, filename, timeout):
+        """Holds the input parameters for a test.
+        Args:
+          filename: Full path to the test.
+          timeout: Timeout in msecs the driver should use while running the test.
+        """
+        # FIXME: filename should really be test_name as a relative path.
+        self.filename = filename
+        self.timeout = timeout
+        # The image_hash is used to avoid doing an image dump if the
+        # checksums match. The image_hash is set later, and only if it is needed
+        # for the test.
+        self.image_hash = None
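
TestInput itself has no webkitpy dependencies, so its use is straightforward. A hypothetical caller might look like the following; the path, timeout, and checksum are made-up example values, and the import line assumes the module layout introduced in this patch:

    # from test_input import TestInput   (as done in test_runner.py below)
    test = TestInput('LayoutTests/fast/example/test.html', 6000)  # timeout in msecs
    assert test.image_hash is None        # only set later, if a pixel comparison is needed
    test.image_hash = 'd41d8cd98f00b204e9800998ecf8427e'          # example checksum string
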
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
new file mode 100644
index 0000000..24d04ca
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -0,0 +1,1218 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor at inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+The TestRunner class runs a series of tests (TestType interface) against a set
+of test files.  If a test file fails a TestType, it returns a list of TestFailure
+objects to the TestRunner.  The TestRunner then aggregates the TestFailures to
+create a final report.
+"""
+
+from __future__ import with_statement
+
+import codecs
+import errno
+import logging
+import math
+import os
+import Queue
+import random
+import shutil
+import sys
+import time
+
+from result_summary import ResultSummary
+from test_input import TestInput
+
+import dump_render_tree_thread
+import json_layout_results_generator
+import message_broker
+import printing
+import test_expectations
+import test_failures
+import test_results
+import test_results_uploader
+
+from webkitpy.thirdparty import simplejson
+from webkitpy.tool import grammar
+
+_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+def summarize_unexpected_results(port_obj, expectations, result_summary,
+                                 retry_summary):
+    """Summarize any unexpected results as a dict.
+
+    FIXME: split this data structure into a separate class?
+
+    Args:
+        port_obj: interface to port-specific hooks
+        expectations: test_expectations.TestExpectations object
+        result_summary: summary object from initial test runs
+        retry_summary: summary object from final test run of retried tests
+    Returns:
+        A dictionary containing a summary of the unexpected results from the
+        run, with the following fields:
+        'version': a version indicator (1 in this version)
+        'fixable': # of fixable tests (NOW - PASS)
+        'skipped': # of skipped tests (NOW & SKIPPED)
+        'num_regressions': # of non-flaky failures
+        'num_flaky': # of flaky failures
+        'num_passes': # of unexpected passes
+        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+    """
+    results = {}
+    results['version'] = 1
+
+    tbe = result_summary.tests_by_expectation
+    tbt = result_summary.tests_by_timeline
+    results['fixable'] = len(tbt[test_expectations.NOW] -
+                                tbe[test_expectations.PASS])
+    results['skipped'] = len(tbt[test_expectations.NOW] &
+                                tbe[test_expectations.SKIP])
+
+    num_passes = 0
+    num_flaky = 0
+    num_regressions = 0
+    keywords = {}
+    for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
+        keywords[v] = k.upper()
+
+    tests = {}
+    for filename, result in result_summary.unexpected_results.iteritems():
+        # Note that if a test crashed in the original run, we ignore
+        # whether or not it crashed when we retried it (if we retried it),
+        # and always consider the result not flaky.
+        test = port_obj.relative_test_filename(filename)
+        expected = expectations.get_expectations_string(filename)
+        actual = [keywords[result]]
+
+        if result == test_expectations.PASS:
+            num_passes += 1
+        elif result == test_expectations.CRASH:
+            num_regressions += 1
+        else:
+            if filename not in retry_summary.unexpected_results:
+                actual.extend(expectations.get_expectations_string(
+                    filename).split(" "))
+                num_flaky += 1
+            else:
+                retry_result = retry_summary.unexpected_results[filename]
+                if result != retry_result:
+                    actual.append(keywords[retry_result])
+                    num_flaky += 1
+                else:
+                    num_regressions += 1
+
+        tests[test] = {}
+        tests[test]['expected'] = expected
+        tests[test]['actual'] = " ".join(actual)
+
+    results['tests'] = tests
+    results['num_passes'] = num_passes
+    results['num_flaky'] = num_flaky
+    results['num_regressions'] = num_regressions
+
+    return results
+
+
+class TestRunInterruptedException(Exception):
+    """Raised when a test run should be stopped immediately."""
+    def __init__(self, reason):
+        self.reason = reason
+
+
+class TestRunner:
+    """A class for managing running a series of tests on a series of layout
+    test files."""
+
+    HTTP_SUBDIR = os.sep.join(['', 'http', ''])
+    WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
+
+    # The per-test timeout in milliseconds, if no --time-out-ms option was
+    # given to run_webkit_tests. This should correspond to the default timeout
+    # in DumpRenderTree.
+    DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
+
+    def __init__(self, port, options, printer):
+        """Initialize test runner data structures.
+
+        Args:
+          port: an object implementing port-specific functionality
+          options: a dictionary of command line options
+          printer: a Printer object to record updates to.
+        """
+        self._port = port
+        self._options = options
+        self._printer = printer
+        self._message_broker = None
+
+        # disable wss server. need to install pyOpenSSL on buildbots.
+        # self._websocket_secure_server = websocket_server.PyWebSocket(
+        #        options.results_directory, use_tls=True, port=9323)
+
+        # a set of test files, and the same tests as a list
+        self._test_files = set()
+        self._test_files_list = None
+        self._result_queue = Queue.Queue()
+        self._retrying = False
+
+    def collect_tests(self, args, last_unexpected_results):
+        """Find all the files to test.
+
+        Args:
+          args: list of test arguments from the command line
+          last_unexpected_results: list of unexpected results to retest, if any
+
+        """
+        paths = [self._strip_test_dir_prefix(arg) for arg in args if arg and arg != '']
+        paths += last_unexpected_results
+        if self._options.test_list:
+            paths += read_test_files(self._options.test_list)
+        self._test_files = self._port.tests(paths)
+
+    def _strip_test_dir_prefix(self, path):
+        if path.startswith(LAYOUT_TESTS_DIRECTORY):
+            return path[len(LAYOUT_TESTS_DIRECTORY):]
+        return path
+
+    def lint(self):
+        lint_failed = False
+
+        # Creating the expectations for each platform/configuration pair does
+        # all the test list parsing and ensures it's correct syntax (e.g. no
+        # dupes).
+        for platform_name in self._port.test_platform_names():
+            try:
+                self.parse_expectations(platform_name, is_debug_mode=True)
+            except test_expectations.ParseError:
+                lint_failed = True
+            try:
+                self.parse_expectations(platform_name, is_debug_mode=False)
+            except test_expectations.ParseError:
+                lint_failed = True
+
+        self._printer.write("")
+        if lint_failed:
+            _log.error("Lint failed.")
+            return -1
+
+        _log.info("Lint succeeded.")
+        return 0
+
+    def parse_expectations(self, test_platform_name, is_debug_mode):
+        """Parse the expectations from the test_list files and return a data
+        structure holding them. Throws an error if the test_list files have
+        invalid syntax."""
+        if self._options.lint_test_files:
+            test_files = None
+        else:
+            test_files = self._test_files
+
+        expectations_str = self._port.test_expectations()
+        overrides_str = self._port.test_expectations_overrides()
+        self._expectations = test_expectations.TestExpectations(
+            self._port, test_files, expectations_str, test_platform_name,
+            is_debug_mode, self._options.lint_test_files,
+            overrides=overrides_str)
+        return self._expectations
+
+    def prepare_lists_and_print_output(self):
+        """Create appropriate subsets of test lists and returns a
+        ResultSummary object. Also prints expected test counts.
+        """
+
+        # Remove skipped - both fixable and ignored - files from the
+        # top-level list of files to test.
+        num_all_test_files = len(self._test_files)
+        self._printer.print_expected("Found:  %d tests" %
+                                     (len(self._test_files)))
+        if not num_all_test_files:
+            _log.critical('No tests to run.')
+            return None
+
+        skipped = set()
+        if num_all_test_files > 1 and not self._options.force:
+            skipped = self._expectations.get_tests_with_result_type(
+                           test_expectations.SKIP)
+            self._test_files -= skipped
+
+        # Create a sorted list of test files so the subset chunk,
+        # if used, contains alphabetically consecutive tests.
+        self._test_files_list = list(self._test_files)
+        if self._options.randomize_order:
+            random.shuffle(self._test_files_list)
+        else:
+            self._test_files_list.sort()
+
+        # If the user specifies they just want to run a subset of the tests,
+        # just grab a subset of the non-skipped tests.
+        if self._options.run_chunk or self._options.run_part:
+            chunk_value = self._options.run_chunk or self._options.run_part
+            test_files = self._test_files_list
+            try:
+                (chunk_num, chunk_len) = chunk_value.split(":")
+                chunk_num = int(chunk_num)
+                assert(chunk_num >= 0)
+                test_size = int(chunk_len)
+                assert(test_size > 0)
+            except:
+                _log.critical("invalid chunk '%s'" % chunk_value)
+                return None
+
+            # Get the number of tests
+            num_tests = len(test_files)
+
+            # Get the start offset of the slice.
+            if self._options.run_chunk:
+                chunk_len = test_size
+                # In this case chunk_num can be really large. We need
+                # to make the slice fit within the current number of tests.
+                slice_start = (chunk_num * chunk_len) % num_tests
+            else:
+                # Validate the data.
+                assert(test_size <= num_tests)
+                assert(chunk_num <= test_size)
+
+                # To count the chunk_len, and make sure we don't skip
+                # some tests, we round to the next value that fits exactly
+                # all the parts.
+                rounded_tests = num_tests
+                if rounded_tests % test_size != 0:
+                    rounded_tests = (num_tests + test_size -
+                                     (num_tests % test_size))
+
+                chunk_len = rounded_tests / test_size
+                slice_start = chunk_len * (chunk_num - 1)
+                # It does not matter if we go over test_size.
+
+            # Get the end offset of the slice.
+            slice_end = min(num_tests, slice_start + chunk_len)
+
+            files = test_files[slice_start:slice_end]
+
+            tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
+                (slice_end - slice_start), slice_start, slice_end, num_tests)
+            self._printer.print_expected(tests_run_msg)
+
+            # If we reached the end and we don't have enough tests, we run some
+            # from the beginning.
+            if slice_end - slice_start < chunk_len:
+                extra = chunk_len - (slice_end - slice_start)
+                extra_msg = ('   last chunk is partial, appending [0:%d]' %
+                            extra)
+                self._printer.print_expected(extra_msg)
+                tests_run_msg += "\n" + extra_msg
+                files.extend(test_files[0:extra])
+            tests_run_filename = os.path.join(self._options.results_directory,
+                                              "tests_run.txt")
+            with codecs.open(tests_run_filename, "w", "utf-8") as file:
+                file.write(tests_run_msg + "\n")
+
+            len_skip_chunk = int(len(files) * len(skipped) /
+                                 float(len(self._test_files)))
+            skip_chunk_list = list(skipped)[0:len_skip_chunk]
+            skip_chunk = set(skip_chunk_list)
+
+            # Update expectations so that the stats are calculated correctly.
+            # We need to pass a list that includes the right # of skipped files
+            # to parse_expectations() so that ResultSummary() will get the correct
+            # stats. So, we add in the subset of skipped files, and then
+            # subtract them back out.
+            self._test_files_list = files + skip_chunk_list
+            self._test_files = set(self._test_files_list)
+
+            self._expectations = self.parse_expectations(
+                self._port.test_platform_name(),
+                self._options.configuration == 'Debug')
+
+            self._test_files = set(files)
+            self._test_files_list = files
+        else:
+            skip_chunk = skipped
+
+        result_summary = ResultSummary(self._expectations,
+            self._test_files | skip_chunk)
+        self._print_expected_results_of_type(result_summary,
+            test_expectations.PASS, "passes")
+        self._print_expected_results_of_type(result_summary,
+            test_expectations.FAIL, "failures")
+        self._print_expected_results_of_type(result_summary,
+            test_expectations.FLAKY, "flaky")
+        self._print_expected_results_of_type(result_summary,
+            test_expectations.SKIP, "skipped")
+
+        if self._options.force:
+            self._printer.print_expected('Running all tests, including '
+                                         'skips (--force)')
+        else:
+            # Note that we don't actually run the skipped tests (they were
+            # subtracted out of self._test_files, above), but we stub out the
+            # results here so the statistics can remain accurate.
+            for test in skip_chunk:
+                result = test_results.TestResult(test,
+                    failures=[], test_run_time=0, total_time_for_all_diffs=0,
+                    time_for_diffs=0)
+                result.type = test_expectations.SKIP
+                result_summary.add(result, expected=True)
+        self._printer.print_expected('')
+
+        return result_summary
+
+    def _get_dir_for_test_file(self, test_file):
+        """Returns the highest-level directory by which to shard the given
+        test file."""
+        index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY)
+
+        test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):]
+        test_file_parts = test_file.split(os.sep, 1)
+        directory = test_file_parts[0]
+        test_file = test_file_parts[1]
+
+        # The http tests are very stable on mac/linux.
+        # TODO(ojan): Make the http server on Windows use apache so we can
+        # shard the http tests there as well. Switching to apache is
+        # what made them stable on linux/mac.
+        return_value = directory
+        while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
+                and test_file.find(os.sep) >= 0):
+            test_file_parts = test_file.split(os.sep, 1)
+            directory = test_file_parts[0]
+            return_value = os.path.join(return_value, directory)
+            test_file = test_file_parts[1]
+
+        return return_value
+
+    def _get_test_input_for_file(self, test_file):
+        """Returns the appropriate TestInput object for the file. Mostly this
+        is used for looking up the timeout value (in ms) to use for the given
+        test."""
+        if self._test_is_slow(test_file):
+            return TestInput(test_file, self._options.slow_time_out_ms)
+        return TestInput(test_file, self._options.time_out_ms)
+
+    def _test_requires_lock(self, test_file):
+        """Return True if the test needs to be locked when
+        running multiple copies of NRWTs."""
+        split_path = test_file.split(os.sep)
+        return 'http' in split_path or 'websocket' in split_path
+
+    def _test_is_slow(self, test_file):
+        return self._expectations.has_modifier(test_file,
+                                               test_expectations.SLOW)
+
+    def _shard_tests(self, test_files, use_real_shards):
+        """Groups tests into batches.
+        This helps ensure that tests that depend on each other (aka bad tests!)
+        continue to run together as most cross-test dependencies tend to
+        occur within the same directory. If use_real_shards is False, we
+        put each (non-HTTP/websocket) test into its own shard for maximum
+        concurrency instead of trying to do any sort of real sharding.
+
+        Return:
+            A list of lists of TestInput objects.
+        """
+        # FIXME: when we added http locking, we changed how this works such
+        # that we always lump all of the HTTP threads into a single shard.
+        # That will slow down experimental-fully-parallel, but it's unclear
+        # what the best alternative is, short of completely revamping how we
+        # track when to grab the lock.
+
+        test_lists = []
+        tests_to_http_lock = []
+        if not use_real_shards:
+            for test_file in test_files:
+                test_input = self._get_test_input_for_file(test_file)
+                if self._test_requires_lock(test_file):
+                    tests_to_http_lock.append(test_input)
+                else:
+                    test_lists.append((".", [test_input]))
+        else:
+            tests_by_dir = {}
+            for test_file in test_files:
+                directory = self._get_dir_for_test_file(test_file)
+                test_input = self._get_test_input_for_file(test_file)
+                if self._test_requires_lock(test_file):
+                    tests_to_http_lock.append(test_input)
+                else:
+                    tests_by_dir.setdefault(directory, [])
+                    tests_by_dir[directory].append(test_input)
+            # Sort by the number of tests in the dir so that the ones with the
+            # most tests get run first in order to maximize parallelization.
+            # Number of tests is a good enough, but not perfect, approximation
+            # of how long that set of tests will take to run. We can't just use
+            # a PriorityQueue until we move to Python 2.6.
+            for directory in tests_by_dir:
+                test_list = tests_by_dir[directory]
+                # Keep the tests in alphabetical order.
+                # FIXME: Remove once tests are fixed so they can be run in any
+                # order.
+                test_list.reverse()
+                test_list_tuple = (directory, test_list)
+                test_lists.append(test_list_tuple)
+            test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+        # Put the http tests first. There are only a couple hundred of them,
+        # but each http test takes a very long time to run, so sorting by the
+        # number of tests doesn't accurately capture how long they take to run.
+        if tests_to_http_lock:
+            tests_to_http_lock.reverse()
+            test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
+
+        return test_lists
+
+    def _contains_tests(self, subdir):
+        for test_file in self._test_files:
+            if test_file.find(subdir) >= 0:
+                return True
+        return False
+
+    def _num_workers(self):
+        return int(self._options.child_processes)
+
+    def _run_tests(self, file_list, result_summary):
+        """Runs the tests in the file_list.
+
+        Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
+            test_timings, individual_test_timings)
+            interrupted is whether the run was interrupted
+            keyboard_interrupted is whether the interruption was because someone
+              typed Ctrl-C
+            thread_timings is a list of dicts with the total runtime
+              of each thread with 'name', 'num_tests', 'total_time' properties
+            test_timings is a list of timings for each sharded subdirectory
+              of the form [time, directory_name, num_tests]
+            individual_test_timings is a list of run times for each test
+              in the form {filename:filename, test_run_time:test_run_time}
+            result_summary: summary object to populate with the results
+        """
+
+        self._printer.print_update('Sharding tests ...')
+        num_workers = self._num_workers()
+        test_lists = self._shard_tests(file_list,
+            num_workers > 1 and not self._options.experimental_fully_parallel)
+        filename_queue = Queue.Queue()
+        for item in test_lists:
+            filename_queue.put(item)
+
+        self._printer.print_update('Starting %s ...' %
+                                   grammar.pluralize('worker', num_workers))
+        self._message_broker = message_broker.get(self._port, self._options)
+        broker = self._message_broker
+        self._current_filename_queue = filename_queue
+        self._current_result_summary = result_summary
+
+        if not self._options.dry_run:
+            threads = broker.start_workers(self)
+        else:
+            threads = {}
+
+        self._printer.print_update("Starting testing ...")
+        keyboard_interrupted = False
+        interrupted = False
+        if not self._options.dry_run:
+            try:
+                broker.run_message_loop()
+            except KeyboardInterrupt:
+                _log.info("Interrupted, exiting")
+                broker.cancel_workers()
+                keyboard_interrupted = True
+                interrupted = True
+            except TestRunInterruptedException, e:
+                _log.info(e.reason)
+                broker.cancel_workers()
+                interrupted = True
+            except:
+                # Unexpected exception; don't try to clean up workers.
+                _log.info("Exception raised, exiting")
+                raise
+
+        thread_timings, test_timings, individual_test_timings = \
+            self._collect_timing_info(threads)
+
+        broker.cleanup()
+        self._message_broker = None
+        return (interrupted, keyboard_interrupted, thread_timings, test_timings,
+                individual_test_timings)
+
+    def update(self):
+        self.update_summary(self._current_result_summary)
+
+    def _collect_timing_info(self, threads):
+        test_timings = {}
+        individual_test_timings = []
+        thread_timings = []
+
+        for thread in threads:
+            thread_timings.append({'name': thread.getName(),
+                                   'num_tests': thread.get_num_tests(),
+                                   'total_time': thread.get_total_time()})
+            test_timings.update(thread.get_test_group_timing_stats())
+            individual_test_timings.extend(thread.get_test_results())
+
+        return (thread_timings, test_timings, individual_test_timings)
+
+    def needs_http(self):
+        """Returns whether the test runner needs an HTTP server."""
+        return self._contains_tests(self.HTTP_SUBDIR)
+
+    def needs_websocket(self):
+        """Returns whether the test runner needs a WEBSOCKET server."""
+        return self._contains_tests(self.WEBSOCKET_SUBDIR)
+
+    def set_up_run(self):
+        """Configures the system to be ready to run tests.
+
+        Returns a ResultSummary object if we should continue to run tests,
+        or None if we should abort.
+
+        """
+        # This must be started before we check the system dependencies,
+        # since the helper may do things to make the setup correct.
+        self._printer.print_update("Starting helper ...")
+        self._port.start_helper()
+
+        # Check that the system dependencies (themes, fonts, ...) are correct.
+        if not self._options.nocheck_sys_deps:
+            self._printer.print_update("Checking system dependencies ...")
+            if not self._port.check_sys_deps(self.needs_http()):
+                self._port.stop_helper()
+                return None
+
+        if self._options.clobber_old_results:
+            self._clobber_old_results()
+
+        # Create the output directory if it doesn't already exist.
+        self._port.maybe_make_directory(self._options.results_directory)
+
+        self._port.setup_test_run()
+
+        self._printer.print_update("Preparing tests ...")
+        result_summary = self.prepare_lists_and_print_output()
+        if not result_summary:
+            return None
+
+        return result_summary
+
+    def run(self, result_summary):
+        """Run all our tests on all our test files.
+
+        For each test file, we run each test type. If there are any failures,
+        we collect them for reporting.
+
+        Args:
+          result_summary: a summary object tracking the test results.
+
+        Return:
+          The number of unexpected results (0 == success)
+        """
+        # gather_test_files() must have been called first to initialize us.
+        # If we didn't find any files to test, we've errored out already in
+        # prepare_lists_and_print_output().
+        assert(len(self._test_files))
+
+        start_time = time.time()
+
+        interrupted, keyboard_interrupted, thread_timings, test_timings, \
+            individual_test_timings = (
+            self._run_tests(self._test_files_list, result_summary))
+
+        # We exclude the crashes from the list of results to retry, because
+        # we want to treat even a potentially flaky crash as an error.
+        failures = self._get_failures(result_summary, include_crashes=False)
+        retry_summary = result_summary
+        while (len(failures) and self._options.retry_failures and
+            not self._retrying and not interrupted):
+            _log.info('')
+            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
+            _log.info('')
+            self._retrying = True
+            retry_summary = ResultSummary(self._expectations, failures.keys())
+            # Note that we intentionally ignore the return value here.
+            self._run_tests(failures.keys(), retry_summary)
+            failures = self._get_failures(retry_summary, include_crashes=True)
+
+        end_time = time.time()
+
+        self._print_timing_statistics(end_time - start_time,
+                                      thread_timings, test_timings,
+                                      individual_test_timings,
+                                      result_summary)
+
+        self._print_result_summary(result_summary)
+
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+        self._printer.print_one_line_summary(result_summary.total,
+                                             result_summary.expected,
+                                             result_summary.unexpected)
+
+        unexpected_results = summarize_unexpected_results(self._port,
+            self._expectations, result_summary, retry_summary)
+        self._printer.print_unexpected_results(unexpected_results)
+
+        if (self._options.record_results and not self._options.dry_run and
+            not interrupted):
+            # Write the same data to log files and upload generated JSON files
+            # to appengine server.
+            self._upload_json_files(unexpected_results, result_summary,
+                                    individual_test_timings)
+
+        # Write the summary to disk (results.html) and display it if requested.
+        if not self._options.dry_run:
+            wrote_results = self._write_results_html_file(result_summary)
+            if self._options.show_results and wrote_results:
+                self._show_results_html_file()
+
+        # Now that we've completed all the processing we can, we re-raise
+        # a KeyboardInterrupt if necessary so the caller can handle it.
+        if keyboard_interrupted:
+            raise KeyboardInterrupt
+
+        # Ignore flaky failures and unexpected passes so we don't turn the
+        # bot red for those.
+        return unexpected_results['num_regressions']
+
+    def clean_up_run(self):
+        """Restores the system after we're done running tests."""
+
+        _log.debug("flushing stdout")
+        sys.stdout.flush()
+        _log.debug("flushing stderr")
+        sys.stderr.flush()
+        _log.debug("stopping helper")
+        self._port.stop_helper()
+
+    def update_summary(self, result_summary):
+        """Update the summary and print results with any completed tests."""
+        while True:
+            try:
+                result = test_results.TestResult.loads(self._result_queue.get_nowait())
+            except Queue.Empty:
+                return
+
+            expected = self._expectations.matches_an_expected_result(
+                result.filename, result.type, self._options.pixel_tests)
+            result_summary.add(result, expected)
+            exp_str = self._expectations.get_expectations_string(
+                result.filename)
+            got_str = self._expectations.expectation_to_string(result.type)
+            self._printer.print_test_result(result, expected, exp_str, got_str)
+            self._printer.print_progress(result_summary, self._retrying,
+                                         self._test_files_list)
+
+            def interrupt_if_at_failure_limit(limit, count, message):
+                if limit and count >= limit:
+                    raise TestRunInterruptedException(message % count)
+
+            interrupt_if_at_failure_limit(
+                self._options.exit_after_n_failures,
+                result_summary.unexpected_failures,
+                "Aborting run since %d failures were reached")
+            interrupt_if_at_failure_limit(
+                self._options.exit_after_n_crashes_or_timeouts,
+                result_summary.unexpected_crashes_or_timeouts,
+                "Aborting run since %d crashes or timeouts were reached")
+
+    def _clobber_old_results(self):
+        # Just clobber the actual test results directories since the other
+        # files in the results directory are explicitly used for cross-run
+        # tracking.
+        self._printer.print_update("Clobbering old results in %s" %
+                                   self._options.results_directory)
+        layout_tests_dir = self._port.layout_tests_dir()
+        possible_dirs = self._port.test_dirs()
+        for dirname in possible_dirs:
+            if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
+                shutil.rmtree(os.path.join(self._options.results_directory,
+                                           dirname),
+                              ignore_errors=True)
+
+    def _get_failures(self, result_summary, include_crashes):
+        """Filters a dict of results and returns only the failures.
+
+        Args:
+          result_summary: the results of the test run
+          include_crashes: whether crashes are included in the output.
+            We use False when finding the list of failures to retry
+            to see if the results were flaky. Although the crashes may also be
+            flaky, we treat them as if they aren't so that they're not ignored.
+        Returns:
+          a dict of files -> results
+        """
+        failed_results = {}
+        for test, result in result_summary.unexpected_results.iteritems():
+            if (result == test_expectations.PASS or
+                result == test_expectations.CRASH and not include_crashes):
+                continue
+            failed_results[test] = result
+
+        return failed_results
+
+    def _upload_json_files(self, unexpected_results, result_summary,
+                        individual_test_timings):
+        """Writes the results of the test run as JSON files into the results
+        dir and uploads the files to the appengine server.
+
+        There are three different files written into the results dir:
+          unexpected_results.json: A short list of any unexpected results.
+            This is used by the buildbots to display results.
+          expectations.json: This is used by the flakiness dashboard.
+          results.json: A full list of the results - used by the flakiness
+            dashboard and the aggregate results dashboard.
+
+        Args:
+          unexpected_results: dict of unexpected results
+          result_summary: full summary object
+          individual_test_timings: list of test times (used by the flakiness
+            dashboard).
+        """
+        results_directory = self._options.results_directory
+        _log.debug("Writing JSON files in %s." % results_directory)
+        unexpected_json_path = os.path.join(results_directory, "unexpected_results.json")
+        with codecs.open(unexpected_json_path, "w", "utf-8") as file:
+            simplejson.dump(unexpected_results, file, sort_keys=True, indent=2)
+
+        # Write a json file of the test_expectations.txt file for the layout
+        # tests dashboard.
+        expectations_path = os.path.join(results_directory, "expectations.json")
+        expectations_json = \
+            self._expectations.get_expectations_json_for_all_platforms()
+        with codecs.open(expectations_path, "w", "utf-8") as file:
+            file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
+
+        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
+            self._port, self._options.builder_name, self._options.build_name,
+            self._options.build_number, self._options.results_directory,
+            BUILDER_BASE_URL, individual_test_timings,
+            self._expectations, result_summary, self._test_files_list,
+            not self._options.upload_full_results,
+            self._options.test_results_server,
+            "layout-tests",
+            self._options.master_name)
+
+        _log.debug("Finished writing JSON files.")
+
+        json_files = ["expectations.json"]
+        if self._options.upload_full_results:
+            json_files.append("results.json")
+        else:
+            json_files.append("incremental_results.json")
+
+        generator.upload_json_files(json_files)
+
+    def _print_config(self):
+        """Prints the configuration for the test run."""
+        p = self._printer
+        p.print_config("Using port '%s'" % self._port.name())
+        p.print_config("Placing test results in %s" %
+                       self._options.results_directory)
+        if self._options.new_baseline:
+            p.print_config("Placing new baselines in %s" %
+                           self._port.baseline_path())
+        p.print_config("Using %s build" % self._options.configuration)
+        if self._options.pixel_tests:
+            p.print_config("Pixel tests enabled")
+        else:
+            p.print_config("Pixel tests disabled")
+
+        p.print_config("Regular timeout: %s, slow test timeout: %s" %
+                       (self._options.time_out_ms,
+                        self._options.slow_time_out_ms))
+
+        if self._num_workers() == 1:
+            p.print_config("Running one %s" % self._port.driver_name())
+        else:
+            p.print_config("Running %s %ss in parallel" %
+                           (self._options.child_processes,
+                            self._port.driver_name()))
+        p.print_config('Command line: ' +
+                       ' '.join(self._port.driver_cmd_line()))
+        p.print_config("Worker model: %s" % self._options.worker_model)
+        p.print_config("")
+
+    def _print_expected_results_of_type(self, result_summary,
+                                        result_type, result_type_str):
+        """Print the number of the tests in a given result class.
+
+        Args:
+          result_summary - the object containing all the results to report on
+          result_type - the particular result type to report in the summary.
+          result_type_str - a string description of the result_type.
+        """
+        tests = self._expectations.get_tests_with_result_type(result_type)
+        now = result_summary.tests_by_timeline[test_expectations.NOW]
+        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+
+        # We use a fancy format string in order to print the data out in a
+        # nicely-aligned table.
+        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+                  % (self._num_digits(now), self._num_digits(wontfix)))
+        self._printer.print_expected(fmtstr %
+            (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+    def _num_digits(self, num):
+        """Returns the number of digits needed to represent the length of a
+        sequence."""
+        ndigits = 1
+        if len(num):
+            ndigits = int(math.log10(len(num))) + 1
+        return ndigits
+
+    def _print_timing_statistics(self, total_time, thread_timings,
+                               directory_test_timings, individual_test_timings,
+                               result_summary):
+        """Record timing-specific information for the test run.
+
+        Args:
+          total_time: total elapsed time (in seconds) for the test run
+          thread_timings: wall clock time each thread ran for
+          directory_test_timings: timing by directory
+          individual_test_timings: timing by file
+          result_summary: summary object for the test run
+        """
+        self._printer.print_timing("Test timing:")
+        self._printer.print_timing("  %6.2f total testing time" % total_time)
+        self._printer.print_timing("")
+        self._printer.print_timing("Thread timing:")
+        cuml_time = 0
+        for t in thread_timings:
+            self._printer.print_timing("    %10s: %5d tests, %6.2f secs" %
+                  (t['name'], t['num_tests'], t['total_time']))
+            cuml_time += t['total_time']
+        self._printer.print_timing("   %6.2f cumulative, %6.2f optimal" %
+              (cuml_time, cuml_time / int(self._options.child_processes)))
+        self._printer.print_timing("")
+
+        self._print_aggregate_test_statistics(individual_test_timings)
+        self._print_individual_test_times(individual_test_timings,
+                                          result_summary)
+        self._print_directory_timings(directory_test_timings)
+
+    def _print_aggregate_test_statistics(self, individual_test_timings):
+        """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+        Args:
+          individual_test_timings: List of TestResults for all tests.
+        """
+        test_types = []  # Unit tests don't actually produce any timings.
+        if individual_test_timings:
+            test_types = individual_test_timings[0].time_for_diffs.keys()
+        times_for_dump_render_tree = []
+        times_for_diff_processing = []
+        times_per_test_type = {}
+        for test_type in test_types:
+            times_per_test_type[test_type] = []
+
+        for test_stats in individual_test_timings:
+            times_for_dump_render_tree.append(test_stats.test_run_time)
+            times_for_diff_processing.append(
+                test_stats.total_time_for_all_diffs)
+            time_for_diffs = test_stats.time_for_diffs
+            for test_type in test_types:
+                times_per_test_type[test_type].append(
+                    time_for_diffs[test_type])
+
+        self._print_statistics_for_test_timings(
+            "PER TEST TIME IN TESTSHELL (seconds):",
+            times_for_dump_render_tree)
+        self._print_statistics_for_test_timings(
+            "PER TEST DIFF PROCESSING TIMES (seconds):",
+            times_for_diff_processing)
+        for test_type in test_types:
+            self._print_statistics_for_test_timings(
+                "PER TEST TIMES BY TEST TYPE: %s" % test_type,
+                times_per_test_type[test_type])
+
+    def _print_individual_test_times(self, individual_test_timings,
+                                  result_summary):
+        """Prints the run times for slow, timeout and crash tests.
+        Args:
+          individual_test_timings: List of TestStats for all tests.
+          result_summary: summary object for test run
+        """
+        # Reverse-sort by the time spent in DumpRenderTree.
+        individual_test_timings.sort(lambda a, b:
+            cmp(b.test_run_time, a.test_run_time))
+
+        num_printed = 0
+        slow_tests = []
+        timeout_or_crash_tests = []
+        unexpected_slow_tests = []
+        for test_tuple in individual_test_timings:
+            filename = test_tuple.filename
+            is_timeout_crash_or_slow = False
+            if self._test_is_slow(filename):
+                is_timeout_crash_or_slow = True
+                slow_tests.append(test_tuple)
+
+            if filename in result_summary.failures:
+                result = result_summary.results[filename].type
+                if (result == test_expectations.TIMEOUT or
+                    result == test_expectations.CRASH):
+                    is_timeout_crash_or_slow = True
+                    timeout_or_crash_tests.append(test_tuple)
+
+            if (not is_timeout_crash_or_slow and
+                num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
+                num_printed = num_printed + 1
+                unexpected_slow_tests.append(test_tuple)
+
+        self._printer.print_timing("")
+        self._print_test_list_timing("%s slowest tests that are not "
+            "marked as SLOW and did not timeout/crash:" %
+            printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+        self._printer.print_timing("")
+        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+        self._printer.print_timing("")
+        self._print_test_list_timing("Tests that timed out or crashed:",
+                                     timeout_or_crash_tests)
+        self._printer.print_timing("")
+
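+    # Bucketing summary for _print_individual_test_times() above: with the
+    # results sorted slowest first, a test marked SLOW goes into slow_tests,
+    # a test that timed out or crashed goes into timeout_or_crash_tests, and
+    # the first printing.NUM_SLOW_TESTS_TO_LOG tests that are neither go into
+    # unexpected_slow_tests.
+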
+    def _print_test_list_timing(self, title, test_list):
+        """Print timing info for each test.
+
+        Args:
+          title: section heading
+          test_list: tests that fall in this section
+        """
+        if self._printer.disabled('slowest'):
+            return
+
+        self._printer.print_timing(title)
+        for test_tuple in test_list:
+            filename = test_tuple.filename[len(
+                self._port.layout_tests_dir()) + 1:]
+            filename = filename.replace('\\', '/')
+            test_run_time = round(test_tuple.test_run_time, 1)
+            self._printer.print_timing("  %s took %s seconds" %
+                                       (filename, test_run_time))
+
+    def _print_directory_timings(self, directory_test_timings):
+        """Print timing info by directory for any directories that
+        take > 10 seconds to run.
+
+        Args:
+          directory_test_timings: time info for each directory
+        """
+        timings = []
+        for directory in directory_test_timings:
+            num_tests, time_for_directory = directory_test_timings[directory]
+            timings.append((round(time_for_directory, 1), directory,
+                            num_tests))
+        timings.sort()
+
+        self._printer.print_timing("Time to process slowest subdirectories:")
+        min_seconds_to_print = 10
+        for timing in timings:
+            if timing[0] > min_seconds_to_print:
+                self._printer.print_timing(
+                    "  %s took %s seconds to run %s tests." % (timing[1],
+                    timing[0], timing[2]))
+        self._printer.print_timing("")
+
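+    # For example (hypothetical numbers), an entry of
+    # directory_test_timings['http/tests/security'] == (250, 42.71) would be
+    # reported by _print_directory_timings() as
+    #   "  http/tests/security took 42.7 seconds to run 250 tests."
+    # while directories under the 10-second threshold are skipped entirely.
+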
+    def _print_statistics_for_test_timings(self, title, timings):
+        """Prints the median, mean and standard deviation of the values in
+        timings.
+
+        Args:
+          title: Title for these timings.
+          timings: A list of floats representing times.
+        """
+        self._printer.print_timing(title)
+        timings.sort()
+
+        num_tests = len(timings)
+        if not num_tests:
+            return
+        percentile90 = timings[int(.9 * num_tests)]
+        percentile99 = timings[int(.99 * num_tests)]
+
+        if num_tests % 2 == 1:
+            # The middle element of the sorted list is the median.
+            median = timings[(num_tests - 1) / 2]
+        else:
+            lower = timings[num_tests / 2 - 1]
+            upper = timings[num_tests / 2]
+            median = (float(lower + upper)) / 2
+
+        mean = sum(timings) / num_tests
+
+        # Accumulate the squared deviations; overwriting the value on each
+        # iteration would compute the deviation of only the last timing.
+        sum_of_deviations = 0
+        for timing in timings:
+            sum_of_deviations += math.pow(timing - mean, 2)
+
+        std_deviation = math.sqrt(sum_of_deviations / num_tests)
+        self._printer.print_timing("  Median:          %6.3f" % median)
+        self._printer.print_timing("  Mean:            %6.3f" % mean)
+        self._printer.print_timing("  90th percentile: %6.3f" % percentile90)
+        self._printer.print_timing("  99th percentile: %6.3f" % percentile99)
+        self._printer.print_timing("  Standard dev:    %6.3f" % std_deviation)
+        self._printer.print_timing("")
+
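+    # A worked example for _print_statistics_for_test_timings() above (purely
+    # illustrative values): for sorted timings [0.1, 0.2, 0.4, 0.9, 5.0],
+    # num_tests is 5, the 90th and 99th percentile indices are
+    # int(.9 * 5) == 4 and int(.99 * 5) == 4 (both pick 5.0), the median is
+    # the middle element 0.4, and the standard deviation is
+    # sqrt(sum((t - mean) ** 2 for t in timings) / num_tests).
+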
+    def _print_result_summary(self, result_summary):
+        """Print a short summary about how many tests passed.
+
+        Args:
+          result_summary: information to log
+        """
+        failed = len(result_summary.failures)
+        skipped = len(
+            result_summary.tests_by_expectation[test_expectations.SKIP])
+        total = result_summary.total
+        passed = total - failed - skipped
+        pct_passed = 0.0
+        if total > 0:
+            pct_passed = float(passed) * 100 / total
+
+        self._printer.print_actual("")
+        self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
+                     (passed, total, pct_passed))
+        self._printer.print_actual("")
+        self._print_result_summary_entry(result_summary,
+            test_expectations.NOW, "Tests to be fixed")
+
+        self._printer.print_actual("")
+        self._print_result_summary_entry(result_summary,
+            test_expectations.WONTFIX,
+            "Tests that will only be fixed if they crash (WONTFIX)")
+        self._printer.print_actual("")
+
+    def _print_result_summary_entry(self, result_summary, timeline,
+                                    heading):
+        """Print a summary block of results for a particular timeline of test.
+
+        Args:
+          result_summary: summary to print results for
+          timeline: the timeline to print results for (NOW, WONTFIX, etc.)
+          heading: a textual description of the timeline
+        """
+        total = len(result_summary.tests_by_timeline[timeline])
+        not_passing = (total -
+           len(result_summary.tests_by_expectation[test_expectations.PASS] &
+               result_summary.tests_by_timeline[timeline]))
+        self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
+
+        for result in TestExpectationsFile.EXPECTATION_ORDER:
+            if result == test_expectations.PASS:
+                continue
+            results = (result_summary.tests_by_expectation[result] &
+                       result_summary.tests_by_timeline[timeline])
+            desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
+            if not_passing and len(results):
+                pct = len(results) * 100.0 / not_passing
+                self._printer.print_actual("  %5d %-24s (%4.1f%%)" %
+                    (len(results), desc[len(results) != 1], pct))
+
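+    # Output sketch for _print_result_summary_entry() (hypothetical counts):
+    # if a timeline has 12 non-passing tests and 7 of them share one
+    # expectation, the entry printed for that expectation looks like
+    #   "      7 <description>            (58.3%)"
+    # where 58.3 == 7 * 100.0 / 12 and <description> comes from
+    # TestExpectationsFile.EXPECTATION_DESCRIPTIONS.
+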
+    def _results_html(self, test_files, failures, title="Test Failures", override_time=None):
+        """
+        test_files = a list of file paths
+        failures = dictionary mapping test paths to failure objects
+        title = title printed at top of test
+        override_time = current time (used by unit tests)
+        """
+        page = """<html>
+  <head>
+    <title>Layout Test Results (%(time)s)</title>
+  </head>
+  <body>
+    <h2>%(title)s (%(time)s)</h2>
+        """ % {'title': title, 'time': override_time or time.asctime()}
+
+        for test_file in sorted(test_files):
+            test_name = self._port.relative_test_filename(test_file)
+            test_url = self._port.filename_to_uri(test_file)
+            page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
+            test_failures = failures.get(test_file, [])
+            for failure in test_failures:
+                page += (u"&nbsp;&nbsp;%s<br/>" %
+                         failure.result_html_output(test_name))
+            page += "</p>\n"
+        page += "</body></html>\n"
+        return page
+
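+    # For a concrete example of the markup produced by _results_html(), see
+    # TestRunnerTest.test_results_html in test_runner_unittest.py (added in
+    # this same change).
+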
+    def _write_results_html_file(self, result_summary):
+        """Write results.html which is a summary of tests that failed.
+
+        Args:
+          result_summary: a summary of the results :)
+
+        Returns:
+          True if any results were written (since expected failures may be
+          omitted)
+        """
+        # test failures
+        if self._options.full_results_html:
+            results_title = "Test Failures"
+            test_files = result_summary.failures.keys()
+        else:
+            results_title = "Unexpected Test Failures"
+            unexpected_failures = self._get_failures(result_summary,
+                include_crashes=True)
+            test_files = unexpected_failures.keys()
+        if not len(test_files):
+            return False
+
+        out_filename = os.path.join(self._options.results_directory,
+                                    "results.html")
+        with codecs.open(out_filename, "w", "utf-8") as results_file:
+            html = self._results_html(test_files, result_summary.failures, results_title)
+            results_file.write(html)
+
+        return True
+
+    def _show_results_html_file(self):
+        """Shows the results.html page."""
+        results_filename = os.path.join(self._options.results_directory,
+                                        "results.html")
+        self._port.show_results_html_file(results_filename)
+
+
+def read_test_files(files):
+    tests = []
+    for file in files:
+        try:
+            with codecs.open(file, 'r', 'utf-8') as file_contents:
+                # FIXME: This could be cleaner using a list comprehension.
+                for line in file_contents:
+                    line = test_expectations.strip_comments(line)
+                    if line:
+                        tests.append(line)
+        except IOError, e:
+            if e.errno == errno.ENOENT:
+                _log.critical('')
+                _log.critical('--test-list file "%s" not found' % file)
+            raise
+    return tests
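+
+
+# Minimal usage sketch for read_test_files() (the path below is hypothetical):
+#
+#   tests = read_test_files(['/tmp/my-test-list.txt'])
+#
+# Each line of the file is passed through test_expectations.strip_comments(),
+# so blank lines and '//' comments are dropped and the remaining lines are
+# returned as a flat list of test paths.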
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
new file mode 100644
index 0000000..3c564ae
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor at inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for TestRunner()."""
+
+import unittest
+
+from webkitpy.thirdparty.mock import Mock
+
+import test_runner
+
+
+class TestRunnerWrapper(test_runner.TestRunner):
+    def _get_test_input_for_file(self, test_file):
+        return test_file
+
+
+class TestRunnerTest(unittest.TestCase):
+    def test_results_html(self):
+        mock_port = Mock()
+        mock_port.relative_test_filename = lambda name: name
+        mock_port.filename_to_uri = lambda name: name
+
+        runner = test_runner.TestRunner(port=mock_port, options=Mock(),
+            printer=Mock())
+        expected_html = u"""<html>
+  <head>
+    <title>Layout Test Results (time)</title>
+  </head>
+  <body>
+    <h2>Title (time)</h2>
+        <p><a href='test_path'>test_path</a><br />
+</p>
+</body></html>
+"""
+        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
+        self.assertEqual(html, expected_html)
+
+    def test_shard_tests(self):
+        # Test that test_runner.TestRunner._shard_tests() really puts
+        # the http tests first in the queue.
+        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
+            printer=Mock())
+
+        test_list = [
+          "LayoutTests/websocket/tests/unicode.htm",
+          "LayoutTests/animations/keyframes.html",
+          "LayoutTests/http/tests/security/view-source-no-refresh.html",
+          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
+          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
+          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
+          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
+          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
+        ]
+
+        expected_tests_to_http_lock = set([
+          'LayoutTests/websocket/tests/unicode.htm',
+          'LayoutTests/http/tests/security/view-source-no-refresh.html',
+          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
+          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
+        ])
+
+        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
+        single_thread_results = runner._shard_tests(test_list, False)
+        multi_thread_results = runner._shard_tests(test_list, True)
+
+        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
+        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
+        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
+        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
+
+
+if __name__ == '__main__':
+    unittest.main()
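+
+# The module can also be run directly, e.g. (assuming webkitpy is on
+# PYTHONPATH, as it is when invoked through the usual WebKit scripts):
+#
+#   python Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py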
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 426d39d..f7e5330 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -28,1280 +28,27 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Run layout tests.
-
-This is a port of the existing webkit test script run-webkit-tests.
-
-The TestRunner class runs a series of tests (TestType interface) against a set
-of test files.  If a test file fails a TestType, it returns a list TestFailure
-objects to the TestRunner.  The TestRunner then aggregates the TestFailures to
-create a final report.
-
-This script reads several files, if they exist in the test_lists subdirectory
-next to this script itself.  Each should contain a list of paths to individual
-tests or entire subdirectories of tests, relative to the outermost test
-directory.  Entire lines starting with '//' (comments) will be ignored.
-
-For details of the files' contents and purposes, see test_lists/README.
-"""
+"""Run layout tests."""
 
 from __future__ import with_statement
 
 import codecs
 import errno
-import glob
 import logging
-import math
 import optparse
 import os
-import platform
-import Queue
-import random
-import re
-import shutil
 import signal
 import sys
-import time
-import traceback
 
-from layout_package import dump_render_tree_thread
-from layout_package import json_layout_results_generator
-from layout_package import message_broker
 from layout_package import printing
-from layout_package import test_expectations
-from layout_package import test_failures
-from layout_package import test_results
-from layout_package import test_results_uploader
+from layout_package import test_runner
 
 from webkitpy.common.system import user
 from webkitpy.thirdparty import simplejson
-from webkitpy.tool import grammar
 
 import port
 
-_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
-
-# Builder base URL where we have the archived test results.
-BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
-
-LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep
-
-TestExpectationsFile = test_expectations.TestExpectationsFile
-
-
-class TestInput:
-    """Groups information about a test for easy passing of data."""
-
-    def __init__(self, filename, timeout):
-        """Holds the input parameters for a test.
-        Args:
-          filename: Full path to the test.
-          timeout: Timeout in msecs the driver should use while running the test
-          """
-        # FIXME: filename should really be test_name as a relative path.
-        self.filename = filename
-        self.timeout = timeout
-        # The image_hash is used to avoid doing an image dump if the
-        # checksums match. The image_hash is set later, and only if it is needed
-        # for the test.
-        self.image_hash = None
-
-
-class TestRunInterruptedException(Exception):
-    """Raised when a test run should be stopped immediately."""
-    def __init__(self, reason):
-        self.reason = reason
-
-
-class ResultSummary(object):
-    """A class for partitioning the test results we get into buckets.
-
-    This class is basically a glorified struct and it's private to this file
-    so we don't bother with any information hiding."""
-
-    def __init__(self, expectations, test_files):
-        self.total = len(test_files)
-        self.remaining = self.total
-        self.expectations = expectations
-        self.expected = 0
-        self.unexpected = 0
-        self.unexpected_failures = 0
-        self.unexpected_crashes_or_timeouts = 0
-        self.tests_by_expectation = {}
-        self.tests_by_timeline = {}
-        self.results = {}
-        self.unexpected_results = {}
-        self.failures = {}
-        self.tests_by_expectation[test_expectations.SKIP] = set()
-        for expectation in TestExpectationsFile.EXPECTATIONS.values():
-            self.tests_by_expectation[expectation] = set()
-        for timeline in TestExpectationsFile.TIMELINES.values():
-            self.tests_by_timeline[timeline] = (
-                expectations.get_tests_with_timeline(timeline))
-
-    def add(self, result, expected):
-        """Add a TestResult into the appropriate bin.
-
-        Args:
-          result: TestResult
-          expected: whether the result was what we expected it to be.
-        """
-
-        self.tests_by_expectation[result.type].add(result.filename)
-        self.results[result.filename] = result
-        self.remaining -= 1
-        if len(result.failures):
-            self.failures[result.filename] = result.failures
-        if expected:
-            self.expected += 1
-        else:
-            self.unexpected_results[result.filename] = result.type
-            self.unexpected += 1
-            if len(result.failures):
-                self.unexpected_failures += 1
-            if result.type == test_expectations.CRASH or result.type == test_expectations.TIMEOUT:
-                self.unexpected_crashes_or_timeouts += 1
-
-
-def summarize_unexpected_results(port_obj, expectations, result_summary,
-                                 retry_summary):
-    """Summarize any unexpected results as a dict.
-
-    FIXME: split this data structure into a separate class?
-
-    Args:
-        port_obj: interface to port-specific hooks
-        expectations: test_expectations.TestExpectations object
-        result_summary: summary object from initial test runs
-        retry_summary: summary object from final test run of retried tests
-    Returns:
-        A dictionary containing a summary of the unexpected results from the
-        run, with the following fields:
-        'version': a version indicator (1 in this version)
-        'fixable': # of fixable tests (NOW - PASS)
-        'skipped': # of skipped tests (NOW & SKIPPED)
-        'num_regressions': # of non-flaky failures
-        'num_flaky': # of flaky failures
-        'num_passes': # of unexpected passes
-        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
-    """
-    results = {}
-    results['version'] = 1
-
-    tbe = result_summary.tests_by_expectation
-    tbt = result_summary.tests_by_timeline
-    results['fixable'] = len(tbt[test_expectations.NOW] -
-                                tbe[test_expectations.PASS])
-    results['skipped'] = len(tbt[test_expectations.NOW] &
-                                tbe[test_expectations.SKIP])
-
-    num_passes = 0
-    num_flaky = 0
-    num_regressions = 0
-    keywords = {}
-    for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
-        keywords[v] = k.upper()
-
-    tests = {}
-    for filename, result in result_summary.unexpected_results.iteritems():
-        # Note that if a test crashed in the original run, we ignore
-        # whether or not it crashed when we retried it (if we retried it),
-        # and always consider the result not flaky.
-        test = port_obj.relative_test_filename(filename)
-        expected = expectations.get_expectations_string(filename)
-        actual = [keywords[result]]
-
-        if result == test_expectations.PASS:
-            num_passes += 1
-        elif result == test_expectations.CRASH:
-            num_regressions += 1
-        else:
-            if filename not in retry_summary.unexpected_results:
-                actual.extend(expectations.get_expectations_string(
-                    filename).split(" "))
-                num_flaky += 1
-            else:
-                retry_result = retry_summary.unexpected_results[filename]
-                if result != retry_result:
-                    actual.append(keywords[retry_result])
-                    num_flaky += 1
-                else:
-                    num_regressions += 1
-
-        tests[test] = {}
-        tests[test]['expected'] = expected
-        tests[test]['actual'] = " ".join(actual)
-
-    results['tests'] = tests
-    results['num_passes'] = num_passes
-    results['num_flaky'] = num_flaky
-    results['num_regressions'] = num_regressions
-
-    return results
-
-
-class TestRunner:
-    """A class for managing running a series of tests on a series of layout
-    test files."""
-
-    HTTP_SUBDIR = os.sep.join(['', 'http', ''])
-    WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
-
-    # The per-test timeout in milliseconds, if no --time-out-ms option was
-    # given to run_webkit_tests. This should correspond to the default timeout
-    # in DumpRenderTree.
-    DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
-
-    def __init__(self, port, options, printer):
-        """Initialize test runner data structures.
-
-        Args:
-          port: an object implementing port-specific
-          options: a dictionary of command line options
-          printer: a Printer object to record updates to.
-        """
-        self._port = port
-        self._options = options
-        self._printer = printer
-        self._message_broker = None
-
-        # disable wss server. need to install pyOpenSSL on buildbots.
-        # self._websocket_secure_server = websocket_server.PyWebSocket(
-        #        options.results_directory, use_tls=True, port=9323)
-
-        # a set of test files, and the same tests as a list
-        self._test_files = set()
-        self._test_files_list = None
-        self._result_queue = Queue.Queue()
-        self._retrying = False
-
-    def collect_tests(self, args, last_unexpected_results):
-        """Find all the files to test.
-
-        Args:
-          args: list of test arguments from the command line
-          last_unexpected_results: list of unexpected results to retest, if any
-
-        """
-        paths = [self._strip_test_dir_prefix(arg) for arg in args if arg and arg != '']
-        paths += last_unexpected_results
-        if self._options.test_list:
-            paths += read_test_files(self._options.test_list)
-        self._test_files = self._port.tests(paths)
-
-    def _strip_test_dir_prefix(self, path):
-        if path.startswith(LAYOUT_TESTS_DIRECTORY):
-            return path[len(LAYOUT_TESTS_DIRECTORY):]
-        return path
-
-    def lint(self):
-        lint_failed = False
-
-        # Creating the expecations for each platform/configuration pair does
-        # all the test list parsing and ensures it's correct syntax (e.g. no
-        # dupes).
-        for platform_name in self._port.test_platform_names():
-            try:
-                self.parse_expectations(platform_name, is_debug_mode=True)
-            except test_expectations.ParseError:
-                lint_failed = True
-            try:
-                self.parse_expectations(platform_name, is_debug_mode=False)
-            except test_expectations.ParseError:
-                lint_failed = True
-
-        self._printer.write("")
-        if lint_failed:
-            _log.error("Lint failed.")
-            return -1
-
-        _log.info("Lint succeeded.")
-        return 0
-
-    def parse_expectations(self, test_platform_name, is_debug_mode):
-        """Parse the expectations from the test_list files and return a data
-        structure holding them. Throws an error if the test_list files have
-        invalid syntax."""
-        if self._options.lint_test_files:
-            test_files = None
-        else:
-            test_files = self._test_files
-
-        expectations_str = self._port.test_expectations()
-        overrides_str = self._port.test_expectations_overrides()
-        self._expectations = test_expectations.TestExpectations(
-            self._port, test_files, expectations_str, test_platform_name,
-            is_debug_mode, self._options.lint_test_files,
-            overrides=overrides_str)
-        return self._expectations
-
-
-    def prepare_lists_and_print_output(self):
-        """Create appropriate subsets of test lists and returns a
-        ResultSummary object. Also prints expected test counts.
-        """
-
-        # Remove skipped - both fixable and ignored - files from the
-        # top-level list of files to test.
-        num_all_test_files = len(self._test_files)
-        self._printer.print_expected("Found:  %d tests" %
-                                     (len(self._test_files)))
-        if not num_all_test_files:
-            _log.critical('No tests to run.')
-            return None
-
-        skipped = set()
-        if num_all_test_files > 1 and not self._options.force:
-            skipped = self._expectations.get_tests_with_result_type(
-                           test_expectations.SKIP)
-            self._test_files -= skipped
-
-        # Create a sorted list of test files so the subset chunk,
-        # if used, contains alphabetically consecutive tests.
-        self._test_files_list = list(self._test_files)
-        if self._options.randomize_order:
-            random.shuffle(self._test_files_list)
-        else:
-            self._test_files_list.sort()
-
-        # If the user specifies they just want to run a subset of the tests,
-        # just grab a subset of the non-skipped tests.
-        if self._options.run_chunk or self._options.run_part:
-            chunk_value = self._options.run_chunk or self._options.run_part
-            test_files = self._test_files_list
-            try:
-                (chunk_num, chunk_len) = chunk_value.split(":")
-                chunk_num = int(chunk_num)
-                assert(chunk_num >= 0)
-                test_size = int(chunk_len)
-                assert(test_size > 0)
-            except:
-                _log.critical("invalid chunk '%s'" % chunk_value)
-                return None
-
-            # Get the number of tests
-            num_tests = len(test_files)
-
-            # Get the start offset of the slice.
-            if self._options.run_chunk:
-                chunk_len = test_size
-                # In this case chunk_num can be really large. We need
-                # to make the slave fit in the current number of tests.
-                slice_start = (chunk_num * chunk_len) % num_tests
-            else:
-                # Validate the data.
-                assert(test_size <= num_tests)
-                assert(chunk_num <= test_size)
-
-                # To count the chunk_len, and make sure we don't skip
-                # some tests, we round to the next value that fits exactly
-                # all the parts.
-                rounded_tests = num_tests
-                if rounded_tests % test_size != 0:
-                    rounded_tests = (num_tests + test_size -
-                                     (num_tests % test_size))
-
-                chunk_len = rounded_tests / test_size
-                slice_start = chunk_len * (chunk_num - 1)
-                # It does not mind if we go over test_size.
-
-            # Get the end offset of the slice.
-            slice_end = min(num_tests, slice_start + chunk_len)
-
-            files = test_files[slice_start:slice_end]
-
-            tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
-                (slice_end - slice_start), slice_start, slice_end, num_tests)
-            self._printer.print_expected(tests_run_msg)
-
-            # If we reached the end and we don't have enough tests, we run some
-            # from the beginning.
-            if slice_end - slice_start < chunk_len:
-                extra = chunk_len - (slice_end - slice_start)
-                extra_msg = ('   last chunk is partial, appending [0:%d]' %
-                            extra)
-                self._printer.print_expected(extra_msg)
-                tests_run_msg += "\n" + extra_msg
-                files.extend(test_files[0:extra])
-            tests_run_filename = os.path.join(self._options.results_directory,
-                                              "tests_run.txt")
-            with codecs.open(tests_run_filename, "w", "utf-8") as file:
-                file.write(tests_run_msg + "\n")
-
-            len_skip_chunk = int(len(files) * len(skipped) /
-                                 float(len(self._test_files)))
-            skip_chunk_list = list(skipped)[0:len_skip_chunk]
-            skip_chunk = set(skip_chunk_list)
-
-            # Update expectations so that the stats are calculated correctly.
-            # We need to pass a list that includes the right # of skipped files
-            # to ParseExpectations so that ResultSummary() will get the correct
-            # stats. So, we add in the subset of skipped files, and then
-            # subtract them back out.
-            self._test_files_list = files + skip_chunk_list
-            self._test_files = set(self._test_files_list)
-
-            self._expectations = self.parse_expectations(
-                self._port.test_platform_name(),
-                self._options.configuration == 'Debug')
-
-            self._test_files = set(files)
-            self._test_files_list = files
-        else:
-            skip_chunk = skipped
-
-        result_summary = ResultSummary(self._expectations,
-            self._test_files | skip_chunk)
-        self._print_expected_results_of_type(result_summary,
-            test_expectations.PASS, "passes")
-        self._print_expected_results_of_type(result_summary,
-            test_expectations.FAIL, "failures")
-        self._print_expected_results_of_type(result_summary,
-            test_expectations.FLAKY, "flaky")
-        self._print_expected_results_of_type(result_summary,
-            test_expectations.SKIP, "skipped")
-
-        if self._options.force:
-            self._printer.print_expected('Running all tests, including '
-                                         'skips (--force)')
-        else:
-            # Note that we don't actually run the skipped tests (they were
-            # subtracted out of self._test_files, above), but we stub out the
-            # results here so the statistics can remain accurate.
-            for test in skip_chunk:
-                result = test_results.TestResult(test,
-                    failures=[], test_run_time=0, total_time_for_all_diffs=0,
-                    time_for_diffs=0)
-                result.type = test_expectations.SKIP
-                result_summary.add(result, expected=True)
-        self._printer.print_expected('')
-
-        return result_summary
-
-    def _get_dir_for_test_file(self, test_file):
-        """Returns the highest-level directory by which to shard the given
-        test file."""
-        index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY)
-
-        test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):]
-        test_file_parts = test_file.split(os.sep, 1)
-        directory = test_file_parts[0]
-        test_file = test_file_parts[1]
-
-        # The http tests are very stable on mac/linux.
-        # TODO(ojan): Make the http server on Windows be apache so we can
-        # turn shard the http tests there as well. Switching to apache is
-        # what made them stable on linux/mac.
-        return_value = directory
-        while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
-                and test_file.find(os.sep) >= 0):
-            test_file_parts = test_file.split(os.sep, 1)
-            directory = test_file_parts[0]
-            return_value = os.path.join(return_value, directory)
-            test_file = test_file_parts[1]
-
-        return return_value
-
-    def _get_test_input_for_file(self, test_file):
-        """Returns the appropriate TestInput object for the file. Mostly this
-        is used for looking up the timeout value (in ms) to use for the given
-        test."""
-        if self._test_is_slow(test_file):
-            return TestInput(test_file, self._options.slow_time_out_ms)
-        return TestInput(test_file, self._options.time_out_ms)
-
-    def _test_requires_lock(self, test_file):
-        """Return True if the test needs to be locked when
-        running multiple copies of NRWTs."""
-        split_path = test_file.split(os.sep)
-        return 'http' in split_path or 'websocket' in split_path
-
-    def _test_is_slow(self, test_file):
-        return self._expectations.has_modifier(test_file,
-                                               test_expectations.SLOW)
-
-    def _shard_tests(self, test_files, use_real_shards):
-        """Groups tests into batches.
-        This helps ensure that tests that depend on each other (aka bad tests!)
-        continue to run together as most cross-tests dependencies tend to
-        occur within the same directory. If use_real_shards is False, we
-        put each (non-HTTP/websocket) test into its own shard for maximum
-        concurrency instead of trying to do any sort of real sharding.
-
-        Return:
-            A list of lists of TestInput objects.
-        """
-        # FIXME: when we added http locking, we changed how this works such
-        # that we always lump all of the HTTP threads into a single shard.
-        # That will slow down experimental-fully-parallel, but it's unclear
-        # what the best alternative is completely revamping how we track
-        # when to grab the lock.
-
-        test_lists = []
-        tests_to_http_lock = []
-        if not use_real_shards:
-            for test_file in test_files:
-                test_input = self._get_test_input_for_file(test_file)
-                if self._test_requires_lock(test_file):
-                    tests_to_http_lock.append(test_input)
-                else:
-                    test_lists.append((".", [test_input]))
-        else:
-            tests_by_dir = {}
-            for test_file in test_files:
-                directory = self._get_dir_for_test_file(test_file)
-                test_input = self._get_test_input_for_file(test_file)
-                if self._test_requires_lock(test_file):
-                    tests_to_http_lock.append(test_input)
-                else:
-                    tests_by_dir.setdefault(directory, [])
-                    tests_by_dir[directory].append(test_input)
-            # Sort by the number of tests in the dir so that the ones with the
-            # most tests get run first in order to maximize parallelization.
-            # Number of tests is a good enough, but not perfect, approximation
-            # of how long that set of tests will take to run. We can't just use
-            # a PriorityQueue until we move to Python 2.6.
-            for directory in tests_by_dir:
-                test_list = tests_by_dir[directory]
-                # Keep the tests in alphabetical order.
-                # FIXME: Remove once tests are fixed so they can be run in any
-                # order.
-                test_list.reverse()
-                test_list_tuple = (directory, test_list)
-                test_lists.append(test_list_tuple)
-            test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
-
-        # Put the http tests first. There are only a couple hundred of them,
-        # but each http test takes a very long time to run, so sorting by the
-        # number of tests doesn't accurately capture how long they take to run.
-        if tests_to_http_lock:
-            tests_to_http_lock.reverse()
-            test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
-
-        return test_lists
-
-    def _contains_tests(self, subdir):
-        for test_file in self._test_files:
-            if test_file.find(subdir) >= 0:
-                return True
-        return False
-
-    def _num_workers(self):
-        return int(self._options.child_processes)
-
-    def _run_tests(self, file_list, result_summary):
-        """Runs the tests in the file_list.
-
-        Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
-            test_timings, individual_test_timings)
-            interrupted is whether the run was interrupted
-            keyboard_interrupted is whether the interruption was because someone
-              typed Ctrl^C
-            thread_timings is a list of dicts with the total runtime
-              of each thread with 'name', 'num_tests', 'total_time' properties
-            test_timings is a list of timings for each sharded subdirectory
-              of the form [time, directory_name, num_tests]
-            individual_test_timings is a list of run times for each test
-              in the form {filename:filename, test_run_time:test_run_time}
-            result_summary: summary object to populate with the results
-        """
-
-        self._printer.print_update('Sharding tests ...')
-        num_workers = self._num_workers()
-        test_lists = self._shard_tests(file_list,
-            num_workers > 1 and not self._options.experimental_fully_parallel)
-        filename_queue = Queue.Queue()
-        for item in test_lists:
-            filename_queue.put(item)
-
-        self._printer.print_update('Starting %s ...' %
-                                   grammar.pluralize('worker', num_workers))
-        self._message_broker = message_broker.get(self._port, self._options)
-        broker = self._message_broker
-        self._current_filename_queue = filename_queue
-        self._current_result_summary = result_summary
-
-        if not self._options.dry_run:
-            threads = broker.start_workers(self)
-        else:
-            threads = {}
-
-        self._printer.print_update("Starting testing ...")
-        keyboard_interrupted = False
-        interrupted = False
-        if not self._options.dry_run:
-            try:
-                broker.run_message_loop()
-            except KeyboardInterrupt:
-                _log.info("Interrupted, exiting")
-                broker.cancel_workers()
-                keyboard_interrupted = True
-                interrupted = True
-            except TestRunInterruptedException, e:
-                _log.info(e.reason)
-                broker.cancel_workers()
-                interrupted = True
-            except:
-                # Unexpected exception; don't try to clean up workers.
-                _log.info("Exception raised, exiting")
-                raise
-
-        thread_timings, test_timings, individual_test_timings = \
-            self._collect_timing_info(threads)
-
-        broker.cleanup()
-        self._message_broker = None
-        return (interrupted, keyboard_interrupted, thread_timings, test_timings,
-                individual_test_timings)
-
-    def update(self):
-        self.update_summary(self._current_result_summary)
-
-    def _collect_timing_info(self, threads):
-        test_timings = {}
-        individual_test_timings = []
-        thread_timings = []
-
-        for thread in threads:
-            thread_timings.append({'name': thread.getName(),
-                                   'num_tests': thread.get_num_tests(),
-                                   'total_time': thread.get_total_time()})
-            test_timings.update(thread.get_test_group_timing_stats())
-            individual_test_timings.extend(thread.get_test_results())
-
-        return (thread_timings, test_timings, individual_test_timings)
-
-    def needs_http(self):
-        """Returns whether the test runner needs an HTTP server."""
-        return self._contains_tests(self.HTTP_SUBDIR)
-
-    def needs_websocket(self):
-        """Returns whether the test runner needs a WEBSOCKET server."""
-        return self._contains_tests(self.WEBSOCKET_SUBDIR)
-
-    def set_up_run(self):
-        """Configures the system to be ready to run tests.
-
-        Returns a ResultSummary object if we should continue to run tests,
-        or None if we should abort.
-
-        """
-        # This must be started before we check the system dependencies,
-        # since the helper may do things to make the setup correct.
-        self._printer.print_update("Starting helper ...")
-        self._port.start_helper()
-
-        # Check that the system dependencies (themes, fonts, ...) are correct.
-        if not self._options.nocheck_sys_deps:
-            self._printer.print_update("Checking system dependencies ...")
-            if not self._port.check_sys_deps(self.needs_http()):
-                self._port.stop_helper()
-                return None
-
-        if self._options.clobber_old_results:
-            self._clobber_old_results()
-
-        # Create the output directory if it doesn't already exist.
-        self._port.maybe_make_directory(self._options.results_directory)
-
-        self._port.setup_test_run()
-
-        self._printer.print_update("Preparing tests ...")
-        result_summary = self.prepare_lists_and_print_output()
-        if not result_summary:
-            return None
-
-        return result_summary
-
-    def run(self, result_summary):
-        """Run all our tests on all our test files.
-
-        For each test file, we run each test type. If there are any failures,
-        we collect them for reporting.
-
-        Args:
-          result_summary: a summary object tracking the test results.
-
-        Return:
-          The number of unexpected results (0 == success)
-        """
-        # gather_test_files() must have been called first to initialize us.
-        # If we didn't find any files to test, we've errored out already in
-        # prepare_lists_and_print_output().
-        assert(len(self._test_files))
-
-        start_time = time.time()
-
-        interrupted, keyboard_interrupted, thread_timings, test_timings, \
-            individual_test_timings = (
-            self._run_tests(self._test_files_list, result_summary))
-
-        # We exclude the crashes from the list of results to retry, because
-        # we want to treat even a potentially flaky crash as an error.
-        failures = self._get_failures(result_summary, include_crashes=False)
-        retry_summary = result_summary
-        while (len(failures) and self._options.retry_failures and
-            not self._retrying and not interrupted):
-            _log.info('')
-            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
-            _log.info('')
-            self._retrying = True
-            retry_summary = ResultSummary(self._expectations, failures.keys())
-            # Note that we intentionally ignore the return value here.
-            self._run_tests(failures.keys(), retry_summary)
-            failures = self._get_failures(retry_summary, include_crashes=True)
-
-        end_time = time.time()
-
-        self._print_timing_statistics(end_time - start_time,
-                                      thread_timings, test_timings,
-                                      individual_test_timings,
-                                      result_summary)
-
-        self._print_result_summary(result_summary)
-
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-        self._printer.print_one_line_summary(result_summary.total,
-                                             result_summary.expected,
-                                             result_summary.unexpected)
-
-        unexpected_results = summarize_unexpected_results(self._port,
-            self._expectations, result_summary, retry_summary)
-        self._printer.print_unexpected_results(unexpected_results)
-
-        if (self._options.record_results and not self._options.dry_run and
-            not interrupted):
-            # Write the same data to log files and upload generated JSON files
-            # to appengine server.
-            self._upload_json_files(unexpected_results, result_summary,
-                                    individual_test_timings)
-
-        # Write the summary to disk (results.html) and display it if requested.
-        if not self._options.dry_run:
-            wrote_results = self._write_results_html_file(result_summary)
-            if self._options.show_results and wrote_results:
-                self._show_results_html_file()
-
-        # Now that we've completed all the processing we can, we re-raise
-        # a KeyboardInterrupt if necessary so the caller can handle it.
-        if keyboard_interrupted:
-            raise KeyboardInterrupt
-
-        # Ignore flaky failures and unexpected passes so we don't turn the
-        # bot red for those.
-        return unexpected_results['num_regressions']
-
-    def clean_up_run(self):
-        """Restores the system after we're done running tests."""
-
-        _log.debug("flushing stdout")
-        sys.stdout.flush()
-        _log.debug("flushing stderr")
-        sys.stderr.flush()
-        _log.debug("stopping helper")
-        self._port.stop_helper()
-
-    def update_summary(self, result_summary):
-        """Update the summary and print results with any completed tests."""
-        while True:
-            try:
-                result = test_results.TestResult.loads(self._result_queue.get_nowait())
-            except Queue.Empty:
-                return
-
-            expected = self._expectations.matches_an_expected_result(
-                result.filename, result.type, self._options.pixel_tests)
-            result_summary.add(result, expected)
-            exp_str = self._expectations.get_expectations_string(
-                result.filename)
-            got_str = self._expectations.expectation_to_string(result.type)
-            self._printer.print_test_result(result, expected, exp_str, got_str)
-            self._printer.print_progress(result_summary, self._retrying,
-                                         self._test_files_list)
-
-            def interrupt_if_at_failure_limit(limit, count, message):
-                if limit and count >= limit:
-                    raise TestRunInterruptedException(message % count)
-
-            interrupt_if_at_failure_limit(
-                self._options.exit_after_n_failures,
-                result_summary.unexpected_failures,
-                "Aborting run since %d failures were reached")
-            interrupt_if_at_failure_limit(
-                self._options.exit_after_n_crashes_or_timeouts,
-                result_summary.unexpected_crashes_or_timeouts,
-                "Aborting run since %d crashes or timeouts were reached")
-
-
-    def _clobber_old_results(self):
-        # Just clobber the actual test results directories since the other
-        # files in the results directory are explicitly used for cross-run
-        # tracking.
-        self._printer.print_update("Clobbering old results in %s" %
-                                   self._options.results_directory)
-        layout_tests_dir = self._port.layout_tests_dir()
-        possible_dirs = self._port.test_dirs()
-        for dirname in possible_dirs:
-            if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
-                shutil.rmtree(os.path.join(self._options.results_directory,
-                                           dirname),
-                              ignore_errors=True)
-
-    def _get_failures(self, result_summary, include_crashes):
-        """Filters a dict of results and returns only the failures.
-
-        Args:
-          result_summary: the results of the test run
-          include_crashes: whether crashes are included in the output.
-            We use False when finding the list of failures to retry
-            to see if the results were flaky. Although the crashes may also be
-            flaky, we treat them as if they aren't so that they're not ignored.
-        Returns:
-          a dict of files -> results
-        """
-        failed_results = {}
-        for test, result in result_summary.unexpected_results.iteritems():
-            if (result == test_expectations.PASS or
-                result == test_expectations.CRASH and not include_crashes):
-                continue
-            failed_results[test] = result
-
-        return failed_results
-
-    def _upload_json_files(self, unexpected_results, result_summary,
-                        individual_test_timings):
-        """Writes the results of the test run as JSON files into the results
-        dir and upload the files to the appengine server.
-
-        There are three different files written into the results dir:
-          unexpected_results.json: A short list of any unexpected results.
-            This is used by the buildbots to display results.
-          expectations.json: This is used by the flakiness dashboard.
-          results.json: A full list of the results - used by the flakiness
-            dashboard and the aggregate results dashboard.
-
-        Args:
-          unexpected_results: dict of unexpected results
-          result_summary: full summary object
-          individual_test_timings: list of test times (used by the flakiness
-            dashboard).
-        """
-        results_directory = self._options.results_directory
-        _log.debug("Writing JSON files in %s." % results_directory)
-        unexpected_json_path = os.path.join(results_directory, "unexpected_results.json")
-        with codecs.open(unexpected_json_path, "w", "utf-8") as file:
-            simplejson.dump(unexpected_results, file, sort_keys=True, indent=2)
-
-        # Write a json file of the test_expectations.txt file for the layout
-        # tests dashboard.
-        expectations_path = os.path.join(results_directory, "expectations.json")
-        expectations_json = \
-            self._expectations.get_expectations_json_for_all_platforms()
-        with codecs.open(expectations_path, "w", "utf-8") as file:
-            file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
-
-        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
-            self._port, self._options.builder_name, self._options.build_name,
-            self._options.build_number, self._options.results_directory,
-            BUILDER_BASE_URL, individual_test_timings,
-            self._expectations, result_summary, self._test_files_list,
-            not self._options.upload_full_results,
-            self._options.test_results_server,
-            "layout-tests",
-            self._options.master_name)
-
-        _log.debug("Finished writing JSON files.")
-
-        json_files = ["expectations.json"]
-        if self._options.upload_full_results:
-            json_files.append("results.json")
-        else:
-            json_files.append("incremental_results.json")
-
-        generator.upload_json_files(json_files)
-
-    def _print_config(self):
-        """Prints the configuration for the test run."""
-        p = self._printer
-        p.print_config("Using port '%s'" % self._port.name())
-        p.print_config("Placing test results in %s" %
-                       self._options.results_directory)
-        if self._options.new_baseline:
-            p.print_config("Placing new baselines in %s" %
-                           self._port.baseline_path())
-        p.print_config("Using %s build" % self._options.configuration)
-        if self._options.pixel_tests:
-            p.print_config("Pixel tests enabled")
-        else:
-            p.print_config("Pixel tests disabled")
-
-        p.print_config("Regular timeout: %s, slow test timeout: %s" %
-                       (self._options.time_out_ms,
-                        self._options.slow_time_out_ms))
-
-        if self._num_workers() == 1:
-            p.print_config("Running one %s" % self._port.driver_name())
-        else:
-            p.print_config("Running %s %ss in parallel" %
-                           (self._options.child_processes,
-                            self._port.driver_name()))
-        p.print_config('Command line: ' +
-                       ' '.join(self._port.driver_cmd_line()))
-        p.print_config("Worker model: %s" % self._options.worker_model)
-        p.print_config("")
-
-    def _print_expected_results_of_type(self, result_summary,
-                                        result_type, result_type_str):
-        """Print the number of the tests in a given result class.
-
-        Args:
-          result_summary - the object containing all the results to report on
-          result_type - the particular result type to report in the summary.
-          result_type_str - a string description of the result_type.
-        """
-        tests = self._expectations.get_tests_with_result_type(result_type)
-        now = result_summary.tests_by_timeline[test_expectations.NOW]
-        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
-
-        # We use a fancy format string in order to print the data out in a
-        # nicely-aligned table.
-        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
-                  % (self._num_digits(now), self._num_digits(wontfix)))
-        self._printer.print_expected(fmtstr %
-            (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
-
-    def _num_digits(self, num):
-        """Returns the number of digits needed to represent the length of a
-        sequence."""
-        ndigits = 1
-        if len(num):
-            ndigits = int(math.log10(len(num))) + 1
-        return ndigits
-
-    def _print_timing_statistics(self, total_time, thread_timings,
-                               directory_test_timings, individual_test_timings,
-                               result_summary):
-        """Record timing-specific information for the test run.
-
-        Args:
-          total_time: total elapsed time (in seconds) for the test run
-          thread_timings: wall clock time each thread ran for
-          directory_test_timings: timing by directory
-          individual_test_timings: timing by file
-          result_summary: summary object for the test run
-        """
-        self._printer.print_timing("Test timing:")
-        self._printer.print_timing("  %6.2f total testing time" % total_time)
-        self._printer.print_timing("")
-        self._printer.print_timing("Thread timing:")
-        cuml_time = 0
-        for t in thread_timings:
-            self._printer.print_timing("    %10s: %5d tests, %6.2f secs" %
-                  (t['name'], t['num_tests'], t['total_time']))
-            cuml_time += t['total_time']
-        self._printer.print_timing("   %6.2f cumulative, %6.2f optimal" %
-              (cuml_time, cuml_time / int(self._options.child_processes)))
-        self._printer.print_timing("")
-
-        self._print_aggregate_test_statistics(individual_test_timings)
-        self._print_individual_test_times(individual_test_timings,
-                                          result_summary)
-        self._print_directory_timings(directory_test_timings)
-
-    def _print_aggregate_test_statistics(self, individual_test_timings):
-        """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
-        Args:
-          individual_test_timings: List of TestResults for all tests.
-        """
-        test_types = []  # Unit tests don't actually produce any timings.
-        if individual_test_timings:
-            test_types = individual_test_timings[0].time_for_diffs.keys()
-        times_for_dump_render_tree = []
-        times_for_diff_processing = []
-        times_per_test_type = {}
-        for test_type in test_types:
-            times_per_test_type[test_type] = []
-
-        for test_stats in individual_test_timings:
-            times_for_dump_render_tree.append(test_stats.test_run_time)
-            times_for_diff_processing.append(
-                test_stats.total_time_for_all_diffs)
-            time_for_diffs = test_stats.time_for_diffs
-            for test_type in test_types:
-                times_per_test_type[test_type].append(
-                    time_for_diffs[test_type])
-
-        self._print_statistics_for_test_timings(
-            "PER TEST TIME IN TESTSHELL (seconds):",
-            times_for_dump_render_tree)
-        self._print_statistics_for_test_timings(
-            "PER TEST DIFF PROCESSING TIMES (seconds):",
-            times_for_diff_processing)
-        for test_type in test_types:
-            self._print_statistics_for_test_timings(
-                "PER TEST TIMES BY TEST TYPE: %s" % test_type,
-                times_per_test_type[test_type])
-
-    def _print_individual_test_times(self, individual_test_timings,
-                                  result_summary):
-        """Prints the run times for slow, timeout and crash tests.
-        Args:
-          individual_test_timings: List of TestStats for all tests.
-          result_summary: summary object for test run
-        """
-        # Reverse-sort by the time spent in DumpRenderTree.
-        individual_test_timings.sort(lambda a, b:
-            cmp(b.test_run_time, a.test_run_time))
-
-        num_printed = 0
-        slow_tests = []
-        timeout_or_crash_tests = []
-        unexpected_slow_tests = []
-        for test_tuple in individual_test_timings:
-            filename = test_tuple.filename
-            is_timeout_crash_or_slow = False
-            if self._test_is_slow(filename):
-                is_timeout_crash_or_slow = True
-                slow_tests.append(test_tuple)
-
-            if filename in result_summary.failures:
-                result = result_summary.results[filename].type
-                if (result == test_expectations.TIMEOUT or
-                    result == test_expectations.CRASH):
-                    is_timeout_crash_or_slow = True
-                    timeout_or_crash_tests.append(test_tuple)
-
-            if (not is_timeout_crash_or_slow and
-                num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
-                num_printed = num_printed + 1
-                unexpected_slow_tests.append(test_tuple)
-
-        self._printer.print_timing("")
-        self._print_test_list_timing("%s slowest tests that are not "
-            "marked as SLOW and did not timeout/crash:" %
-            printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
-        self._printer.print_timing("")
-        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
-        self._printer.print_timing("")
-        self._print_test_list_timing("Tests that timed out or crashed:",
-                                     timeout_or_crash_tests)
-        self._printer.print_timing("")
-
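The reverse sort in _print_individual_test_times uses the Python 2-only comparison-function form of sort(). For reference, a small sketch of the equivalent key/reverse form; FakeStats is a made-up stand-in for the real TestStats objects:

    from collections import namedtuple

    # Hypothetical stand-in for the TestStats objects used above.
    FakeStats = namedtuple('FakeStats', ['filename', 'test_run_time'])
    timings = [FakeStats('a.html', 0.4),
               FakeStats('b.html', 2.1),
               FakeStats('c.html', 1.3)]

    # Equivalent to sort(lambda a, b: cmp(b.test_run_time, a.test_run_time)),
    # but also valid on Python 3:
    timings.sort(key=lambda t: t.test_run_time, reverse=True)
    # -> b.html (2.1), c.html (1.3), a.html (0.4)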
-    def _print_test_list_timing(self, title, test_list):
-        """Print timing info for each test.
-
-        Args:
-          title: section heading
-          test_list: tests that fall in this section
-        """
-        if self._printer.disabled('slowest'):
-            return
-
-        self._printer.print_timing(title)
-        for test_tuple in test_list:
-            filename = test_tuple.filename[len(
-                self._port.layout_tests_dir()) + 1:]
-            filename = filename.replace('\\', '/')
-            test_run_time = round(test_tuple.test_run_time, 1)
-            self._printer.print_timing("  %s took %s seconds" %
-                                       (filename, test_run_time))
-
-    def _print_directory_timings(self, directory_test_timings):
-        """Print timing info by directory for any directories that
-        take > 10 seconds to run.
-
-        Args:
-          directory_test_timings: time info for each directory
-        """
-        timings = []
-        for directory in directory_test_timings:
-            num_tests, time_for_directory = directory_test_timings[directory]
-            timings.append((round(time_for_directory, 1), directory,
-                            num_tests))
-        timings.sort()
-
-        self._printer.print_timing("Time to process slowest subdirectories:")
-        min_seconds_to_print = 10
-        for timing in timings:
-            if timing[0] > min_seconds_to_print:
-                self._printer.print_timing(
-                    "  %s took %s seconds to run %s tests." % (timing[1],
-                    timing[0], timing[2]))
-        self._printer.print_timing("")
-
-    def _print_statistics_for_test_timings(self, title, timings):
-        """Prints the median, mean and standard deviation of the values in
-        timings.
-
-        Args:
-          title: Title for these timings.
-          timings: A list of floats representing times.
-        """
-        self._printer.print_timing(title)
-        timings.sort()
-
-        num_tests = len(timings)
-        if not num_tests:
-            return
-        percentile90 = timings[int(.9 * num_tests)]
-        percentile99 = timings[int(.99 * num_tests)]
-
-        if num_tests % 2 == 1:
-            # The middle element of an odd-length sorted list is at
-            # index (num_tests - 1) / 2.
-            median = timings[(num_tests - 1) / 2]
-        else:
-            lower = timings[num_tests / 2 - 1]
-            upper = timings[num_tests / 2]
-            median = (float(lower + upper)) / 2
-
-        mean = sum(timings) / num_tests
-
-        # Accumulate the squared deviation of every timing; assigning
-        # instead of adding would keep only the last test's value.
-        sum_of_deviations = 0
-        for time in timings:
-            sum_of_deviations += math.pow(time - mean, 2)
-
-        std_deviation = math.sqrt(sum_of_deviations / num_tests)
-        self._printer.print_timing("  Median:          %6.3f" % median)
-        self._printer.print_timing("  Mean:            %6.3f" % mean)
-        self._printer.print_timing("  90th percentile: %6.3f" % percentile90)
-        self._printer.print_timing("  99th percentile: %6.3f" % percentile99)
-        self._printer.print_timing("  Standard dev:    %6.3f" % std_deviation)
-        self._printer.print_timing("")
-
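For reference, a minimal standalone sketch of the same statistics, with the squared deviations accumulated over every timing and the same truncation-based percentile indexing:

    import math

    def timing_stats(timings):
        """Median, mean, 90th/99th percentile and population std deviation
        for a non-empty list of floats (the quantities printed above)."""
        timings = sorted(timings)
        n = len(timings)
        if n % 2 == 1:
            median = timings[(n - 1) // 2]
        else:
            median = (timings[n // 2 - 1] + timings[n // 2]) / 2.0
        mean = sum(timings) / n
        variance = sum((t - mean) ** 2 for t in timings) / n
        return {'median': median,
                'mean': mean,
                '90th': timings[int(.9 * n)],
                '99th': timings[int(.99 * n)],
                'stddev': math.sqrt(variance)}

    print(timing_stats([0.2, 0.4, 0.4, 1.1, 3.0]))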
-    def _print_result_summary(self, result_summary):
-        """Print a short summary about how many tests passed.
-
-        Args:
-          result_summary: information to log
-        """
-        failed = len(result_summary.failures)
-        skipped = len(
-            result_summary.tests_by_expectation[test_expectations.SKIP])
-        total = result_summary.total
-        passed = total - failed - skipped
-        pct_passed = 0.0
-        if total > 0:
-            pct_passed = float(passed) * 100 / total
-
-        self._printer.print_actual("")
-        self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
-                     (passed, total, pct_passed))
-        self._printer.print_actual("")
-        self._print_result_summary_entry(result_summary,
-            test_expectations.NOW, "Tests to be fixed")
-
-        self._printer.print_actual("")
-        self._print_result_summary_entry(result_summary,
-            test_expectations.WONTFIX,
-            "Tests that will only be fixed if they crash (WONTFIX)")
-        self._printer.print_actual("")
-
-    def _print_result_summary_entry(self, result_summary, timeline,
-                                    heading):
-        """Print a summary block of results for a particular timeline of test.
-
-        Args:
-          result_summary: summary to print results for
-          timeline: the timeline to print results for (NOT, WONTFIX, etc.)
-          heading: a textual description of the timeline
-        """
-        total = len(result_summary.tests_by_timeline[timeline])
-        not_passing = (total -
-           len(result_summary.tests_by_expectation[test_expectations.PASS] &
-               result_summary.tests_by_timeline[timeline]))
-        self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
-
-        for result in TestExpectationsFile.EXPECTATION_ORDER:
-            if result == test_expectations.PASS:
-                continue
-            results = (result_summary.tests_by_expectation[result] &
-                       result_summary.tests_by_timeline[timeline])
-            desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
-            if not_passing and len(results):
-                pct = len(results) * 100.0 / not_passing
-                self._printer.print_actual("  %5d %-24s (%4.1f%%)" %
-                    (len(results), desc[len(results) != 1], pct))
-
-    def _results_html(self, test_files, failures, title="Test Failures", override_time=None):
-        """
-        test_files = a list of file paths
-        failures = dictionary mapping test paths to failure objects
-        title = title printed at the top of the results page
-        override_time = current time (used by unit tests)
-        """
-        page = """<html>
-  <head>
-    <title>Layout Test Results (%(time)s)</title>
-  </head>
-  <body>
-    <h2>%(title)s (%(time)s)</h2>
-        """ % {'title': title, 'time': override_time or time.asctime()}
-
-        for test_file in sorted(test_files):
-            test_name = self._port.relative_test_filename(test_file)
-            test_url = self._port.filename_to_uri(test_file)
-            page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
-            test_failures = failures.get(test_file, [])
-            for failure in test_failures:
-                page += (u"&nbsp;&nbsp;%s<br/>" %
-                         failure.result_html_output(test_name))
-            page += "</p>\n"
-        page += "</body></html>\n"
-        return page
-
-    def _write_results_html_file(self, result_summary):
-        """Write results.html which is a summary of tests that failed.
-
-        Args:
-          result_summary: a summary of the results :)
-
-        Returns:
-          True if any results were written (since expected failures may be
-          omitted)
-        """
-        # test failures
-        if self._options.full_results_html:
-            results_title = "Test Failures"
-            test_files = result_summary.failures.keys()
-        else:
-            results_title = "Unexpected Test Failures"
-            unexpected_failures = self._get_failures(result_summary,
-                include_crashes=True)
-            test_files = unexpected_failures.keys()
-        if not len(test_files):
-            return False
-
-        out_filename = os.path.join(self._options.results_directory,
-                                    "results.html")
-        with codecs.open(out_filename, "w", "utf-8") as results_file:
-            html = self._results_html(test_files, result_summary.failures, results_title)
-            results_file.write(html)
-
-        return True
-
-    def _show_results_html_file(self):
-        """Shows the results.html page."""
-        results_filename = os.path.join(self._options.results_directory,
-                                        "results.html")
-        self._port.show_results_html_file(results_filename)
-
-
-def read_test_files(files):
-    tests = []
-    for file in files:
-        try:
-            with codecs.open(file, 'r', 'utf-8') as file_contents:
-                # FIXME: This could be cleaner using a list comprehension.
-                for line in file_contents:
-                    line = test_expectations.strip_comments(line)
-                    if line:
-                        tests.append(line)
-        except IOError, e:
-            if e.errno == errno.ENOENT:
-                _log.critical('')
-                _log.critical('--test-list file "%s" not found' % file)
-            raise
-    return tests
+_log = logging.getLogger(__name__)
 
 
 def run(port, options, args, regular_output=sys.stderr,
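The FIXME in read_test_files above asks for a list comprehension; one possible shape for that rewrite is sketched below. The IOError reporting is omitted for brevity, and the test_expectations import path is assumed to match the module's existing imports:

    import codecs

    # Assumed import path; the real module is imported by run_webkit_tests.
    from webkitpy.layout_tests.layout_package import test_expectations

    def read_test_files_listcomp(files):
        tests = []
        for path in files:
            with codecs.open(path, 'r', 'utf-8') as contents:
                # strip_comments() returns '' (or None) for blank/comment lines.
                stripped = [test_expectations.strip_comments(line)
                            for line in contents]
                tests.extend(line for line in stripped if line)
        return tests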
@@ -1343,12 +90,12 @@ def run(port, options, args, regular_output=sys.stderr,
     # in a try/finally to ensure that we clean up the logging configuration.
     num_unexpected_results = -1
     try:
-        test_runner = TestRunner(port, options, printer)
-        test_runner._print_config()
+        runner = test_runner.TestRunner(port, options, printer)
+        runner._print_config()
 
         printer.print_update("Collecting tests ...")
         try:
-            test_runner.collect_tests(args, last_unexpected_results)
+            runner.collect_tests(args, last_unexpected_results)
         except IOError, e:
             if e.errno == errno.ENOENT:
                 return -1
@@ -1356,19 +103,19 @@ def run(port, options, args, regular_output=sys.stderr,
 
         printer.print_update("Parsing expectations ...")
         if options.lint_test_files:
-            return test_runner.lint()
-        test_runner.parse_expectations(port.test_platform_name(),
-                                       options.configuration == 'Debug')
+            return runner.lint()
+        runner.parse_expectations(port.test_platform_name(),
+                                  options.configuration == 'Debug')
 
         printer.print_update("Checking build ...")
-        if not port.check_build(test_runner.needs_http()):
+        if not port.check_build(runner.needs_http()):
             _log.error("Build check failed")
             return -1
 
-        result_summary = test_runner.set_up_run()
+        result_summary = runner.set_up_run()
         if result_summary:
-            num_unexpected_results = test_runner.run(result_summary)
-            test_runner.clean_up_run()
+            num_unexpected_results = runner.run(result_summary)
+            runner.clean_up_run()
             _log.debug("Testing completed, Exit status: %d" %
                        num_unexpected_results)
     finally:
@@ -1407,9 +154,9 @@ def _set_up_derived_options(port_obj, options):
 
     if not options.time_out_ms:
         if options.configuration == "Debug":
-            options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+            options.time_out_ms = str(2 * test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS)
         else:
-            options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+            options.time_out_ms = str(test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS)
 
     options.slow_time_out_ms = str(5 * int(options.time_out_ms))
     return warnings
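The derived timeouts above compose multiplicatively: a Debug build doubles the per-test timeout, and slow tests get five times whatever the per-test timeout ends up being. A small sketch with a hypothetical default of 6000 ms (the real constant is test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS):

    DEFAULT_TEST_TIMEOUT_MS = 6000  # hypothetical value for illustration

    def derived_timeouts(configuration, time_out_ms=None):
        """Mirror of the option derivation above: Debug doubles the
        per-test timeout; slow tests get five times the per-test timeout."""
        if not time_out_ms:
            factor = 2 if configuration == 'Debug' else 1
            time_out_ms = str(factor * DEFAULT_TEST_TIMEOUT_MS)
        slow_time_out_ms = str(5 * int(time_out_ms))
        return time_out_ms, slow_time_out_ms

    print(derived_timeouts('Debug'))    # ('12000', '60000')
    print(derived_timeouts('Release'))  # ('6000', '30000')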
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index d07e928..2bfac2f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -521,67 +521,6 @@ class RebaselineTest(unittest.TestCase):
             codecs.open = original_open
 
 
-class TestRunnerWrapper(run_webkit_tests.TestRunner):
-    def _get_test_input_for_file(self, test_file):
-        return test_file
-
-
-class TestRunnerTest(unittest.TestCase):
-    def test_results_html(self):
-        mock_port = Mock()
-        mock_port.relative_test_filename = lambda name: name
-        mock_port.filename_to_uri = lambda name: name
-
-        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(),
-            printer=Mock())
-        expected_html = u"""<html>
-  <head>
-    <title>Layout Test Results (time)</title>
-  </head>
-  <body>
-    <h2>Title (time)</h2>
-        <p><a href='test_path'>test_path</a><br />
-</p>
-</body></html>
-"""
-        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
-        self.assertEqual(html, expected_html)
-
-    def test_shard_tests(self):
-        # Test that _shard_tests in run_webkit_tests.TestRunner really
-        # puts the http tests first in the queue.
-        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
-            printer=Mock())
-
-        test_list = [
-          "LayoutTests/websocket/tests/unicode.htm",
-          "LayoutTests/animations/keyframes.html",
-          "LayoutTests/http/tests/security/view-source-no-refresh.html",
-          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
-          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
-          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
-          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
-          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
-          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
-        ]
-
-        expected_tests_to_http_lock = set([
-          'LayoutTests/websocket/tests/unicode.htm',
-          'LayoutTests/http/tests/security/view-source-no-refresh.html',
-          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
-          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
-        ])
-
-        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
-        single_thread_results = runner._shard_tests(test_list, False)
-        multi_thread_results = runner._shard_tests(test_list, True)
-
-        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
-        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
-        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
-        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
-
-
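The removed test expects every path under http/ or websocket/ to land in a single 'tests_to_http_lock' shard at the front of the queue. A minimal sketch of that grouping, not the real _shard_tests (which also splits the remaining tests into further shards; 'everything_else' is a made-up shard name):

    def shard_http_first(test_files):
        """Put tests that need the HTTP lock into one leading shard."""
        needs_lock = [t for t in test_files
                      if '/http/' in t or '/websocket/' in t]
        lock_set = set(needs_lock)
        the_rest = [t for t in test_files if t not in lock_set]
        shards = [('tests_to_http_lock', needs_lock)] if needs_lock else []
        if the_rest:
            shards.append(('everything_else', the_rest))
        return shards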
 class DryrunTest(unittest.TestCase):
     # FIXME: it's hard to know which platforms are safe to test; the
     # chromium platforms require a chromium checkout, and the mac platform

-- 
WebKit Debian packaging


