[SCM] WebKit Debian packaging branch, webkit-1.1, updated. upstream/1.1.19-706-ge5415e9

dpranke at chromium.org dpranke at chromium.org
Thu Feb 4 21:33:05 UTC 2010


The following commit has been merged in the webkit-1.1 branch:
commit ec03291ad3ff0c115238ee30136737927be67d1c
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Sat Jan 30 01:25:18 2010 +0000

    2010-01-29  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Eric Seidel.
    
            Add top-level test drivers for running the Chromium port of
            run-webkit-tests and for rebaselining test results from the test
            bots.  The files in the Scripts directory are simply convenience
            wrappers around the files in webkitpy/layout_tests.
    
            https://bugs.webkit.org/show_bug.cgi?id=31498
    
            * Scripts/rebaseline-chromium-webkit-tests: Added.
            * Scripts/run-chromium-webkit-tests: Added.
            * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py: Added.
            * Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py: Added.
    
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@54094 268f45cc-cd09-0410-ab3c-d52691b4dbfc
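
A usage sketch for the two new drivers (the rebaseline flags shown here are the
ones defined by that tool's option parser later in this patch; the options for
run-chromium-webkit-tests are parsed by run_chromium_webkit_tests.parse_args(),
which is not included in this excerpt):

    WebKitTools/Scripts/run-chromium-webkit-tests
    WebKitTools/Scripts/rebaseline-chromium-webkit-tests --platforms mac,linux --backup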

diff --git a/WebKitTools/ChangeLog b/WebKitTools/ChangeLog
index fbcd93f..3cbe938 100644
--- a/WebKitTools/ChangeLog
+++ b/WebKitTools/ChangeLog
@@ -2,6 +2,22 @@
 
         Reviewed by Eric Seidel.
 
+        Add top-level test drivers for running the Chromium port of
+        run-webkit-tests and for rebaselining test results from the test
+        bots.  The files in the Scripts directory are simply convenience
+        wrappers around the files in webkitpy/layout_tests.
+ 
+        https://bugs.webkit.org/show_bug.cgi?id=31498
+
+        * Scripts/rebaseline-chromium-webkit-tests: Added.
+        * Scripts/run-chromium-webkit-tests: Added.
+        * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py: Added.
+        * Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py: Added.
+
+2010-01-29  Dirk Pranke  <dpranke at chromium.org>
+
+        Reviewed by Eric Seidel.
+
         Add in the second block of python code for the Chromium port
         of run-webkit-tests. These files execute different diffs to classify
         the various types of failures from a test.
diff --git a/WebKitTools/Scripts/rebaseline-chromium-webkit-tests b/WebKitTools/Scripts/rebaseline-chromium-webkit-tests
new file mode 100755
index 0000000..d22c0c4
--- /dev/null
+++ b/WebKitTools/Scripts/rebaseline-chromium-webkit-tests
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper around webkitpy/layout_tests/rebaseline.py"""
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
+                             "webkitpy", "layout_tests"))
+
+import rebaseline_chromium_webkit_tests
+
+if __name__ == '__main__':
+    rebaseline_chromium_webkit_tests.main()
diff --git a/WebKitTools/Scripts/run-chromium-webkit-tests b/WebKitTools/Scripts/run-chromium-webkit-tests
new file mode 100755
index 0000000..8712836
--- /dev/null
+++ b/WebKitTools/Scripts/run-chromium-webkit-tests
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper around webkitpy/layout_tests/run-chromium-webkit-tests.py"""
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
+                             "webkitpy", "layout_tests"))
+import run_chromium_webkit_tests
+
+if __name__ == '__main__':
+    options, args = run_chromium_webkit_tests.parse_args()
+    run_chromium_webkit_tests.main(options, args)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
new file mode 100644
index 0000000..1db811f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
@@ -0,0 +1,1028 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Rebaselining tool that automatically produces baselines for all platforms.
+
+The script does the following for each platform specified:
+  1. Compile a list of tests that need rebaselining.
+  2. Download test result archive from buildbot for the platform.
+  3. Extract baselines from the archive file for all identified files.
+  4. Add new baselines to SVN repository.
+  5. For each test that has been rebaselined, remove this platform option from
+     the test in test_expectations.txt. If no other platforms remain after
+     removal, delete the rebaselined test from the file.
+
+At the end, the script generates an HTML page that compares the old and new
+baselines.
+"""
+
+import logging
+import optparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import urllib
+import webbrowser
+import zipfile
+
+from layout_package import path_utils
+from layout_package import test_expectations
+from test_types import image_diff
+from test_types import text_diff
+
+# Repository type constants.
+REPO_SVN, REPO_UNKNOWN = range(2)
+
+BASELINE_SUFFIXES = ['.txt', '.png', '.checksum']
+REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux']
+ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel',
+                         'win-vista': 'webkit-dbg-vista',
+                         'win-xp': 'webkit-rel',
+                         'mac': 'webkit-rel-mac5',
+                         'linux': 'webkit-rel-linux',
+                         'win-canary': 'webkit-rel-webkit-org',
+                         'win-vista-canary': 'webkit-dbg-vista',
+                         'win-xp-canary': 'webkit-rel-webkit-org',
+                         'mac-canary': 'webkit-rel-mac-webkit-org',
+                         'linux-canary': 'webkit-rel-linux-webkit-org'}
+
+
+def run_shell_with_return_code(command, print_output=False):
+    """Executes a command and returns the output and process return code.
+
+    Args:
+      command: program and arguments.
+      print_output: if true, print the command results to standard output.
+
+    Returns:
+      command output, return code
+    """
+
+    # Use a shell for subcommands on Windows to get a PATH search.
+    use_shell = sys.platform.startswith('win')
+    p = subprocess.Popen(command, stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT, shell=use_shell)
+    if print_output:
+        output_array = []
+        while True:
+            line = p.stdout.readline()
+            if not line:
+                break
+            print line.strip('\n')
+            output_array.append(line)
+        output = ''.join(output_array)
+    else:
+        output = p.stdout.read()
+    p.wait()
+    p.stdout.close()
+
+    return output, p.returncode
+
+
+def run_shell(command, print_output=False):
+    """Executes a command and returns the output.
+
+    Args:
+      command: program and arguments.
+      print_output: if true, print the command results to standard output.
+
+    Returns:
+      command output
+    """
+
+    output, return_code = run_shell_with_return_code(command, print_output)
+    return output
+
+
+def log_dashed_string(text, platform, logging_level=logging.INFO):
+    """Log text message with dashes on both sides."""
+
+    msg = text
+    if platform:
+        msg += ': ' + platform
+    if len(msg) < 78:
+        dashes = '-' * ((78 - len(msg)) / 2)
+        msg = '%s %s %s' % (dashes, msg, dashes)
+
+    if logging_level == logging.ERROR:
+        logging.error(msg)
+    elif logging_level == logging.WARNING:
+        logging.warn(msg)
+    else:
+        logging.info(msg)
+
+
+def setup_html_directory(html_directory):
+    """Setup the directory to store html results.
+
+       All html related files are stored in the "rebaseline_html" subdirectory.
+
+    Args:
+      html_directory: parent directory that stores the rebaselining results.
+                      If None, a temp directory is created.
+
+    Returns:
+      the directory that stores the html related rebaselining results.
+    """
+
+    if not html_directory:
+        html_directory = tempfile.mkdtemp()
+    elif not os.path.exists(html_directory):
+        os.mkdir(html_directory)
+
+    html_directory = os.path.join(html_directory, 'rebaseline_html')
+    logging.info('Html directory: "%s"', html_directory)
+
+    if os.path.exists(html_directory):
+        shutil.rmtree(html_directory, True)
+        logging.info('Deleted existing html directory: "%s"', html_directory)
+
+    if not os.path.exists(html_directory):
+        os.mkdir(html_directory)
+    return html_directory
+
+
+def get_result_file_fullpath(html_directory, baseline_filename, platform,
+                             result_type):
+    """Get full path of the baseline result file.
+
+    Args:
+      html_directory: directory that stores the html related files.
+      baseline_filename: name of the baseline file.
+      platform: win, linux or mac
+      result_type: type of the result file: 'new', 'old' or 'diff'.
+
+    Returns:
+      Full path of the baseline file for rebaselining result comparison.
+    """
+
+    base, ext = os.path.splitext(baseline_filename)
+    result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext)
+    fullpath = os.path.join(html_directory, result_filename)
+    logging.debug('  Result file full path: "%s".', fullpath)
+    return fullpath
+
+
+class Rebaseliner(object):
+    """Class to produce new baselines for a given platform."""
+
+    REVISION_REGEX = r'<a href=\"(\d+)/\">'
+
+    def __init__(self, platform, options):
+        self._file_dir = path_utils.path_from_base('webkit', 'tools',
+            'layout_tests')
+        self._platform = platform
+        self._options = options
+        self._rebaselining_tests = []
+        self._rebaselined_tests = []
+
+        # Create the tests and expectations helper, which is used to:
+        #   - compile the list of tests that need rebaselining.
+        #   - update the tests in the test_expectations file after
+        #     rebaselining is done.
+        self._test_expectations = \
+            test_expectations.TestExpectations(None,
+                                               self._file_dir,
+                                               platform,
+                                               False,
+                                               False)
+
+        self._repo_type = self._get_repo_type()
+
+    def run(self, backup):
+        """Run rebaseline process."""
+
+        log_dashed_string('Compiling rebaselining tests', self._platform)
+        if not self._compile_rebaselining_tests():
+            return True
+
+        log_dashed_string('Downloading archive', self._platform)
+        archive_file = self._download_buildbot_archive()
+        logging.info('')
+        if not archive_file:
+            logging.error('No archive found.')
+            return False
+
+        log_dashed_string('Extracting and adding new baselines',
+                          self._platform)
+        if not self._extract_and_add_new_baselines(archive_file):
+            return False
+
+        log_dashed_string('Updating rebaselined tests in file',
+                          self._platform)
+        self._update_rebaselined_tests_in_file(backup)
+        logging.info('')
+
+        if len(self._rebaselining_tests) != len(self._rebaselined_tests):
+            logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN '
+                            'REBASELINED.')
+            logging.warning('  Total tests needing rebaselining: %d',
+                            len(self._rebaselining_tests))
+            logging.warning('  Total tests rebaselined: %d',
+                            len(self._rebaselined_tests))
+            return False
+
+        logging.warning('All tests needing rebaselining were successfully '
+                        'rebaselined.')
+
+        return True
+
+    def get_rebaselining_tests(self):
+        return self._rebaselining_tests
+
+    def _get_repo_type(self):
+        """Get the repository type that client is using."""
+        output, return_code = run_shell_with_return_code(['svn', 'info'],
+                                                         False)
+        if return_code == 0:
+            return REPO_SVN
+
+        return REPO_UNKNOWN
+
+    def _compile_rebaselining_tests(self):
+        """Compile list of tests that need rebaselining for the platform.
+
+        Returns:
+          List of tests that need rebaselining or
+          None if there is no such test.
+        """
+
+        self._rebaselining_tests = \
+            self._test_expectations.get_rebaselining_failures()
+        if not self._rebaselining_tests:
+            logging.warn('No tests found that need rebaselining.')
+            return None
+
+        logging.info('Total number of tests needing rebaselining '
+                     'for "%s": "%d"', self._platform,
+                     len(self._rebaselining_tests))
+
+        test_no = 1
+        for test in self._rebaselining_tests:
+            logging.info('  %d: %s', test_no, test)
+            test_no += 1
+
+        return self._rebaselining_tests
+
+    def _get_latest_revision(self, url):
+        """Get the latest layout test revision number from buildbot.
+
+        Args:
+          url: Url to retrieve layout test revision numbers.
+
+        Returns:
+          latest revision or
+          None on failure.
+        """
+
+        logging.debug('Url to retrieve revision: "%s"', url)
+
+        f = urllib.urlopen(url)
+        content = f.read()
+        f.close()
+
+        revisions = re.findall(self.REVISION_REGEX, content)
+        if not revisions:
+            logging.error('Failed to find revision, content: "%s"', content)
+            return None
+
+        revisions.sort(key=int)
+        logging.info('Latest revision: "%s"', revisions[-1])
+        return revisions[-1]
+
+    def _get_archive_dir_name(self, platform, webkit_canary):
+        """Get name of the layout test archive directory.
+
+        Returns:
+          Directory name or
+          None on failure
+        """
+
+        if webkit_canary:
+            platform += '-canary'
+
+        if platform in ARCHIVE_DIR_NAME_DICT:
+            return ARCHIVE_DIR_NAME_DICT[platform]
+        else:
+            logging.error('Cannot find platform key %s in archive '
+                          'directory name dictionary', platform)
+            return None
+
+    def _get_archive_url(self):
+        """Generate the url to download latest layout test archive.
+
+        Returns:
+          Url to download archive or
+          None on failure
+        """
+
+        dir_name = self._get_archive_dir_name(self._platform,
+                                              self._options.webkit_canary)
+        if not dir_name:
+            return None
+
+        logging.debug('Buildbot platform dir name: "%s"', dir_name)
+
+        url_base = '%s/%s/' % (self._options.archive_url, dir_name)
+        latest_revision = self._get_latest_revision(url_base)
+        if latest_revision is None or int(latest_revision) <= 0:
+            return None
+
+        archive_url = ('%s%s/layout-test-results.zip' % (url_base,
+                                                         latest_revision))
+        logging.info('Archive url: "%s"', archive_url)
+        return archive_url
+
+    def _download_buildbot_archive(self):
+        """Download layout test archive file from buildbot.
+
+        Returns:
+          Path of the downloaded archive file, or
+          None on failure.
+        """
+
+        url = self._get_archive_url()
+        if url is None:
+            return None
+
+        fn = urllib.urlretrieve(url)[0]
+        logging.info('Archive downloaded and saved to file: "%s"', fn)
+        return fn
+
+    def _extract_and_add_new_baselines(self, archive_file):
+        """Extract new baselines from archive and add them to SVN repository.
+
+        Args:
+          archive_file: full path to the archive file.
+
+        Returns:
+          List of tests that have been successfully rebaselined
+          (possibly empty).
+        """
+
+        zip_file = zipfile.ZipFile(archive_file, 'r')
+        zip_namelist = zip_file.namelist()
+
+        logging.debug('zip file namelist:')
+        for name in zip_namelist:
+            logging.debug('  ' + name)
+
+        platform = path_utils.platform_name(self._platform)
+        logging.debug('Platform dir: "%s"', platform)
+
+        test_no = 1
+        self._rebaselined_tests = []
+        for test in self._rebaselining_tests:
+            logging.info('Test %d: %s', test_no, test)
+
+            found = False
+            svn_error = False
+            test_basename = os.path.splitext(test)[0]
+            for suffix in BASELINE_SUFFIXES:
+                archive_test_name = ('layout-test-results/%s-actual%s' %
+                                     (test_basename, suffix))
+                logging.debug('  Archive test file name: "%s"',
+                              archive_test_name)
+                if archive_test_name not in zip_namelist:
+                    logging.info('  %s file not in archive.', suffix)
+                    continue
+
+                found = True
+                logging.info('  %s file found in archive.', suffix)
+
+                # Extract new baseline from archive and save it to a temp file.
+                data = zip_file.read(archive_test_name)
+                temp_fd, temp_name = tempfile.mkstemp(suffix)
+                f = os.fdopen(temp_fd, 'wb')
+                f.write(data)
+                f.close()
+
+                expected_filename = '%s-expected%s' % (test_basename, suffix)
+                expected_fullpath = os.path.join(
+                    path_utils.chromium_baseline_path(platform),
+                    expected_filename)
+                expected_fullpath = os.path.normpath(expected_fullpath)
+                logging.debug('  Expected file full path: "%s"',
+                              expected_fullpath)
+
+                # TODO(victorw): for now, the rebaselining tool only checks
+                # whether THIS baseline is a duplicate and should be skipped.
+                # We could improve the tool to check all baselines in upper
+                # and lower levels and remove all duplicated baselines.
+                if self._is_dup_baseline(temp_name, expected_fullpath,
+                                         test, suffix, self._platform):
+                    os.remove(temp_name)
+                    self._delete_baseline(expected_fullpath)
+                    continue
+
+                # Create the new baseline directory if it doesn't already
+                # exist.
+                path_utils.maybe_make_directory(
+                    os.path.dirname(expected_fullpath))
+
+                shutil.move(temp_name, expected_fullpath)
+
+                if not self._svn_add(expected_fullpath):
+                    svn_error = True
+                elif suffix != '.checksum':
+                    self._create_html_baseline_files(expected_fullpath)
+
+            if not found:
+                logging.warn('  No new baselines found in archive.')
+            else:
+                if svn_error:
+                    logging.warn('  Failed to add baselines to SVN.')
+                else:
+                    logging.info('  Rebaseline succeeded.')
+                    self._rebaselined_tests.append(test)
+
+            test_no += 1
+
+        zip_file.close()
+        os.remove(archive_file)
+
+        return self._rebaselined_tests
+
+    def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix,
+                         platform):
+        """Check whether a baseline is duplicate and can fallback to same
+           baseline for another platform. For example, if a test has same
+           baseline on linux and windows, then we only store windows
+           baseline and linux baseline will fallback to the windows version.
+
+        Args:
+          expected_filename: baseline expectation file name.
+          test: test name.
+          suffix: file suffix of the expected results, including dot;
+                  e.g. '.txt' or '.png'.
+          platform: baseline platform 'mac', 'win' or 'linux'.
+
+        Returns:
+          True if the baseline is unnecessary.
+          False otherwise.
+        """
+        test_filepath = os.path.join(path_utils.layout_tests_dir(), test)
+        all_baselines = path_utils.expected_baselines(test_filepath,
+                                                      suffix, platform, True)
+        for (fallback_dir, fallback_file) in all_baselines:
+            if fallback_dir and fallback_file:
+                fallback_fullpath = os.path.normpath(
+                    os.path.join(fallback_dir, fallback_file))
+                if fallback_fullpath.lower() != baseline_path.lower():
+                    if not self._diff_baselines(new_baseline,
+                                                fallback_fullpath):
+                        logging.info('  Found same baseline at %s',
+                                     fallback_fullpath)
+                        return True
+                    else:
+                        return False
+
+        return False
+
+    def _diff_baselines(self, file1, file2):
+        """Check whether two baselines are different.
+
+        Args:
+          file1, file2: full paths of the baselines to compare.
+
+        Returns:
+          True if two files are different or have different extensions.
+          False otherwise.
+        """
+
+        ext1 = os.path.splitext(file1)[1].upper()
+        ext2 = os.path.splitext(file2)[1].upper()
+        if ext1 != ext2:
+            logging.warn('Files to compare have different ext. '
+                         'File1: %s; File2: %s', file1, file2)
+            return True
+
+        if ext1 == '.PNG':
+            return image_diff.ImageDiff(self._platform, '').diff_files(file1,
+                                                                       file2)
+        else:
+            return text_diff.TestTextDiff(self._platform, '').diff_files(file1,
+                                                                         file2)
+
+    def _delete_baseline(self, filename):
+        """Remove the file from repository and delete it from disk.
+
+        Args:
+          filename: full path of the file to delete.
+        """
+
+        if not filename or not os.path.isfile(filename):
+            return
+
+        if self._repo_type == REPO_SVN:
+            parent_dir, basename = os.path.split(filename)
+            original_dir = os.getcwd()
+            os.chdir(parent_dir)
+            run_shell(['svn', 'delete', '--force', basename], False)
+            os.chdir(original_dir)
+        else:
+            os.remove(filename)
+
+    def _update_rebaselined_tests_in_file(self, backup):
+        """Update the rebaselined tests in test expectations file.
+
+        Args:
+          backup: if True, back up the original test expectations file.
+        """
+
+        if self._rebaselined_tests:
+            self._test_expectations.remove_platform_from_file(
+                self._rebaselined_tests, self._platform, backup)
+        else:
+            logging.info('No test was rebaselined so nothing to remove.')
+
+    def _svn_add(self, filename):
+        """Add the file to SVN repository.
+
+        Args:
+          filename: full path of the file to add.
+
+        Returns:
+          True if the file already exists in SVN or is successfully added
+               to SVN.
+          False otherwise.
+        """
+
+        if not filename:
+            return False
+
+        parent_dir, basename = os.path.split(filename)
+        if self._repo_type != REPO_SVN or parent_dir == filename:
+            logging.info("No svn checkout found, skip svn add.")
+            return True
+
+        original_dir = os.getcwd()
+        os.chdir(parent_dir)
+        status_output = run_shell(['svn', 'status', basename], False)
+        os.chdir(original_dir)
+        output = status_output.upper()
+        if output.startswith('A') or output.startswith('M'):
+            logging.info('  File already added to SVN: "%s"', filename)
+            return True
+
+        if output.find('IS NOT A WORKING COPY') >= 0:
+            logging.info('  File is not a working copy, add its parent: "%s"',
+                         parent_dir)
+            return self._svn_add(parent_dir)
+
+        os.chdir(parent_dir)
+        add_output = run_shell(['svn', 'add', basename], True)
+        os.chdir(original_dir)
+        output = add_output.upper().rstrip()
+        if output.startswith('A') and output.find(basename.upper()) >= 0:
+            logging.info('  Added new file: "%s"', filename)
+            self._svn_prop_set(filename)
+            return True
+
+        if (not status_output) and (add_output.upper().find(
+            'ALREADY UNDER VERSION CONTROL') >= 0):
+            logging.info('  File already under SVN and has no change: "%s"',
+                         filename)
+            return True
+
+        logging.warn('  Failed to add file to SVN: "%s"', filename)
+        logging.warn('  Svn status output: "%s"', status_output)
+        logging.warn('  Svn add output: "%s"', add_output)
+        return False
+
+    def _svn_prop_set(self, filename):
+        """Set the baseline property
+
+        Args:
+          filename: full path of the file to add.
+
+        Returns:
+          True if the file already exists in SVN or is sucessfully added
+               to SVN.
+          False otherwise.
+        """
+        ext = os.path.splitext(filename)[1].upper()
+        if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM':
+            return
+
+        parent_dir, basename = os.path.split(filename)
+        original_dir = os.getcwd()
+        os.chdir(parent_dir)
+        if ext == '.PNG':
+            cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename]
+        else:
+            cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename]
+
+        logging.debug('  Set svn prop: %s', ' '.join(cmd))
+        run_shell(cmd, False)
+        os.chdir(original_dir)
+
+    def _create_html_baseline_files(self, baseline_fullpath):
+        """Create baseline files (old, new and diff) in html directory.
+
+           The files are used to compare the rebaselining results.
+
+        Args:
+          baseline_fullpath: full path of the expected baseline file.
+        """
+
+        if not baseline_fullpath or not os.path.exists(baseline_fullpath):
+            return
+
+        # Copy the new baseline to html directory for result comparison.
+        baseline_filename = os.path.basename(baseline_fullpath)
+        new_file = get_result_file_fullpath(self._options.html_directory,
+                                            baseline_filename, self._platform,
+                                            'new')
+        shutil.copyfile(baseline_fullpath, new_file)
+        logging.info('  Html: copied new baseline file from "%s" to "%s".',
+                     baseline_fullpath, new_file)
+
+        # Get the old baseline from SVN and save to the html directory.
+        output = run_shell(['svn', 'cat', '-r', 'BASE', baseline_fullpath])
+        if (not output) or (output.upper().rstrip().endswith(
+            'NO SUCH FILE OR DIRECTORY')):
+            logging.info('  No base file: "%s"', baseline_fullpath)
+            return
+        base_file = get_result_file_fullpath(self._options.html_directory,
+                                             baseline_filename, self._platform,
+                                             'old')
+        f = open(base_file, 'wb')
+        f.write(output)
+        f.close()
+        logging.info('  Html: created old baseline file: "%s".',
+                     base_file)
+
+        # Get the diff between old and new baselines and save to the html dir.
+        if baseline_filename.upper().endswith('.TXT'):
+            # If the user specified a custom diff command in their svn config
+            # file, then it'll be used when we do svn diff, which we don't want
+            # to happen since we want the unified diff.  Using --diff-cmd=diff
+            # doesn't always work, since they can have another diff executable
+            # in their path that gives different line endings.  So we use a
+            # bogus temp directory as the config directory, which gets
+            # around these problems.
+            if sys.platform.startswith("win"):
+                parent_dir = tempfile.gettempdir()
+            else:
+                parent_dir = sys.path[0]  # tempdir is not secure.
+            bogus_dir = os.path.join(parent_dir, "temp_svn_config")
+            logging.debug('  Html: temp config dir: "%s".', bogus_dir)
+            if not os.path.exists(bogus_dir):
+                os.mkdir(bogus_dir)
+                delete_bogus_dir = True
+            else:
+                delete_bogus_dir = False
+
+            output = run_shell(["svn", "diff", "--config-dir", bogus_dir,
+                               baseline_fullpath])
+            if output:
+                diff_file = get_result_file_fullpath(
+                    self._options.html_directory, baseline_filename,
+                    self._platform, 'diff')
+                f = open(diff_file, 'wb')
+                f.write(output)
+                f.close()
+                logging.info('  Html: created baseline diff file: "%s".',
+                             diff_file)
+
+            if delete_bogus_dir:
+                shutil.rmtree(bogus_dir, True)
+                logging.debug('  Html: removed temp config dir: "%s".',
+                              bogus_dir)
+
+
+class HtmlGenerator(object):
+    """Class to generate rebaselining result comparison html."""
+
+    HTML_REBASELINE = ('<html>'
+                       '<head>'
+                       '<style>'
+                       'body {font-family: sans-serif;}'
+                       '.mainTable {background: #666666;}'
+                       '.mainTable td , .mainTable th {background: white;}'
+                       '.detail {margin-left: 10px; margin-top: 3px;}'
+                       '</style>'
+                       '<title>Rebaselining Result Comparison (%(time)s)'
+                       '</title>'
+                       '</head>'
+                       '<body>'
+                       '<h2>Rebaselining Result Comparison (%(time)s)</h2>'
+                       '%(body)s'
+                       '</body>'
+                       '</html>')
+    HTML_NO_REBASELINING_TESTS = (
+        '<p>No tests found that need rebaselining.</p>')
+    HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>'
+                       '%s</table><br>')
+    HTML_TR_TEST = ('<tr>'
+                    '<th style="background-color: #CDECDE; border-bottom: '
+                    '1px solid black; font-size: 18pt; font-weight: bold" '
+                    'colspan="5">'
+                    '<a href="%s">%s</a>'
+                    '</th>'
+                    '</tr>')
+    HTML_TEST_DETAIL = ('<div class="detail">'
+                        '<tr>'
+                        '<th width="100">Baseline</th>'
+                        '<th width="100">Platform</th>'
+                        '<th width="200">Old</th>'
+                        '<th width="200">New</th>'
+                        '<th width="150">Difference</th>'
+                        '</tr>'
+                        '%s'
+                        '</div>')
+    HTML_TD_NOLINK = '<td align=center><a>%s</a></td>'
+    HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>'
+    HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">'
+                        '<img style="width: 200" src="%(uri)s" /></a></td>')
+    HTML_TR = '<tr>%s</tr>'
+
+    def __init__(self, options, platforms, rebaselining_tests):
+        self._html_directory = options.html_directory
+        self._platforms = platforms
+        self._rebaselining_tests = rebaselining_tests
+        self._html_file = os.path.join(options.html_directory,
+                                       'rebaseline.html')
+
+    def generate_html(self):
+        """Generate html file for rebaselining result comparison."""
+
+        logging.info('Generating html file')
+
+        html_body = ''
+        if not self._rebaselining_tests:
+            html_body += self.HTML_NO_REBASELINING_TESTS
+        else:
+            tests = list(self._rebaselining_tests)
+            tests.sort()
+
+            test_no = 1
+            for test in tests:
+                logging.info('Test %d: %s', test_no, test)
+                html_body += self._generate_html_for_one_test(test)
+                test_no += 1
+
+        html = self.HTML_REBASELINE % ({'time': time.asctime(),
+                                        'body': html_body})
+        logging.debug(html)
+
+        f = open(self._html_file, 'w')
+        f.write(html)
+        f.close()
+
+        logging.info('Baseline comparison html generated at "%s"',
+                     self._html_file)
+
+    def show_html(self):
+        """Launch the rebaselining html in brwoser."""
+
+        logging.info('Launching html: "%s"', self._html_file)
+
+        html_uri = path_utils.filename_to_uri(self._html_file)
+        webbrowser.open(html_uri, 1)
+
+        logging.info('Html launched.')
+
+    def _generate_baseline_links(self, test_basename, suffix, platform):
+        """Generate links for baseline results (old, new and diff).
+
+        Args:
+          test_basename: base filename of the test
+          suffix: baseline file suffix, '.txt' or '.png'
+          platform: win, linux or mac
+
+        Returns:
+          html links for showing baseline results (old, new and diff)
+        """
+
+        baseline_filename = '%s-expected%s' % (test_basename, suffix)
+        logging.debug('    baseline filename: "%s"', baseline_filename)
+
+        new_file = get_result_file_fullpath(self._html_directory,
+                                            baseline_filename, platform, 'new')
+        logging.info('    New baseline file: "%s"', new_file)
+        if not os.path.exists(new_file):
+            logging.info('    No new baseline file: "%s"', new_file)
+            return ''
+
+        old_file = get_result_file_fullpath(self._html_directory,
+                                            baseline_filename, platform, 'old')
+        logging.info('    Old baseline file: "%s"', old_file)
+        if suffix == '.png':
+            html_td_link = self.HTML_TD_LINK_IMG
+        else:
+            html_td_link = self.HTML_TD_LINK
+
+        links = ''
+        if os.path.exists(old_file):
+            links += html_td_link % {
+                'uri': path_utils.filename_to_uri(old_file),
+                'name': baseline_filename}
+        else:
+            logging.info('    No old baseline file: "%s"', old_file)
+            links += self.HTML_TD_NOLINK % ''
+
+        links += html_td_link % {'uri': path_utils.filename_to_uri(new_file),
+                                 'name': baseline_filename}
+
+        diff_file = get_result_file_fullpath(self._html_directory,
+                                             baseline_filename, platform,
+                                             'diff')
+        logging.info('    Baseline diff file: "%s"', diff_file)
+        if os.path.exists(diff_file):
+            links += html_td_link % {'uri': path_utils.filename_to_uri(
+                diff_file), 'name': 'Diff'}
+        else:
+            logging.info('    No baseline diff file: "%s"', diff_file)
+            links += self.HTML_TD_NOLINK % ''
+
+        return links
+
+    def _generate_html_for_one_test(self, test):
+        """Generate html for one rebaselining test.
+
+        Args:
+          test: layout test name
+
+        Returns:
+          html that compares baseline results for the test.
+        """
+
+        test_basename = os.path.basename(os.path.splitext(test)[0])
+        logging.info('  basename: "%s"', test_basename)
+        rows = []
+        for suffix in BASELINE_SUFFIXES:
+            if suffix == '.checksum':
+                continue
+
+            logging.info('  Checking %s files', suffix)
+            for platform in self._platforms:
+                links = self._generate_baseline_links(test_basename, suffix,
+                    platform)
+                if links:
+                    row = self.HTML_TD_NOLINK % self._get_baseline_result_type(
+                        suffix)
+                    row += self.HTML_TD_NOLINK % platform
+                    row += links
+                    logging.debug('    html row: %s', row)
+
+                    rows.append(self.HTML_TR % row)
+
+        if rows:
+            test_path = os.path.join(path_utils.layout_tests_dir(), test)
+            html = self.HTML_TR_TEST % (path_utils.filename_to_uri(test_path),
+                test)
+            html += self.HTML_TEST_DETAIL % ' '.join(rows)
+
+            logging.debug('    html for test: %s', html)
+            return self.HTML_TABLE_TEST % html
+
+        return ''
+
+    def _get_baseline_result_type(self, suffix):
+        """Name of the baseline result type."""
+
+        if suffix == '.png':
+            return 'Pixel'
+        elif suffix == '.txt':
+            return 'Render Tree'
+        else:
+            return 'Other'
+
+
+def main():
+    """Main function to produce new baselines."""
+
+    option_parser = optparse.OptionParser()
+    option_parser.add_option('-v', '--verbose',
+                             action='store_true',
+                             default=False,
+                             help='include debug-level logging.')
+
+    option_parser.add_option('-p', '--platforms',
+                             default='mac,win,win-xp,win-vista,linux',
+                             help=('Comma delimited list of platforms '
+                                   'that need rebaselining.'))
+
+    option_parser.add_option('-u', '--archive_url',
+                             default=('http://build.chromium.org/buildbot/'
+                                      'layout_test_results'),
+                             help=('Url to find the layout test result archive'
+                                   ' file.'))
+
+    option_parser.add_option('-w', '--webkit_canary',
+                             action='store_true',
+                             default=False,
+                             help=('If True, pull baselines from webkit.org '
+                                   'canary bot.'))
+
+    option_parser.add_option('-b', '--backup',
+                             action='store_true',
+                             default=False,
+                             help=('Whether or not to back up the original test'
+                                   ' expectations file after rebaselining.'))
+
+    option_parser.add_option('-d', '--html_directory',
+                             default='',
+                             help=('The directory that stores the results for'
+                                   ' rebaselining comparison.'))
+
+    options = option_parser.parse_args()[0]
+
+    # Set up our logging format.
+    log_level = logging.INFO
+    if options.verbose:
+        log_level = logging.DEBUG
+    logging.basicConfig(level=log_level,
+                        format=('%(asctime)s %(filename)s:%(lineno)-3d '
+                                '%(levelname)s %(message)s'),
+                        datefmt='%y%m%d %H:%M:%S')
+
+    # Verify 'platforms' option is valid
+    if not options.platforms:
+        logging.error('Invalid "platforms" option. --platforms must be '
+                      'specified in order to rebaseline.')
+        sys.exit(1)
+    platforms = [p.strip().lower() for p in options.platforms.split(',')]
+    for platform in platforms:
+        if platform not in REBASELINE_PLATFORM_ORDER:
+            logging.error('Invalid platform: "%s"' % (platform))
+            sys.exit(1)
+
+    # Adjust the platform order so the rebaseline tool runs in the order
+    # 'mac', 'win', 'linux'. This matches the layout test baseline search
+    # paths and simplifies how the rebaseline tool detects duplicate
+    # baselines. See the _is_dup_baseline method for details.
+    rebaseline_platforms = []
+    for platform in REBASELINE_PLATFORM_ORDER:
+        if platform in platforms:
+            rebaseline_platforms.append(platform)
+
+    options.html_directory = setup_html_directory(options.html_directory)
+
+    rebaselining_tests = set()
+    backup = options.backup
+    for platform in rebaseline_platforms:
+        rebaseliner = Rebaseliner(platform, options)
+
+        logging.info('')
+        log_dashed_string('Rebaseline started', platform)
+        if rebaseliner.run(backup):
+            # Only need to backup one original copy of test expectation file.
+            backup = False
+            log_dashed_string('Rebaseline done', platform)
+        else:
+            log_dashed_string('Rebaseline failed', platform, logging.ERROR)
+
+        rebaselining_tests |= set(rebaseliner.get_rebaselining_tests())
+
+    logging.info('')
+    log_dashed_string('Rebaselining result comparison started', None)
+    html_generator = HtmlGenerator(options,
+                                   rebaseline_platforms,
+                                   rebaselining_tests)
+    html_generator.generate_html()
+    html_generator.show_html()
+    log_dashed_string('Rebaselining result comparison done', None)
+
+    sys.exit(0)
+
+if __name__ == '__main__':
+    main()
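
As a rough sketch of how the wrapper script and the module above fit together
(illustrative only: it assumes it is run from the top of a WebKit checkout,
and the platform list is just an example), the following mirrors what
Scripts/rebaseline-chromium-webkit-tests does:

    import os
    import sys

    # Put the layout_tests package directory on sys.path, the same way the
    # wrapper script does relative to its own location.
    sys.path.append(os.path.join('WebKitTools', 'Scripts', 'webkitpy',
                                 'layout_tests'))

    import rebaseline_chromium_webkit_tests

    # main() reads its options from sys.argv via optparse, so set up the
    # desired command line before calling it; main() calls sys.exit(0)
    # when it finishes.
    sys.argv = ['rebaseline-chromium-webkit-tests',
                '--platforms', 'mac,linux', '--verbose']
    rebaseline_chromium_webkit_tests.main()
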
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py
new file mode 100755
index 0000000..88b97f8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py
@@ -0,0 +1,1697 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run layout tests using the test_shell.
+
+This is a port of the existing webkit test script run-webkit-tests.
+
+The TestRunner class runs a series of tests (TestType interface) against a set
+of test files.  If a test file fails a TestType, it returns a list of
+TestFailure objects to the TestRunner.  The TestRunner then aggregates the
+TestFailures to create a final report.
+
+This script reads several files, if they exist in the test_lists subdirectory
+next to this script itself.  Each should contain a list of paths to individual
+tests or entire subdirectories of tests, relative to the outermost test
+directory.  Entire lines starting with '//' (comments) will be ignored.
+
+For details of the files' contents and purposes, see test_lists/README.
+"""
+
+import errno
+import glob
+import logging
+import math
+import optparse
+import os
+import Queue
+import random
+import re
+import shutil
+import subprocess
+import sys
+import time
+import traceback
+
+from layout_package import apache_http_server
+from layout_package import test_expectations
+from layout_package import http_server
+from layout_package import json_layout_results_generator
+from layout_package import metered_stream
+from layout_package import path_utils
+from layout_package import platform_utils
+from layout_package import test_failures
+from layout_package import test_shell_thread
+from layout_package import test_files
+from layout_package import websocket_server
+from test_types import fuzzy_image_diff
+from test_types import image_diff
+from test_types import test_type_base
+from test_types import text_diff
+
+sys.path.append(path_utils.path_from_base('third_party'))
+import simplejson
+
+# Indicates that we want detailed progress updates in the output (prints
+# directory-by-directory feedback).
+LOG_DETAILED_PROGRESS = 'detailed-progress'
+
+# Log any unexpected results while running (instead of just at the end).
+LOG_UNEXPECTED = 'unexpected'
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+class TestInfo:
+    """Groups information about a test for easy passing of data."""
+
+    def __init__(self, filename, timeout):
+        """Generates the URI and stores the filename and timeout for this test.
+        Args:
+          filename: Full path to the test.
+          timeout: Timeout for running the test in TestShell.
+          """
+        self.filename = filename
+        self.uri = path_utils.filename_to_uri(filename)
+        self.timeout = timeout
+        expected_hash_file = path_utils.expected_filename(filename,
+                                                          '.checksum')
+        try:
+            self.image_hash = open(expected_hash_file, "r").read()
+        except IOError, e:
+            if errno.ENOENT != e.errno:
+                raise
+            self.image_hash = None
+
+
+class ResultSummary(object):
+    """A class for partitioning the test results we get into buckets.
+
+    This class is basically a glorified struct and it's private to this file
+    so we don't bother with any information hiding."""
+
+    def __init__(self, expectations, test_files):
+        self.total = len(test_files)
+        self.remaining = self.total
+        self.expectations = expectations
+        self.expected = 0
+        self.unexpected = 0
+        self.tests_by_expectation = {}
+        self.tests_by_timeline = {}
+        self.results = {}
+        self.unexpected_results = {}
+        self.failures = {}
+        self.tests_by_expectation[test_expectations.SKIP] = set()
+        for expectation in TestExpectationsFile.EXPECTATIONS.values():
+            self.tests_by_expectation[expectation] = set()
+        for timeline in TestExpectationsFile.TIMELINES.values():
+            self.tests_by_timeline[timeline] = (
+                expectations.get_tests_with_timeline(timeline))
+
+    def add(self, test, failures, result, expected):
+        """Add a result into the appropriate bin.
+
+        Args:
+          test: test file name
+          failures: list of failure objects from test execution
+          result: result of test (PASS, IMAGE, etc.).
+          expected: whether the result was what we expected it to be.
+        """
+
+        self.tests_by_expectation[result].add(test)
+        self.results[test] = result
+        self.remaining -= 1
+        if len(failures):
+            self.failures[test] = failures
+        if expected:
+            self.expected += 1
+        else:
+            self.unexpected_results[test] = result
+            self.unexpected += 1
+
+
+class TestRunner:
+    """A class for managing running a series of tests on a series of layout
+    test files."""
+
+    HTTP_SUBDIR = os.sep.join(['', 'http', ''])
+    WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
+
+    # The per-test timeout in milliseconds, if no --time-out-ms option was
+    # given to run_webkit_tests. This should correspond to the default timeout
+    # in test_shell.exe.
+    DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
+
+    NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
+
+    def __init__(self, options, meter):
+        """Initialize test runner data structures.
+
+        Args:
+          options: a dictionary of command line options
+          meter: a MeteredStream object to record updates to.
+        """
+        self._options = options
+        self._meter = meter
+
+        if options.use_apache:
+            self._http_server = apache_http_server.LayoutTestApacheHttpd(
+                options.results_directory)
+        else:
+            self._http_server = http_server.Lighttpd(options.results_directory)
+
+        self._websocket_server = websocket_server.PyWebSocket(
+            options.results_directory)
+        # The wss server is disabled for now; pyOpenSSL needs to be
+        # installed on the buildbots first.
+        # self._websocket_secure_server = websocket_server.PyWebSocket(
+        #        options.results_directory, use_tls=True, port=9323)
+
+        # a list of TestType objects
+        self._test_types = []
+
+        # a set of test files, and the same tests as a list
+        self._test_files = set()
+        self._test_files_list = None
+        self._file_dir = path_utils.path_from_base('webkit', 'tools',
+            'layout_tests')
+        self._result_queue = Queue.Queue()
+
+        # These are used for --log detailed-progress to track status by
+        # directory.
+        self._current_dir = None
+        self._current_progress_str = ""
+        self._current_test_number = 0
+
+    def __del__(self):
+        logging.debug("flushing stdout")
+        sys.stdout.flush()
+        logging.debug("flushing stderr")
+        sys.stderr.flush()
+        logging.debug("stopping http server")
+        # Stop the http server.
+        self._http_server.stop()
+        # Stop the Web Socket / Web Socket Secure servers.
+        self._websocket_server.stop()
+        # self._websocket_secure_server.Stop()
+
+    def gather_file_paths(self, paths):
+        """Find all the files to test.
+
+        Args:
+          paths: a list of globs to use instead of the defaults."""
+        self._test_files = test_files.gather_test_files(paths)
+
+    def parse_expectations(self, platform, is_debug_mode):
+        """Parse the expectations from the test_list files and return a data
+        structure holding them. Throws an error if the test_list files have
+        invalid syntax."""
+        if self._options.lint_test_files:
+            test_files = None
+        else:
+            test_files = self._test_files
+
+        try:
+            self._expectations = test_expectations.TestExpectations(test_files,
+                self._file_dir, platform, is_debug_mode,
+                self._options.lint_test_files)
+            return self._expectations
+        except Exception, err:
+            if self._options.lint_test_files:
+                print str(err)
+            else:
+                raise err
+
+    def prepare_lists_and_print_output(self, write):
+        """Create appropriate subsets of test lists and returns a
+        ResultSummary object. Also prints expected test counts.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+             sys.stdout.write.
+        """
+
+        # Remove skipped - both fixable and ignored - files from the
+        # top-level list of files to test.
+        num_all_test_files = len(self._test_files)
+        write("Found:  %d tests" % (len(self._test_files)))
+        skipped = set()
+        if num_all_test_files > 1 and not self._options.force:
+            skipped = self._expectations.get_tests_with_result_type(
+                           test_expectations.SKIP)
+            self._test_files -= skipped
+
+        # Create a sorted list of test files so the subset chunk,
+        # if used, contains alphabetically consecutive tests.
+        self._test_files_list = list(self._test_files)
+        if self._options.randomize_order:
+            random.shuffle(self._test_files_list)
+        else:
+            self._test_files_list.sort()
+
+        # If the user specifies they just want to run a subset of the tests,
+        # just grab a subset of the non-skipped tests.
+        if self._options.run_chunk or self._options.run_part:
+            chunk_value = self._options.run_chunk or self._options.run_part
+            test_files = self._test_files_list
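+            # chunk_value has the form "<n>:<l>" (two colon-separated
+            # integers); how the numbers are interpreted differs between
+            # run_chunk and run_part below.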
+            try:
+                (chunk_num, chunk_len) = chunk_value.split(":")
+                chunk_num = int(chunk_num)
+                assert(chunk_num >= 0)
+                test_size = int(chunk_len)
+                assert(test_size > 0)
+            except:
+                logging.critical("invalid chunk '%s'" % chunk_value)
+                sys.exit(1)
+
+            # Get the number of tests
+            num_tests = len(test_files)
+
+            # Get the start offset of the slice.
+            if self._options.run_chunk:
+                chunk_len = test_size
+                # In this case chunk_num can be really large. Wrap it
+                # around so the slice fits in the current number of tests.
+                slice_start = (chunk_num * chunk_len) % num_tests
+            else:
+                # Validate the data.
+                assert(test_size <= num_tests)
+                assert(chunk_num <= test_size)
+
+                # To compute chunk_len without skipping any tests, round
+                # num_tests up to the next multiple of test_size so it
+                # divides evenly into all the parts.
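+                # For example, splitting 10 tests into 3 parts gives
+                # rounded_tests = 12 and chunk_len = 4.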
+                rounded_tests = num_tests
+                if rounded_tests % test_size != 0:
+                    rounded_tests = (num_tests + test_size -
+                                     (num_tests % test_size))
+
+                chunk_len = rounded_tests / test_size
+                slice_start = chunk_len * (chunk_num - 1)
+                # It is fine if the slice runs past the end of the list;
+                # slice_end is clamped below.
+
+            # Get the end offset of the slice.
+            slice_end = min(num_tests, slice_start + chunk_len)
+
+            files = test_files[slice_start:slice_end]
+
+            tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
+                (slice_end - slice_start), slice_start, slice_end, num_tests)
+            write(tests_run_msg)
+
+            # If we reached the end and we don't have enough tests, we run some
+            # from the beginning.
+            if (self._options.run_chunk and
+                (slice_end - slice_start < chunk_len)):
+                extra = 1 + chunk_len - (slice_end - slice_start)
+                extra_msg = ('   last chunk is partial, appending [0:%d]' %
+                            extra)
+                write(extra_msg)
+                tests_run_msg += "\n" + extra_msg
+                files.extend(test_files[0:extra])
+            tests_run_filename = os.path.join(self._options.results_directory,
+                                              "tests_run.txt")
+            tests_run_file = open(tests_run_filename, "w")
+            tests_run_file.write(tests_run_msg + "\n")
+            tests_run_file.close()
+
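+            # Scale the number of skipped tests to this chunk's share of the
+            # full test list so the statistics below stay proportional.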
+            len_skip_chunk = int(len(files) * len(skipped) /
+                                 float(len(self._test_files)))
+            skip_chunk_list = list(skipped)[0:len_skip_chunk]
+            skip_chunk = set(skip_chunk_list)
+
+            # Update expectations so that the stats are calculated correctly.
+            # We need to pass a list that includes the right # of skipped files
+            # to parse_expectations so that ResultSummary() will get the
+            # correct
+            # stats. So, we add in the subset of skipped files, and then
+            # subtract them back out.
+            self._test_files_list = files + skip_chunk_list
+            self._test_files = set(self._test_files_list)
+
+            self._expectations = self.parse_expectations(
+                path_utils.platform_name(), self._options.target == 'Debug')
+
+            self._test_files = set(files)
+            self._test_files_list = files
+        else:
+            skip_chunk = skipped
+
+        result_summary = ResultSummary(self._expectations,
+            self._test_files | skip_chunk)
+        self._print_expected_results_of_type(write, result_summary,
+            test_expectations.PASS, "passes")
+        self._print_expected_results_of_type(write, result_summary,
+            test_expectations.FAIL, "failures")
+        self._print_expected_results_of_type(write, result_summary,
+            test_expectations.FLAKY, "flaky")
+        self._print_expected_results_of_type(write, result_summary,
+            test_expectations.SKIP, "skipped")
+
+
+        if self._options.force:
+            write('Running all tests, including skips (--force)')
+        else:
+            # Note that we don't actually run the skipped tests (they were
+            # subtracted out of self._test_files, above), but we stub out the
+            # results here so the statistics can remain accurate.
+            for test in skip_chunk:
+                result_summary.add(test, [], test_expectations.SKIP,
+                                   expected=True)
+        write("")
+
+        return result_summary
+
+    def add_test_type(self, test_type):
+        """Add a TestType to the TestRunner."""
+        self._test_types.append(test_type)
+
+    def _get_dir_for_test_file(self, test_file):
+        """Returns the highest-level directory by which to shard the given
+        test file."""
+        index = test_file.rfind(os.sep + 'LayoutTests' + os.sep)
+
+        test_file = test_file[index + len('LayoutTests/'):]
+        test_file_parts = test_file.split(os.sep, 1)
+        directory = test_file_parts[0]
+        test_file = test_file_parts[1]
+
+        # The http tests are very stable on mac/linux.
+        # TODO(ojan): Make the http server on Windows be apache so we can
+        # shard the http tests there as well. Switching to apache is
+        # what made them stable on linux/mac.
+        return_value = directory
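+        # Keep descending into subdirectories, except that on platforms other
+        # than mac/linux we stop at the top-level 'http' directory so all of
+        # the http tests run in a single shard.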
+        while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
+                and test_file.find(os.sep) >= 0):
+            test_file_parts = test_file.split(os.sep, 1)
+            directory = test_file_parts[0]
+            return_value = os.path.join(return_value, directory)
+            test_file = test_file_parts[1]
+
+        return return_value
+
+    def _get_test_info_for_file(self, test_file):
+        """Returns the appropriate TestInfo object for the file. Mostly this
+        is used for looking up the timeout value (in ms) to use for the given
+        test."""
+        if self._expectations.has_modifier(test_file, test_expectations.SLOW):
+            return TestInfo(test_file, self._options.slow_time_out_ms)
+        return TestInfo(test_file, self._options.time_out_ms)
+
+    def _get_test_file_queue(self, test_files):
+        """Create the thread safe queue of lists of (test filenames, test URIs)
+        tuples. Each TestShellThread pulls a list from this queue and runs
+        those tests in order before grabbing the next available list.
+
+        Shard the lists by directory. This helps ensure that tests that depend
+        on each other (aka bad tests!) continue to run together as most
+        cross-tests dependencies tend to occur within the same directory.
+
+        Return:
+          The Queue of lists of TestInfo objects.
+        """
+
+        if (self._options.experimental_fully_parallel or
+            self._is_single_threaded()):
+            filename_queue = Queue.Queue()
+            for test_file in test_files:
+                filename_queue.put(
+                    ('.', [self._get_test_info_for_file(test_file)]))
+            return filename_queue
+
+        tests_by_dir = {}
+        for test_file in test_files:
+            directory = self._get_dir_for_test_file(test_file)
+            tests_by_dir.setdefault(directory, [])
+            tests_by_dir[directory].append(
+                self._get_test_info_for_file(test_file))
+
+        # Sort by the number of tests in the dir so that the ones with the
+        # most tests get run first in order to maximize parallelization.
+        # Number of tests is a good enough, but not perfect, approximation
+        # of how long that set of tests will take to run. We can't just use
+        # a PriorityQueue until we move to Python 2.6.
+        test_lists = []
+        http_tests = None
+        for directory in tests_by_dir:
+            test_list = tests_by_dir[directory]
+            # Keep the tests in alphabetical order.
+            # TODO: Remove once tests are fixed so they can be run in any
+            # order.
+            test_list.reverse()
+            test_list_tuple = (directory, test_list)
+            if directory == 'LayoutTests' + os.sep + 'http':
+                http_tests = test_list_tuple
+            else:
+                test_lists.append(test_list_tuple)
+        test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+        # Put the http tests first. There are only a couple hundred of them,
+        # but each http test takes a very long time to run, so sorting by the
+        # number of tests doesn't accurately capture how long they take to run.
+        if http_tests:
+            test_lists.insert(0, http_tests)
+
+        filename_queue = Queue.Queue()
+        for item in test_lists:
+            filename_queue.put(item)
+        return filename_queue
+
+    def _get_test_shell_args(self, index):
+        """Returns the tuple of arguments for tests and for test_shell."""
+        shell_args = []
+        test_args = test_type_base.TestArguments()
+        if not self._options.no_pixel_tests:
+            png_path = os.path.join(self._options.results_directory,
+                                    "png_result%s.png" % index)
+            shell_args.append("--pixel-tests=" + png_path)
+            test_args.png_path = png_path
+
+        test_args.new_baseline = self._options.new_baseline
+
+        test_args.show_sources = self._options.sources
+
+        if self._options.startup_dialog:
+            shell_args.append('--testshell-startup-dialog')
+
+        if self._options.gp_fault_error_box:
+            shell_args.append('--gp-fault-error-box')
+
+        return (test_args, shell_args)
+
+    def _contains_tests(self, subdir):
+        for test_file in self._test_files_list:
+            if test_file.find(subdir) >= 0:
+                return True
+        return False
+
+    def _instantiate_test_shell_threads(self, test_shell_binary, test_files,
+                                     result_summary):
+        """Instantitates and starts the TestShellThread(s).
+
+        Return:
+          The list of threads.
+        """
+        test_shell_command = [test_shell_binary]
+
+        if self._options.wrapper:
+            # This split() isn't really what we want -- it incorrectly will
+            # split quoted strings within the wrapper argument -- but in
+            # practice it shouldn't come up and the --help output warns
+            # about it anyway.
+            test_shell_command = (self._options.wrapper.split() +
+                test_shell_command)
+
+        filename_queue = self._get_test_file_queue(test_files)
+
+        # Instantiate TestShellThreads and start them.
+        threads = []
+        for i in xrange(int(self._options.num_test_shells)):
+            # Create separate TestTypes instances for each thread.
+            test_types = []
+            for t in self._test_types:
+                test_types.append(t(self._options.platform,
+                                    self._options.results_directory))
+
+            test_args, shell_args = self._get_test_shell_args(i)
+            thread = test_shell_thread.TestShellThread(filename_queue,
+                                                       self._result_queue,
+                                                       test_shell_command,
+                                                       test_types,
+                                                       test_args,
+                                                       shell_args,
+                                                       self._options)
+            if self._is_single_threaded():
+                thread.run_in_main_thread(self, result_summary)
+            else:
+                thread.start()
+            threads.append(thread)
+
+        return threads
+
+    def _stop_layout_test_helper(self, proc):
+        """Stop the layout test helper and closes it down."""
+        if proc:
+            logging.debug("Stopping layout test helper")
+            proc.stdin.write("x\n")
+            proc.stdin.close()
+            proc.wait()
+
+    def _is_single_threaded(self):
+        """Returns whether we should run all the tests in the main thread."""
+        return int(self._options.num_test_shells) == 1
+
+    def _run_tests(self, test_shell_binary, file_list, result_summary):
+        """Runs the tests in the file_list.
+
+        Args:
+          result_summary: summary object to populate with the results
+
+        Return: A tuple (thread_timings, test_timings,
+            individual_test_timings)
+            thread_timings is a list of dicts with the total runtime
+              of each thread with 'name', 'num_tests', 'total_time' properties
+            test_timings is a dict of timing stats for each sharded
+              subdirectory
+            individual_test_timings is a list of timing stats for each test
+        """
+        threads = self._instantiate_test_shell_threads(test_shell_binary,
+                                                    file_list,
+                                                    result_summary)
+
+        # Wait for the threads to finish and collect test failures.
+        failures = {}
+        test_timings = {}
+        individual_test_timings = []
+        thread_timings = []
+        try:
+            for thread in threads:
+                while thread.isAlive():
+                    # Use a timed join so the main thread can notice a
+                    # KeyboardInterrupt; an indefinite blocking join would
+                    # not be interruptible. The exact timeout value does not
+                    # matter.
+                    thread.join(0.1)
+                    self.update_summary(result_summary)
+                thread_timings.append({'name': thread.getName(),
+                                       'num_tests': thread.get_num_tests(),
+                                       'total_time': thread.get_total_time()})
+                test_timings.update(thread.get_directory_timing_stats())
+                individual_test_timings.extend(
+                    thread.get_individual_test_stats())
+        except KeyboardInterrupt:
+            for thread in threads:
+                thread.cancel()
+            self._stop_layout_test_helper(self._layout_test_helper_proc)
+            raise
+        for thread in threads:
+            # Check whether a TestShellThread died before normal completion.
+            exception_info = thread.get_exception_info()
+            if exception_info is not None:
+                # Re-raise the thread's exception here to make it clear that
+                # testing was aborted. Otherwise, the tests that did not run
+                # would be assumed to have passed.
+                raise exception_info[0], exception_info[1], exception_info[2]
+
+        # Make sure we pick up any remaining tests.
+        self.update_summary(result_summary)
+        return (thread_timings, test_timings, individual_test_timings)
+
+    def run(self, result_summary):
+        """Run all our tests on all our test files.
+
+        For each test file, we run each test type. If there are any failures,
+        we collect them for reporting.
+
+        Args:
+          result_summary: a summary object tracking the test results.
+
+        Return:
+          We return nonzero if there are regressions compared to the last run.
+        """
+        if not self._test_files:
+            return 0
+        start_time = time.time()
+        test_shell_binary = path_utils.test_shell_path(self._options.target)
+
+        # Start up any helper needed
+        self._layout_test_helper_proc = None
+        if not self._options.no_pixel_tests:
+            helper_path = path_utils.layout_test_helper_path(
+                self._options.target)
+            if len(helper_path):
+                logging.debug("Starting layout helper %s" % helper_path)
+                self._layout_test_helper_proc = subprocess.Popen(
+                    [helper_path], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, stderr=None)
+                is_ready = self._layout_test_helper_proc.stdout.readline()
+                if not is_ready.startswith('ready'):
+                    logging.error("layout_test_helper failed to be ready")
+
+        # Check that the system dependencies (themes, fonts, ...) are correct.
+        if not self._options.nocheck_sys_deps:
+            proc = subprocess.Popen([test_shell_binary,
+                                    "--check-layout-test-sys-deps"])
+            if proc.wait() != 0:
+                logging.info("Aborting because system dependencies check "
+                             "failed.\n To override, invoke with "
+                             "--nocheck-sys-deps")
+                sys.exit(1)
+
+        if self._contains_tests(self.HTTP_SUBDIR):
+            self._http_server.start()
+
+        if self._contains_tests(self.WEBSOCKET_SUBDIR):
+            self._websocket_server.start()
+            # self._websocket_secure_server.Start()
+
+        thread_timings, test_timings, individual_test_timings = (
+            self._run_tests(test_shell_binary, self._test_files_list,
+                           result_summary))
+
+        # We exclude the crashes from the list of results to retry, because
+        # we want to treat even a potentially flaky crash as an error.
+        failures = self._get_failures(result_summary, include_crashes=False)
+        retries = 0
+        retry_summary = result_summary
+        while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
+               len(failures)):
+            logging.debug("Retrying %d unexpected failure(s)" % len(failures))
+            retries += 1
+            retry_summary = ResultSummary(self._expectations, failures.keys())
+            self._run_tests(test_shell_binary, failures.keys(), retry_summary)
+            failures = self._get_failures(retry_summary, include_crashes=True)
+
+        self._stop_layout_test_helper(self._layout_test_helper_proc)
+        end_time = time.time()
+
+        write = create_logging_writer(self._options, 'timing')
+        self._print_timing_statistics(write, end_time - start_time,
+                                    thread_timings, test_timings,
+                                    individual_test_timings,
+                                    result_summary)
+
+        self._meter.update("")
+
+        if self._options.verbose:
+            # We write this block to stdout for compatibility with the
+            # buildbot log parser, which only looks at stdout, not stderr :(
+            write = lambda s: sys.stdout.write("%s\n" % s)
+        else:
+            write = create_logging_writer(self._options, 'actual')
+
+        self._print_result_summary(write, result_summary)
+
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+        if (LOG_DETAILED_PROGRESS in self._options.log or
+            (LOG_UNEXPECTED in self._options.log and
+             result_summary.total != result_summary.expected)):
+            print
+
+        # This summary data gets written to stdout regardless of log level
+        self._print_one_line_summary(result_summary.total,
+                                  result_summary.expected)
+
+        unexpected_results = self._summarize_unexpected_results(result_summary,
+            retry_summary)
+        self._print_unexpected_results(unexpected_results)
+
+        # Write the same data to log files.
+        self._write_json_files(unexpected_results, result_summary,
+                             individual_test_timings)
+
+        # Write the summary to disk (results.html) and maybe open the
+        # test_shell to this file.
+        wrote_results = self._write_results_html_file(result_summary)
+        if not self._options.noshow_results and wrote_results:
+            self._show_results_html_file()
+
+        # Ignore flaky failures and unexpected passes so we don't turn the
+        # bot red for those.
+        return unexpected_results['num_regressions']
+
+    def update_summary(self, result_summary):
+        """Update the summary while running tests."""
+        while True:
+            try:
+                (test, fail_list) = self._result_queue.get_nowait()
+                result = test_failures.determine_result_type(fail_list)
+                expected = self._expectations.matches_an_expected_result(test,
+                                                                      result)
+                result_summary.add(test, fail_list, result, expected)
+                if (LOG_DETAILED_PROGRESS in self._options.log and
+                    (self._options.experimental_fully_parallel or
+                     self._is_single_threaded())):
+                    self._display_detailed_progress(result_summary)
+                else:
+                    if not expected and LOG_UNEXPECTED in self._options.log:
+                        self._print_unexpected_test_result(test, result)
+                    self._display_one_line_progress(result_summary)
+            except Queue.Empty:
+                return
+
+    def _display_one_line_progress(self, result_summary):
+        """Displays the progress through the test run."""
+        self._meter.update("Testing: %d ran as expected, %d didn't, %d left" %
+            (result_summary.expected, result_summary.unexpected,
+             result_summary.remaining))
+
+    def _display_detailed_progress(self, result_summary):
+        """Display detailed progress output where we print the directory name
+        and one dot for each completed test. This is triggered by
+        "--log detailed-progress"."""
+        if self._current_test_number == len(self._test_files_list):
+            return
+
+        next_test = self._test_files_list[self._current_test_number]
+        next_dir = os.path.dirname(
+            path_utils.relative_test_filename(next_test))
+        if self._current_progress_str == "":
+            self._current_progress_str = "%s: " % (next_dir)
+            self._current_dir = next_dir
+
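+        # Walk forward through the test list, emitting a dot for each test
+        # that already has a result, so progress is reported in list order
+        # even though results may arrive out of order.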
+        while next_test in result_summary.results:
+            if next_dir != self._current_dir:
+                self._meter.write("%s\n" % (self._current_progress_str))
+                self._current_progress_str = "%s: ." % (next_dir)
+                self._current_dir = next_dir
+            else:
+                self._current_progress_str += "."
+
+            if (next_test in result_summary.unexpected_results and
+                LOG_UNEXPECTED in self._options.log):
+                result = result_summary.unexpected_results[next_test]
+                self._meter.write("%s\n" % self._current_progress_str)
+                self._print_unexpected_test_result(next_test, result)
+                self._current_progress_str = "%s: " % self._current_dir
+
+            self._current_test_number += 1
+            if self._current_test_number == len(self._test_files_list):
+                break
+
+            next_test = self._test_files_list[self._current_test_number]
+            next_dir = os.path.dirname(
+                path_utils.relative_test_filename(next_test))
+
+        if result_summary.remaining:
+            remain_str = " (%d)" % (result_summary.remaining)
+            self._meter.update("%s%s" %
+                               (self._current_progress_str, remain_str))
+        else:
+            self._meter.write("%s\n" % (self._current_progress_str))
+
+    def _get_failures(self, result_summary, include_crashes):
+        """Filters a dict of results and returns only the failures.
+
+        Args:
+          result_summary: the results of the test run
+          include_crashes: whether crashes are included in the output.
+            We use False when finding the list of failures to retry
+            to see if the results were flaky. Although the crashes may also be
+            flaky, we treat them as if they aren't so that they're not ignored.
+        Returns:
+          a dict of files -> results
+        """
+        failed_results = {}
+        for test, result in result_summary.unexpected_results.iteritems():
+            if (result == test_expectations.PASS or
+                (result == test_expectations.CRASH and not include_crashes)):
+                continue
+            failed_results[test] = result
+
+        return failed_results
+
+    def _summarize_unexpected_results(self, result_summary, retry_summary):
+        """Summarize any unexpected results as a dict.
+
+        TODO(dpranke): split this data structure into a separate class?
+
+        Args:
+          result_summary: summary object from initial test runs
+          retry_summary: summary object from final test run of retried tests
+        Returns:
+          A dictionary containing a summary of the unexpected results from the
+          run, with the following fields:
+            'version': a version indicator (1 in this version)
+            'fixable': # of fixable tests (NOW - PASS)
+            'skipped': # of skipped tests (NOW & SKIPPED)
+            'num_regressions': # of non-flaky failures
+            'num_flaky': # of flaky failures
+            'num_passes': # of unexpected passes
+            'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+        """
+        results = {}
+        results['version'] = 1
+
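+        # tbe/tbt: tests grouped by expectation and by timeline, respectively.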
+        tbe = result_summary.tests_by_expectation
+        tbt = result_summary.tests_by_timeline
+        results['fixable'] = len(tbt[test_expectations.NOW] -
+                                 tbe[test_expectations.PASS])
+        results['skipped'] = len(tbt[test_expectations.NOW] &
+                                 tbe[test_expectations.SKIP])
+
+        num_passes = 0
+        num_flaky = 0
+        num_regressions = 0
+        keywords = {}
+        for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
+            keywords[v] = k.upper()
+
+        tests = {}
+        for filename, result in result_summary.unexpected_results.iteritems():
+            # Note that if a test crashed in the original run, we ignore
+            # whether or not it crashed when we retried it (if we retried it),
+            # and always consider the result not flaky.
+            test = path_utils.relative_test_filename(filename)
+            expected = self._expectations.get_expectations_string(filename)
+            actual = [keywords[result]]
+
+            if result == test_expectations.PASS:
+                num_passes += 1
+            elif result == test_expectations.CRASH:
+                num_regressions += 1
+            else:
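+                # If the retry produced an expected result, or a different
+                # unexpected result, treat the test as flaky; otherwise it
+                # is a regression.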
+                if filename not in retry_summary.unexpected_results:
+                    actual.extend(
+                        self._expectations.get_expectations_string(
+                        filename).split(" "))
+                    num_flaky += 1
+                else:
+                    retry_result = retry_summary.unexpected_results[filename]
+                    if result != retry_result:
+                        actual.append(keywords[retry_result])
+                        num_flaky += 1
+                    else:
+                        num_regressions += 1
+
+            tests[test] = {}
+            tests[test]['expected'] = expected
+            tests[test]['actual'] = " ".join(actual)
+
+        results['tests'] = tests
+        results['num_passes'] = num_passes
+        results['num_flaky'] = num_flaky
+        results['num_regressions'] = num_regressions
+
+        return results
+
+    def _write_json_files(self, unexpected_results, result_summary,
+                        individual_test_timings):
+        """Writes the results of the test run as JSON files into the results
+        dir.
+
+        There are three different files written into the results dir:
+          unexpected_results.json: A short list of any unexpected results.
+            This is used by the buildbots to display results.
+          expectations.json: This is used by the flakiness dashboard.
+          results.json: A full list of the results - used by the flakiness
+            dashboard and the aggregate results dashboard.
+
+        Args:
+          unexpected_results: dict of unexpected results
+          result_summary: full summary object
+          individual_test_timings: list of test times (used by the flakiness
+            dashboard).
+        """
+        logging.debug("Writing JSON files in %s." %
+                      self._options.results_directory)
+        unexpected_file = open(os.path.join(self._options.results_directory,
+            "unexpected_results.json"), "w")
+        unexpected_file.write(simplejson.dumps(unexpected_results,
+                              sort_keys=True, indent=2))
+        unexpected_file.close()
+
+        # Write a json file of the test_expectations.txt file for the layout
+        # tests dashboard.
+        expectations_file = open(os.path.join(self._options.results_directory,
+            "expectations.json"), "w")
+        expectations_json = \
+            self._expectations.get_expectations_json_for_all_platforms()
+        expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");")
+        expectations_file.close()
+
+        json_layout_results_generator.JSONLayoutResultsGenerator(
+            self._options.builder_name, self._options.build_name,
+            self._options.build_number, self._options.results_directory,
+            BUILDER_BASE_URL, individual_test_timings,
+            self._expectations, result_summary, self._test_files_list)
+
+        logging.debug("Finished writing JSON files.")
+
+    def _print_expected_results_of_type(self, write, result_summary,
+                                        result_type, result_type_str):
+        """Print the number of the tests in a given result class.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+             sys.stdout.write.
+          result_summary: the object containing all the results to report on
+          result_type: the particular result type to report in the summary.
+          result_type_str: a string description of the result_type.
+        """
+        tests = self._expectations.get_tests_with_result_type(result_type)
+        now = result_summary.tests_by_timeline[test_expectations.NOW]
+        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+        defer = result_summary.tests_by_timeline[test_expectations.DEFER]
+
+        # We use a fancy format string in order to print the data out in a
+        # nicely-aligned table.
+        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
+                  % (self._num_digits(now), self._num_digits(defer),
+                  self._num_digits(wontfix)))
+        write(fmtstr % (len(tests), result_type_str, len(tests & now),
+              len(tests & defer), len(tests & wontfix)))
+
+    def _num_digits(self, num):
+        """Returns the number of digits needed to represent the length of a
+        sequence."""
+        ndigits = 1
+        if len(num):
+            ndigits = int(math.log10(len(num))) + 1
+        return ndigits
+
+    def _print_timing_statistics(self, write, total_time, thread_timings,
+                               directory_test_timings, individual_test_timings,
+                               result_summary):
+        """Record timing-specific information for the test run.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          total_time: total elapsed time (in seconds) for the test run
+          thread_timings: wall clock time each thread ran for
+          directory_test_timings: timing by directory
+          individual_test_timings: timing by file
+          result_summary: summary object for the test run
+        """
+        write("Test timing:")
+        write("  %6.2f total testing time" % total_time)
+        write("")
+        write("Thread timing:")
+        cuml_time = 0
+        for t in thread_timings:
+            write("    %10s: %5d tests, %6.2f secs" %
+                  (t['name'], t['num_tests'], t['total_time']))
+            cuml_time += t['total_time']
+        write("   %6.2f cumulative, %6.2f optimal" %
+              (cuml_time, cuml_time / int(self._options.num_test_shells)))
+        write("")
+
+        self._print_aggregate_test_statistics(write, individual_test_timings)
+        self._print_individual_test_times(write, individual_test_timings,
+                                          result_summary)
+        self._print_directory_timings(write, directory_test_timings)
+
+    def _print_aggregate_test_statistics(self, write, individual_test_timings):
+        """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          individual_test_timings: List of test_shell_thread.TestStats for all
+              tests.
+        """
+        test_types = individual_test_timings[0].time_for_diffs.keys()
+        times_for_test_shell = []
+        times_for_diff_processing = []
+        times_per_test_type = {}
+        for test_type in test_types:
+            times_per_test_type[test_type] = []
+
+        for test_stats in individual_test_timings:
+            times_for_test_shell.append(test_stats.test_run_time)
+            times_for_diff_processing.append(
+                test_stats.total_time_for_all_diffs)
+            time_for_diffs = test_stats.time_for_diffs
+            for test_type in test_types:
+                times_per_test_type[test_type].append(
+                    time_for_diffs[test_type])
+
+        self._print_statistics_for_test_timings(write,
+            "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell)
+        self._print_statistics_for_test_timings(write,
+            "PER TEST DIFF PROCESSING TIMES (seconds):",
+            times_for_diff_processing)
+        for test_type in test_types:
+            self._print_statistics_for_test_timings(write,
+                "PER TEST TIMES BY TEST TYPE: %s" % test_type,
+                times_per_test_type[test_type])
+
+    def _print_individual_test_times(self, write, individual_test_timings,
+                                  result_summary):
+        """Prints the run times for slow, timeout and crash tests.
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          individual_test_timings: List of test_shell_thread.TestStats for all
+              tests.
+          result_summary: summary object for test run
+        """
+        # Reverse-sort by the time spent in test_shell.
+        individual_test_timings.sort(lambda a, b:
+            cmp(b.test_run_time, a.test_run_time))
+
+        num_printed = 0
+        slow_tests = []
+        timeout_or_crash_tests = []
+        unexpected_slow_tests = []
+        for test_tuple in individual_test_timings:
+            filename = test_tuple.filename
+            is_timeout_crash_or_slow = False
+            if self._expectations.has_modifier(filename,
+                                               test_expectations.SLOW):
+                is_timeout_crash_or_slow = True
+                slow_tests.append(test_tuple)
+
+            if filename in result_summary.failures:
+                result = result_summary.results[filename]
+                if (result == test_expectations.TIMEOUT or
+                    result == test_expectations.CRASH):
+                    is_timeout_crash_or_slow = True
+                    timeout_or_crash_tests.append(test_tuple)
+
+            if (not is_timeout_crash_or_slow and
+                num_printed < self._options.num_slow_tests_to_log):
+                num_printed = num_printed + 1
+                unexpected_slow_tests.append(test_tuple)
+
+        write("")
+        self._print_test_list_timing(write, "%s slowest tests that are not "
+            "marked as SLOW and did not timeout/crash:" %
+            self._options.num_slow_tests_to_log, unexpected_slow_tests)
+        write("")
+        self._print_test_list_timing(write, "Tests marked as SLOW:",
+                                     slow_tests)
+        write("")
+        self._print_test_list_timing(write, "Tests that timed out or crashed:",
+                                     timeout_or_crash_tests)
+        write("")
+
+    def _print_test_list_timing(self, write, title, test_list):
+        """Print timing info for each test.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          title: section heading
+          test_list: tests that fall in this section
+        """
+        write(title)
+        for test_tuple in test_list:
+            filename = test_tuple.filename[len(
+                path_utils.layout_tests_dir()) + 1:]
+            filename = filename.replace('\\', '/')
+            test_run_time = round(test_tuple.test_run_time, 1)
+            write("  %s took %s seconds" % (filename, test_run_time))
+
+    def _print_directory_timings(self, write, directory_test_timings):
+        """Print timing info by directory for any directories that
+        take > 10 seconds to run.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          directory_test_timings: time info for each directory
+        """
+        timings = []
+        for directory in directory_test_timings:
+            num_tests, time_for_directory = directory_test_timings[directory]
+            timings.append((round(time_for_directory, 1), directory,
+                            num_tests))
+        timings.sort()
+
+        write("Time to process slowest subdirectories:")
+        min_seconds_to_print = 10
+        for timing in timings:
+            if timing[0] > min_seconds_to_print:
+                write("  %s took %s seconds to run %s tests." % (timing[1],
+                      timing[0], timing[2]))
+        write("")
+
+    def _print_statistics_for_test_timings(self, write, title, timings):
+        """Prints the median, mean and standard deviation of the values in
+        timings.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          title: Title for these timings.
+          timings: A list of floats representing times.
+        """
+        write(title)
+        timings.sort()
+
+        num_tests = len(timings)
+        percentile90 = timings[int(.9 * num_tests)]
+        percentile99 = timings[int(.99 * num_tests)]
+
+        if num_tests % 2 == 1:
+            median = timings[(num_tests - 1) / 2]
+        else:
+            lower = timings[num_tests / 2 - 1]
+            upper = timings[num_tests / 2]
+            median = (float(lower + upper)) / 2
+
+        mean = sum(timings) / num_tests
+
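+        # Compute the population standard deviation of the timings.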
+        sum_of_deviations = 0
+        for timing in timings:
+            sum_of_deviations += math.pow(timing - mean, 2)
+
+        std_deviation = math.sqrt(sum_of_deviations / num_tests)
+        write("  Median:          %6.3f" % median)
+        write("  Mean:            %6.3f" % mean)
+        write("  90th percentile: %6.3f" % percentile90)
+        write("  99th percentile: %6.3f" % percentile99)
+        write("  Standard dev:    %6.3f" % std_deviation)
+        write("")
+
+    def _print_result_summary(self, write, result_summary):
+        """Print a short summary about how many tests passed.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          result_summary: information to log
+        """
+        failed = len(result_summary.failures)
+        skipped = len(
+            result_summary.tests_by_expectation[test_expectations.SKIP])
+        total = result_summary.total
+        passed = total - failed - skipped
+        pct_passed = 0.0
+        if total > 0:
+            pct_passed = float(passed) * 100 / total
+
+        write("")
+        write("=> Results: %d/%d tests passed (%.1f%%)" %
+                     (passed, total, pct_passed))
+        write("")
+        self._print_result_summary_entry(write, result_summary,
+            test_expectations.NOW, "Tests to be fixed for the current release")
+
+        write("")
+        self._print_result_summary_entry(write, result_summary,
+            test_expectations.DEFER,
+            "Tests we'll fix in the future if they fail (DEFER)")
+
+        write("")
+        self._print_result_summary_entry(write, result_summary,
+            test_expectations.WONTFIX,
+            "Tests that will only be fixed if they crash (WONTFIX)")
+
+    def _print_result_summary_entry(self, write, result_summary, timeline,
+                                    heading):
+        """Print a summary block of results for a particular timeline of test.
+
+        Args:
+          write: A callback to write info to (e.g., a LoggingWriter) or
+              sys.stdout.write.
+          result_summary: summary to print results for
+          timeline: the timeline to print results for (NOW, WONTFIX, etc.)
+          heading: a textual description of the timeline
+        """
+        total = len(result_summary.tests_by_timeline[timeline])
+        not_passing = (total -
+           len(result_summary.tests_by_expectation[test_expectations.PASS] &
+               result_summary.tests_by_timeline[timeline]))
+        write("=> %s (%d):" % (heading, not_passing))
+
+        for result in TestExpectationsFile.EXPECTATION_ORDER:
+            if result == test_expectations.PASS:
+                continue
+            results = (result_summary.tests_by_expectation[result] &
+                       result_summary.tests_by_timeline[timeline])
+            desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
+            if not_passing and len(results):
+                pct = len(results) * 100.0 / not_passing
+                write("  %5d %-24s (%4.1f%%)" % (len(results),
+                      desc[len(results) != 1], pct))
+
+    def _print_one_line_summary(self, total, expected):
+        """Print a one-line summary of the test run to stdout.
+
+        Args:
+          total: total number of tests run
+          expected: number of expected results
+        """
+        unexpected = total - expected
+        if unexpected == 0:
+            print "All %d tests ran as expected." % expected
+        elif expected == 1:
+            print "1 test ran as expected, %d didn't:" % unexpected
+        else:
+            print "%d tests ran as expected, %d didn't:" % (expected,
+                unexpected)
+
+    def _print_unexpected_results(self, unexpected_results):
+        """Prints any unexpected results in a human-readable form to stdout."""
+        passes = {}
+        flaky = {}
+        regressions = {}
+
+        if len(unexpected_results['tests']):
+            print ""
+
+        for test, results in unexpected_results['tests'].iteritems():
+            actual = results['actual'].split(" ")
+            expected = results['expected'].split(" ")
+            if actual == ['PASS']:
+                if 'CRASH' in expected:
+                    _add_to_dict_of_lists(passes,
+                                          'Expected to crash, but passed',
+                                          test)
+                elif 'TIMEOUT' in expected:
+                    _add_to_dict_of_lists(passes,
+                                          'Expected to timeout, but passed',
+                                           test)
+                else:
+                    _add_to_dict_of_lists(passes,
+                                          'Expected to fail, but passed',
+                                          test)
+            elif len(actual) > 1:
+                # We group flaky tests by the first actual result we got.
+                _add_to_dict_of_lists(flaky, actual[0], test)
+            else:
+                _add_to_dict_of_lists(regressions, results['actual'], test)
+
+        if len(passes):
+            for key, tests in passes.iteritems():
+                print "%s: (%d)" % (key, len(tests))
+                tests.sort()
+                for test in tests:
+                    print "  %s" % test
+                print
+
+        if len(flaky):
+            descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+            for key, tests in flaky.iteritems():
+                result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+                print "Unexpected flakiness: %s (%d)" % (
+                  descriptions[result][1], len(tests))
+                tests.sort()
+
+                for test in tests:
+                    result = unexpected_results['tests'][test]
+                    actual = result['actual'].split(" ")
+                    expected = result['expected'].split(" ")
+                    result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+                    new_expectations_list = list(set(actual) | set(expected))
+                    print "  %s = %s" % (test, " ".join(new_expectations_list))
+                print
+
+        if len(regressions):
+            descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+            for key, tests in regressions.iteritems():
+                result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+                print "Regressions: Unexpected %s : (%d)" % (
+                  descriptions[result][1], len(tests))
+                tests.sort()
+                for test in tests:
+                    print "  %s = %s" % (test, key)
+                print
+
+        if len(unexpected_results['tests']) and self._options.verbose:
+            print "-" * 78
+
+    def _print_unexpected_test_result(self, test, result):
+        """Prints one unexpected test result line."""
+        desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0]
+        self._meter.write("  %s -> unexpected %s\n" %
+                          (path_utils.relative_test_filename(test), desc))
+
+    def _write_results_html_file(self, result_summary):
+        """Write results.html which is a summary of tests that failed.
+
+        Args:
+          result_summary: a summary of the results :)
+
+        Returns:
+          True if any results were written (since expected failures may be
+          omitted)
+        """
+        # test failures
+        if self._options.full_results_html:
+            test_files = result_summary.failures.keys()
+        else:
+            unexpected_failures = self._get_failures(result_summary,
+                include_crashes=True)
+            test_files = unexpected_failures.keys()
+        if not len(test_files):
+            return False
+
+        out_filename = os.path.join(self._options.results_directory,
+                                    "results.html")
+        out_file = open(out_filename, 'w')
+        # header
+        if self._options.full_results_html:
+            h2 = "Test Failures"
+        else:
+            h2 = "Unexpected Test Failures"
+        out_file.write("<html><head><title>Layout Test Results (%(time)s)"
+                       "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n"
+                       % {'h2': h2, 'time': time.asctime()})
+
+        test_files.sort()
+        for test_file in test_files:
+            test_failures = result_summary.failures.get(test_file, [])
+            out_file.write("<p><a href='%s'>%s</a><br />\n"
+                           % (path_utils.filename_to_uri(test_file),
+                              path_utils.relative_test_filename(test_file)))
+            for failure in test_failures:
+                out_file.write("&nbsp;&nbsp;%s<br/>"
+                               % failure.result_html_output(
+                                 path_utils.relative_test_filename(test_file)))
+            out_file.write("</p>\n")
+
+        # footer
+        out_file.write("</body></html>\n")
+        return True
+
+    def _show_results_html_file(self):
+        """Launches the test shell open to the results.html page."""
+        results_filename = os.path.join(self._options.results_directory,
+                                        "results.html")
+        subprocess.Popen([path_utils.test_shell_path(self._options.target),
+                          path_utils.filename_to_uri(results_filename)])
+
+
+def _add_to_dict_of_lists(dict, key, value):
+    dict.setdefault(key, []).append(value)
+
+
+def read_test_files(files):
+    tests = []
+    for filename in files:
+        for line in open(filename):
+            line = test_expectations.strip_comments(line)
+            if line:
+                tests.append(line)
+    return tests
+
+
+def create_logging_writer(options, log_option):
+    """Returns a write() function that will write the string to logging.info()
+    if comp was specified in --log or if --verbose is true. Otherwise the
+    message is dropped.
+
+    Args:
+      options: command line options from optparse
+      log_option: option to match in options.log in order for the messages
+          to be logged (e.g., 'actual' or 'expected')
+    """
+    if options.verbose or log_option in options.log.split(","):
+        return logging.info
+    return lambda str: 1
+
+
+def main(options, args):
+    """Run the tests.  Will call sys.exit when complete.
+
+    Args:
+      options: command line options from optparse
+      args: a list of sub directories or files to test
+    """
+
+    if options.sources:
+        options.verbose = True
+
+    # Set up our logging format.
+    meter = metered_stream.MeteredStream(options.verbose, sys.stderr)
+    log_fmt = '%(message)s'
+    log_datefmt = '%y%m%d %H:%M:%S'
+    log_level = logging.INFO
+    if options.verbose:
+        log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+                  '%(message)s')
+        log_level = logging.DEBUG
+    logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt,
+                        stream=meter)
+
+    if not options.target:
+        if options.debug:
+            options.target = "Debug"
+        else:
+            options.target = "Release"
+
+    if not options.use_apache:
+        options.use_apache = sys.platform in ('darwin', 'linux2')
+
+    if options.results_directory.startswith("/"):
+        # Assume it's an absolute path and normalize.
+        options.results_directory = path_utils.get_absolute_path(
+            options.results_directory)
+    else:
+        # If it's a relative path, make the output directory relative to
+        # Debug or Release.
+        basedir = path_utils.path_from_base('webkit')
+        options.results_directory = path_utils.get_absolute_path(
+            os.path.join(basedir, options.target, options.results_directory))
+
+    if options.clobber_old_results:
+        # Just clobber the actual test results directories since the other
+        # files in the results directory are explicitly used for cross-run
+        # tracking.
+        path = os.path.join(options.results_directory, 'LayoutTests')
+        if os.path.exists(path):
+            shutil.rmtree(path)
+
+    # Ensure platform is valid and force it to the form 'chromium-<platform>'.
+    options.platform = path_utils.platform_name(options.platform)
+
+    if not options.num_test_shells:
+        # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1.
+        options.num_test_shells = platform_utils.get_num_cores()
+
+    write = create_logging_writer(options, 'config')
+    write("Running %s test_shells in parallel" % options.num_test_shells)
+
+    if not options.time_out_ms:
+        if options.target == "Debug":
+            options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+        else:
+            options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+
+    options.slow_time_out_ms = str(5 * int(options.time_out_ms))
+    write("Regular timeout: %s, slow test timeout: %s" %
+          (options.time_out_ms, options.slow_time_out_ms))
+
+    # Include all tests if none are specified.
+    paths = [arg for arg in args if arg]
+    if options.test_list:
+        paths += read_test_files(options.test_list)
+
+    # Create the output directory if it doesn't already exist.
+    path_utils.maybe_make_directory(options.results_directory)
+    meter.update("Gathering files ...")
+
+    test_runner = TestRunner(options, meter)
+    test_runner.gather_file_paths(paths)
+
+    if options.lint_test_files:
+        # Creating the expectations for each platform/target pair does all the
+        # test list parsing and ensures it's correct syntax (e.g. no dupes).
+        for platform in TestExpectationsFile.PLATFORMS:
+            test_runner.parse_expectations(platform, is_debug_mode=True)
+            test_runner.parse_expectations(platform, is_debug_mode=False)
+        print ("If there are no fail messages, errors or exceptions, then the "
+            "lint succeeded.")
+        sys.exit(0)
+
+    try:
+        test_shell_binary_path = path_utils.test_shell_path(options.target)
+    except path_utils.PathNotFound:
+        print "\nERROR: test_shell is not found. Be sure that you have built"
+        print "it and that you are using the correct build. This script"
+        print "will run the Release one by default. Use --debug to use the"
+        print "Debug build.\n"
+        sys.exit(1)
+
+    write = create_logging_writer(options, "config")
+    write("Using platform '%s'" % options.platform)
+    write("Placing test results in %s" % options.results_directory)
+    if options.new_baseline:
+        write("Placing new baselines in %s" %
+              path_utils.chromium_baseline_path(options.platform))
+    write("Using %s build at %s" % (options.target, test_shell_binary_path))
+    if options.no_pixel_tests:
+        write("Not running pixel tests")
+    write("")
+
+    meter.update("Parsing expectations ...")
+    test_runner.parse_expectations(options.platform, options.target == 'Debug')
+
+    meter.update("Preparing tests ...")
+    write = create_logging_writer(options, "expected")
+    result_summary = test_runner.prepare_lists_and_print_output(write)
+
+    if 'cygwin' == sys.platform:
+        logging.warn("#" * 40)
+        logging.warn("# UNEXPECTED PYTHON VERSION")
+        logging.warn("# This script should be run using the version of python")
+        logging.warn("# in third_party/python_24/")
+        logging.warn("#" * 40)
+        sys.exit(1)
+
+    # Delete the disk cache, if present, to ensure a clean test run.
+    cachedir = os.path.join(os.path.dirname(test_shell_binary_path), "cache")
+    if os.path.exists(cachedir):
+        shutil.rmtree(cachedir)
+
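+    # Text diffs always run; pixel diffs (and optionally fuzzy pixel diffs)
+    # are added unless --no-pixel-tests was given.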
+    test_runner.add_test_type(text_diff.TestTextDiff)
+    if not options.no_pixel_tests:
+        test_runner.add_test_type(image_diff.ImageDiff)
+        if options.fuzzy_pixel_tests:
+            test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff)
+
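+    # Run the tests; the process exit status is non-zero if the run produced
+    # any new failures.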
+    meter.update("Starting ...")
+    has_new_failures = test_runner.run(result_summary)
+
+    logging.debug("Exit status: %d" % has_new_failures)
+    sys.exit(has_new_failures)
+
+
+def parse_args(args=None):
+    """Provides a default set of command line args.
+
+    Returns a tuple of options, args from optparse"""
+    option_parser = optparse.OptionParser()
+    option_parser.add_option("", "--no-pixel-tests", action="store_true",
+                             default=False,
+                             help="disable pixel-to-pixel PNG comparisons")
+    option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true",
+                             default=False,
+                             help="Also use fuzzy matching to compare pixel "
+                                  "test outputs.")
+    option_parser.add_option("", "--results-directory",
+                             default="layout-test-results",
+                             help="Output results directory source dir,"
+                                  " relative to Debug or Release")
+    option_parser.add_option("", "--new-baseline", action="store_true",
+                             default=False,
+                             help="save all generated results as new baselines"
+                                  " into the platform directory, overwriting "
+                                  "whatever's already there.")
+    option_parser.add_option("", "--noshow-results", action="store_true",
+                             default=False, help="don't launch the test_shell"
+                             " with results after the tests are done")
+    option_parser.add_option("", "--full-results-html", action="store_true",
+                             default=False, help="show all failures in "
+                             "results.html, rather than only regressions")
+    option_parser.add_option("", "--clobber-old-results", action="store_true",
+                             default=False, help="Clobbers test results from "
+                             "previous runs.")
+    option_parser.add_option("", "--lint-test-files", action="store_true",
+                             default=False, help="Makes sure the test files "
+                             "parse for all configurations. Does not run any "
+                             "tests.")
+    option_parser.add_option("", "--force", action="store_true",
+                             default=False,
+                             help="Run all tests, even those marked SKIP "
+                                  "in the test list")
+    option_parser.add_option("", "--num-test-shells",
+                             help="Number of testshells to run in parallel.")
+    option_parser.add_option("", "--use-apache", action="store_true",
+                             default=False,
+                             help="Whether to use apache instead of lighttpd.")
+    option_parser.add_option("", "--time-out-ms", default=None,
+                             help="Set the timeout for each test")
+    option_parser.add_option("", "--run-singly", action="store_true",
+                             default=False,
+                             help="run a separate test_shell for each test")
+    option_parser.add_option("", "--debug", action="store_true", default=False,
+                             help="use the debug binary instead of the release"
+                                  " binary")
+    option_parser.add_option("", "--num-slow-tests-to-log", default=50,
+                             help="Number of slow tests whose timings "
+                                  "to print.")
+    option_parser.add_option("", "--platform",
+                             help="Override the platform for expected results")
+    option_parser.add_option("", "--target", default="",
+                             help="Set the build target configuration "
+                                  "(overrides --debug)")
+    option_parser.add_option("", "--log", action="store",
+                             default="detailed-progress,unexpected",
+                             help="log various types of data. The param should"
+                             " be a comma-separated list of values from: "
+                             "actual,config," + LOG_DETAILED_PROGRESS +
+                             ",expected,timing," + LOG_UNEXPECTED + " "
+                             "(defaults to " +
+                             "--log detailed-progress,unexpected)")
+    option_parser.add_option("-v", "--verbose", action="store_true",
+                             default=False, help="include debug-level logging")
+    option_parser.add_option("", "--sources", action="store_true",
+                             help="show expected result file path for each "
+                                  "test (implies --verbose)")
+    option_parser.add_option("", "--startup-dialog", action="store_true",
+                             default=False,
+                             help="create a dialog on test_shell.exe startup")
+    option_parser.add_option("", "--gp-fault-error-box", action="store_true",
+                             default=False,
+                             help="enable Windows GP fault error box")
+    option_parser.add_option("", "--wrapper",
+                             help="wrapper command to insert before "
+                                  "invocations of test_shell; option is split "
+                                  "on whitespace before running. (Example: "
+                                  "--wrapper='valgrind --smc-check=all')")
+    option_parser.add_option("", "--test-list", action="append",
+                             help="read list of tests to run from file",
+                             metavar="FILE")
+    option_parser.add_option("", "--nocheck-sys-deps", action="store_true",
+                             default=False,
+                             help="Don't check the system dependencies "
+                                  "(themes)")
+    option_parser.add_option("", "--randomize-order", action="store_true",
+                             default=False,
+                             help=("Run tests in random order (useful for "
+                                   "tracking down corruption)"))
+    option_parser.add_option("", "--run-chunk",
+                             default=None,
+                             help=("Run a specified chunk (n:l), the "
+                                   "nth of len l, of the layout tests"))
+    option_parser.add_option("", "--run-part",
+                             default=None,
+                             help=("Run a specified part (n:m), the nth of m"
+                                   " parts, of the layout tests"))
+    option_parser.add_option("", "--batch-size",
+                             default=None,
+                             help=("Run a the tests in batches (n), after "
+                                   "every n tests, the test shell is "
+                                   "relaunched."))
+    option_parser.add_option("", "--builder-name",
+                             default="DUMMY_BUILDER_NAME",
+                             help=("The name of the builder shown on the "
+                                   "waterfall running this script e.g. "
+                                   "WebKit."))
+    option_parser.add_option("", "--build-name",
+                             default="DUMMY_BUILD_NAME",
+                             help=("The name of the builder used in its path, "
+                                   "e.g. webkit-rel."))
+    option_parser.add_option("", "--build-number",
+                             default="DUMMY_BUILD_NUMBER",
+                             help=("The build number of the builder running"
+                                   "this script."))
+    option_parser.add_option("", "--experimental-fully-parallel",
+                             action="store_true", default=False,
+                             help="run all tests in parallel")
+    return option_parser.parse_args(args)
+
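+# Example invocation (hypothetical paths and flags; adjust to your checkout):
+#   run_chromium_webkit_tests.py --debug --platform=chromium-linux \
+#       LayoutTests/fast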
+if __name__ == '__main__':
+    options, args = parse_args()
+    main(options, args)

-- 
WebKit Debian packaging


