[SCM] WebKit Debian packaging branch, webkit-1.3, updated. upstream/1.3.7-4207-g178b198

dpranke at chromium.org
Sun Feb 20 23:21:32 UTC 2011


The following commit has been merged in the webkit-1.3 branch:
commit 06adebe1f93a617a65621d6ab02430c3731d7c78
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Thu Jan 20 03:14:06 2011 +0000

    2011-01-19  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Mihai Parparita.
    
            remove fs refs from test_runner, dump_render_tree_thread
    
            https://bugs.webkit.org/show_bug.cgi?id=52753
    
            * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@76195 268f45cc-cd09-0410-ab3c-d52691b4dbfc
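
For context, the change above applies a simple dependency-injection pattern: instead of calling codecs.open() and os.path directly, the production code asks a filesystem object (reached via port._filesystem) for join()/write_text_file()/etc., which is what lets the unit tests substitute an in-memory implementation. The following is a minimal, self-contained sketch of that pattern, not part of the commit; the names DiskFileSystem, FakeFileSystem and ResultsWriter are illustrative stand-ins, not webkitpy APIs.

    # Sketch only: the dependency-injection pattern this commit applies.
    import os


    class DiskFileSystem(object):
        """Real implementation: delegates to os / built-in open()."""

        def join(self, *parts):
            return os.path.join(*parts)

        def write_text_file(self, path, contents):
            with open(path, "w") as f:
                f.write(contents)


    class FakeFileSystem(object):
        """Test double: keeps file contents in a dict instead of on disk."""

        def __init__(self):
            self.files = {}

        def join(self, *parts):
            return "/".join(parts)

        def write_text_file(self, path, contents):
            self.files[path] = contents


    class ResultsWriter(object):
        """Consumer code: takes the filesystem as a constructor argument,
        the way TestRunner now picks up port._filesystem."""

        def __init__(self, filesystem, results_directory):
            self._fs = filesystem
            self._results_directory = results_directory

        def write_tests_run(self, msg):
            path = self._fs.join(self._results_directory, "tests_run.txt")
            self._fs.write_text_file(path, msg + "\n")


    if __name__ == "__main__":
        # A test can now exercise the writer entirely in memory.
        fs = FakeFileSystem()
        writer = ResultsWriter(fs, "/results")
        writer.write_tests_run("passes/text.html")
        assert fs.files["/results/tests_run.txt"] == "passes/text.html\n"
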

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 6557014..29303d2 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,16 @@
+2011-01-19  Dirk Pranke  <dpranke at chromium.org>
+
+        Reviewed by Mihai Parparita.
+
+        remove fs refs from test_runner, dump_render_tree_thread
+
+        https://bugs.webkit.org/show_bug.cgi?id=52753
+
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
 2011-01-19  James Robinson  <jamesr at chromium.org>
 
         Reviewed by Darin Fisher.
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 4d6b5f6..bd52e65 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -36,9 +36,6 @@ the output.  When there are no more URLs to process in the shared queue, the
 thread exits.
 """
 
-from __future__ import with_statement
-
-import codecs
 import copy
 import logging
 import os
@@ -387,7 +384,7 @@ class TestShellThread(WatchableThread):
         # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
         tests_run_filename = self._port._filesystem.join(self._options.results_directory,
                                           "tests_run.txt")
-        tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
+        tests_run_file = self._port._filesystem.open_text_file_for_writing(tests_run_filename, append=True)
 
         while True:
             if self._canceled:
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
index 2f63ec8..67524d3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -35,13 +35,9 @@ objects to the TestRunner.  The TestRunner then aggregates the TestFailures to
 create a final report.
 """
 
-from __future__ import with_statement
-
-import codecs
 import errno
 import logging
 import math
-import os
 import Queue
 import random
 import shutil
@@ -68,8 +64,6 @@ _log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
 # Builder base URL where we have the archived test results.
 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
 
-LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep
-
 TestExpectationsFile = test_expectations.TestExpectationsFile
 
 
@@ -160,8 +154,6 @@ class TestRunner:
     """A class for managing running a series of tests on a series of layout
     test files."""
 
-    HTTP_SUBDIR = os.sep.join(['', 'http', ''])
-    WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
 
     # The per-test timeout in milliseconds, if no --time-out-ms option was
     # given to run_webkit_tests. This should correspond to the default timeout
@@ -177,10 +169,16 @@ class TestRunner:
           printer: a Printer object to record updates to.
         """
         self._port = port
+        self._fs = port._filesystem
         self._options = options
         self._printer = printer
         self._message_broker = None
 
+        self.HTTP_SUBDIR = self._fs.join('', 'http', '')
+        self.WEBSOCKET_SUBDIR = self._fs.join('', 'websocket', '')
+        self.LAYOUT_TESTS_DIRECTORY = "LayoutTests" + self._fs.sep
+
+
         # disable wss server. need to install pyOpenSSL on buildbots.
         # self._websocket_secure_server = websocket_server.PyWebSocket(
         #        options.results_directory, use_tls=True, port=9323)
@@ -202,15 +200,15 @@ class TestRunner:
         paths = self._strip_test_dir_prefixes(args)
         paths += last_unexpected_results
         if self._options.test_list:
-            paths += self._strip_test_dir_prefixes(read_test_files(self._options.test_list))
+            paths += self._strip_test_dir_prefixes(read_test_files(self._fs, self._options.test_list))
         self._test_files = self._port.tests(paths)
 
     def _strip_test_dir_prefixes(self, paths):
         return [self._strip_test_dir_prefix(path) for path in paths if path]
 
     def _strip_test_dir_prefix(self, path):
-        if path.startswith(LAYOUT_TESTS_DIRECTORY):
-            return path[len(LAYOUT_TESTS_DIRECTORY):]
+        if path.startswith(self.LAYOUT_TESTS_DIRECTORY):
+            return path[len(self.LAYOUT_TESTS_DIRECTORY):]
         return path
 
     def lint(self):
@@ -342,10 +340,9 @@ class TestRunner:
                 self._printer.print_expected(extra_msg)
                 tests_run_msg += "\n" + extra_msg
                 files.extend(test_files[0:extra])
-            tests_run_filename = os.path.join(self._options.results_directory,
+            tests_run_filename = self._fs.join(self._options.results_directory,
                                               "tests_run.txt")
-            with codecs.open(tests_run_filename, "w", "utf-8") as file:
-                file.write(tests_run_msg + "\n")
+            self._fs.write_text_file(tests_run_filename, tests_run_msg)
 
             len_skip_chunk = int(len(files) * len(skipped) /
                                  float(len(self._test_files)))
@@ -403,10 +400,10 @@ class TestRunner:
     def _get_dir_for_test_file(self, test_file):
         """Returns the highest-level directory by which to shard the given
         test file."""
-        index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY)
+        index = test_file.rfind(self._fs.sep + self.LAYOUT_TESTS_DIRECTORY)
 
-        test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):]
-        test_file_parts = test_file.split(os.sep, 1)
+        test_file = test_file[index + len(self.LAYOUT_TESTS_DIRECTORY):]
+        test_file_parts = test_file.split(self._fs.sep, 1)
         directory = test_file_parts[0]
         test_file = test_file_parts[1]
 
@@ -416,10 +413,10 @@ class TestRunner:
         # what made them stable on linux/mac.
         return_value = directory
         while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
-                and test_file.find(os.sep) >= 0):
-            test_file_parts = test_file.split(os.sep, 1)
+                and test_file.find(self._fs.sep) >= 0):
+            test_file_parts = test_file.split(self._fs.sep, 1)
             directory = test_file_parts[0]
-            return_value = os.path.join(return_value, directory)
+            return_value = self._fs.join(return_value, directory)
             test_file = test_file_parts[1]
 
         return return_value
@@ -435,7 +432,7 @@ class TestRunner:
     def _test_requires_lock(self, test_file):
         """Return True if the test needs to be locked when
         running multiple copies of NRWTs."""
-        split_path = test_file.split(os.sep)
+        split_path = test_file.split(self._port._filesystem.sep)
         return 'http' in split_path or 'websocket' in split_path
 
     def _test_is_slow(self, test_file):
@@ -765,10 +762,9 @@ class TestRunner:
         layout_tests_dir = self._port.layout_tests_dir()
         possible_dirs = self._port.test_dirs()
         for dirname in possible_dirs:
-            if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
-                shutil.rmtree(os.path.join(self._options.results_directory,
-                                           dirname),
-                              ignore_errors=True)
+            if self._fs.isdir(self._fs.join(layout_tests_dir, dirname)):
+                self._fs.rmtree(self._fs.join(self._options.results_directory,
+                                              dirname))
 
     def _get_failures(self, result_summary, include_crashes):
         """Filters a dict of results and returns only the failures.
@@ -811,17 +807,17 @@ class TestRunner:
         """
         results_directory = self._options.results_directory
         _log.debug("Writing JSON files in %s." % results_directory)
-        unexpected_json_path = os.path.join(results_directory, "unexpected_results.json")
-        with codecs.open(unexpected_json_path, "w", "utf-8") as file:
+        unexpected_json_path = self._fs.join(results_directory, "unexpected_results.json")
+        with self._fs.open_text_file_for_writing(unexpected_json_path) as file:
             simplejson.dump(unexpected_results, file, sort_keys=True, indent=2)
 
         # Write a json file of the test_expectations.txt file for the layout
         # tests dashboard.
-        expectations_path = os.path.join(results_directory, "expectations.json")
+        expectations_path = self._fs.join(results_directory, "expectations.json")
         expectations_json = \
             self._expectations.get_expectations_json_for_all_platforms()
-        with codecs.open(expectations_path, "w", "utf-8") as file:
-            file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
+        self._fs.write_text_file(expectations_path,
+                                 u"ADD_EXPECTATIONS(%s);" % expectations_json)
 
         generator = json_layout_results_generator.JSONLayoutResultsGenerator(
             self._port, self._options.builder_name, self._options.build_name,
@@ -1192,9 +1188,9 @@ class TestRunner:
         if not len(test_files):
             return False
 
-        out_filename = os.path.join(self._options.results_directory,
-                                    "results.html")
-        with codecs.open(out_filename, "w", "utf-8") as results_file:
+        out_filename = self._fs.join(self._options.results_directory,
+                                     "results.html")
+        with self._fs.open_text_file_for_writing(out_filename) as results_file:
             html = self._results_html(test_files, result_summary.failures, results_title)
             results_file.write(html)
 
@@ -1202,21 +1198,20 @@ class TestRunner:
 
     def _show_results_html_file(self):
         """Shows the results.html page."""
-        results_filename = os.path.join(self._options.results_directory,
-                                        "results.html")
+        results_filename = self._fs.join(self._options.results_directory,
+                                         "results.html")
         self._port.show_results_html_file(results_filename)
 
 
-def read_test_files(files):
+def read_test_files(fs, files):
     tests = []
     for file in files:
         try:
-            with codecs.open(file, 'r', 'utf-8') as file_contents:
-                # FIXME: This could be cleaner using a list comprehension.
-                for line in file_contents:
-                    line = test_expectations.strip_comments(line)
-                    if line:
-                        tests.append(line)
+            file_contents = fs.read_text_file(file).split('\n')
+            for line in file_contents:
+                line = test_expectations.strip_comments(line)
+                if line:
+                    tests.append(line)
         except IOError, e:
             if e.errno == errno.ENOENT:
                 _log.critical('')
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
index 3c564ae..97f8630 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
@@ -32,6 +32,7 @@
 
 import unittest
 
+from webkitpy.common.system import filesystem_mock
 from webkitpy.thirdparty.mock import Mock
 
 import test_runner
@@ -45,6 +46,7 @@ class TestRunnerWrapper(test_runner.TestRunner):
 class TestRunnerTest(unittest.TestCase):
     def test_results_html(self):
         mock_port = Mock()
+        mock_port._filesystem = filesystem_mock.MockFileSystem()
         mock_port.relative_test_filename = lambda name: name
         mock_port.filename_to_uri = lambda name: name
 
@@ -66,7 +68,9 @@ class TestRunnerTest(unittest.TestCase):
     def test_shard_tests(self):
         # Test that _shard_tests in test_runner.TestRunner really
         # put the http tests first in the queue.
-        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
+        port = Mock()
+        port._filesystem = filesystem_mock.MockFileSystem()
+        runner = TestRunnerWrapper(port=port, options=Mock(),
             printer=Mock())
 
         test_list = [
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index af1c6e1..f2ff82f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -98,7 +98,7 @@ def passing_run(extra_args=None, port_obj=None, record_results=False,
     return res == 0
 
 
-def logging_run(extra_args=None, port_obj=None, tests_included=False):
+def logging_run(extra_args=None, port_obj=None, tests_included=False, filesystem=None):
     options, parsed_args = parse_args(extra_args=extra_args,
                                       record_results=False,
                                       tests_included=tests_included,
@@ -106,7 +106,7 @@ def logging_run(extra_args=None, port_obj=None, tests_included=False):
     user = MockUser()
     if not port_obj:
         port_obj = port.get(port_name=options.platform, options=options,
-                            user=user)
+                            user=user, filesystem=filesystem)
 
     res, buildbot_output, regular_output = run_and_capture(port_obj, options,
                                                            parsed_args)
@@ -127,7 +127,7 @@ def run_and_capture(port_obj, options, parsed_args):
     return (res, buildbot_output, regular_output)
 
 
-def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
+def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False, filesystem=None):
     extra_args = extra_args or []
     if not tests_included:
         # Not including http tests since they get run out of order (that
@@ -163,7 +163,7 @@ def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
         def create_driver(self, worker_number):
             return RecordingTestDriver(self, worker_number)
 
-    recording_port = RecordingTestPort(options=options, user=user)
+    recording_port = RecordingTestPort(options=options, user=user, filesystem=filesystem)
     run_and_capture(recording_port, options, parsed_args)
 
     if flatten_batches:
@@ -324,24 +324,22 @@ class MainTest(unittest.TestCase):
         self.assertEquals([], tests_run)
 
     def test_test_list(self):
-        filename = tempfile.mktemp()
-        tmpfile = file(filename, mode='w+')
-        tmpfile.write('passes/text.html')
-        tmpfile.close()
-        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
+        fs = port.unit_test_filesystem()
+        filename = '/tmp/foo.txt'
+        fs.write_text_file(filename, 'passes/text.html')
+        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
         self.assertEquals(['passes/text.html'], tests_run)
-        os.remove(filename)
+        fs.remove(filename)
         res, out, err, user = logging_run(['--test-list=%s' % filename],
-                                          tests_included=True)
+                                          tests_included=True, filesystem=fs)
         self.assertEqual(res, -1)
         self.assertFalse(err.empty())
 
     def test_test_list_with_prefix(self):
-        filename = tempfile.mktemp()
-        tmpfile = file(filename, mode='w+')
-        tmpfile.write('LayoutTests/passes/text.html')
-        tmpfile.close()
-        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
+        fs = port.unit_test_filesystem()
+        filename = '/tmp/foo.txt'
+        fs.write_text_file(filename, 'passes/text.html')
+        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
         self.assertEquals(['passes/text.html'], tests_run)
 
     def test_unexpected_failures(self):
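
The test changes above show the payoff of threading a filesystem argument through helpers such as read_test_files(fs, files): a test list can be created and read without touching the disk. The sketch below illustrates that idea in isolation; it is a simplified stand-in, not the actual webkitpy code (the comment stripping is a crude substitute for test_expectations.strip_comments, and FakeFileSystem again stands in for MockFileSystem).

    # Sketch only: why passing a filesystem into read_test_files() helps tests.
    class FakeFileSystem(object):
        def __init__(self, files=None):
            self.files = dict(files or {})

        def read_text_file(self, path):
            return self.files[path]


    def read_test_files(fs, files):
        """Simplified mirror of the refactored helper: pure string work
        over whatever the injected filesystem returns."""
        tests = []
        for name in files:
            for line in fs.read_text_file(name).split('\n'):
                line = line.split('//')[0].strip()  # crude strip_comments() stand-in
                if line:
                    tests.append(line)
        return tests


    if __name__ == "__main__":
        fs = FakeFileSystem({'/tmp/foo.txt': 'passes/text.html\n// a comment\n'})
        assert read_test_files(fs, ['/tmp/foo.txt']) == ['passes/text.html']
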

-- 
WebKit Debian packaging


