[SCM] WebKit Debian packaging branch, debian/experimental, updated. upstream/1.3.3-9427-gc2be6fc

kinuko at chromium.org
Wed Dec 22 15:49:09 UTC 2010


The following commit has been merged in the debian/experimental branch:
commit 5754e3fea87ef35ff2a582a41e88ca709e6667db
Author: kinuko at chromium.org <kinuko at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Fri Nov 12 23:14:01 2010 +0000

    2010-11-12  Kinuko Yasuda  <kinuko at chromium.org>
    
            Reviewed by Ojan Vafai.
    
            Include detailed test modifiers in results.json and enable incremental uploading for non-layout tests
            https://bugs.webkit.org/show_bug.cgi?id=49354
    
            Also moved/integrated the upload method from run_webkit_tests.py to json_results_generator.py.
    
            * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:
            * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
            * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
    
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@71952 268f45cc-cd09-0410-ab3c-d52691b4dbfc
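
For readers skimming the patch: the key behavioral change is that a test's "modifier" is now inferred from its gtest-style name prefix (FAILS_, FLAKY_, DISABLED_) instead of being tracked with a separate skipped flag. A minimal standalone sketch of that prefix logic, mirroring the constants added to TestResult in json_results_generator.py below (modifier_for() is an illustrative helper, not code from the patch):

    # Sketch only: the constants and prefixes mirror the TestResult change in
    # json_results_generator.py further down in this patch.
    (NONE, FAILS, FLAKY, DISABLED) = range(4)

    def modifier_for(test_name):
        # gtest-style names look like "TestCase.DISABLED_TestName"; like the
        # patch, fall back to the full name when there is no "TestCase." part.
        try:
            name = test_name.split('.')[1]
        except IndexError:
            name = test_name
        if name.startswith('FAILS_'):
            return FAILS
        if name.startswith('FLAKY_'):
            return FLAKY
        if name.startswith('DISABLED_'):
            return DISABLED
        return NONE

    assert modifier_for('MyCase.FLAKY_Crashes') == FLAKY
    assert modifier_for('MyCase.Passes') == NONE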

diff --git a/WebKitTools/ChangeLog b/WebKitTools/ChangeLog
index a99603f..c88adb2 100644
--- a/WebKitTools/ChangeLog
+++ b/WebKitTools/ChangeLog
@@ -1,3 +1,17 @@
+2010-11-12  Kinuko Yasuda  <kinuko at chromium.org>
+
+        Reviewed by Ojan Vafai.
+
+        Include detailed test modifiers in results.json and enable incremental uploading for non-layout tests
+        https://bugs.webkit.org/show_bug.cgi?id=49354
+
+        Also moved/integrated the upload method from run_webkit_tests.py to json_results_generator.py.
+
+        * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+
 2010-11-12  Tony Chang  <tony at chromium.org>
 
         Reviewed by Kent Tamura.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index 1cf88ef..101d30b 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -56,7 +56,8 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
     def __init__(self, port, builder_name, build_name, build_number,
         results_file_base_path, builder_base_url,
         test_timings, expectations, result_summary, all_tests,
-        generate_incremental_results=False, test_results_server=None):
+        generate_incremental_results=False, test_results_server=None,
+        test_type="", master_name=""):
         """Modifies the results.json file. Grabs it off the archive directory
         if it is not found locally.
 
@@ -67,7 +68,8 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
         super(JSONLayoutResultsGenerator, self).__init__(
             builder_name, build_name, build_number, results_file_base_path,
             builder_base_url, {}, port.test_repository_paths(),
-            generate_incremental_results, test_results_server)
+            generate_incremental_results, test_results_server,
+            test_type, master_name)
 
         self._port = port
         self._expectations = expectations
@@ -117,7 +119,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
         return set(self._failures.keys())
 
     # override
-    def _get_result_type_char(self, test_name):
+    def _get_modifier_char(self, test_name):
         if test_name not in self._all_tests:
             return self.NO_DATA_RESULT
 
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 765b4d8..3267718 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -46,17 +46,35 @@ import webkitpy.thirdparty.simplejson as simplejson
 
 _log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator")
 
-
 class TestResult(object):
     """A simple class that represents a single test result."""
-    def __init__(self, name, failed=False, skipped=False, elapsed_time=0):
+
+    # Test modifier constants.
+    (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+    def __init__(self, name, failed=False, elapsed_time=0):
         self.name = name
         self.failed = failed
-        self.skipped = skipped
         self.time = elapsed_time
 
+        test_name = name
+        try:
+            test_name = name.split('.')[1]
+        except IndexError:
+            _log.warn("Invalid test name: %s.", name)
+            pass
+
+        if test_name.startswith('FAILS_'):
+            self.modifier = self.FAILS
+        elif test_name.startswith('FLAKY_'):
+            self.modifier = self.FLAKY
+        elif test_name.startswith('DISABLED_'):
+            self.modifier = self.DISABLED
+        else:
+            self.modifier = self.NONE
+
     def fixable(self):
-        return self.failed or self.skipped
+        return self.failed or self.modifier == self.DISABLED
 
 
 class JSONResultsGeneratorBase(object):
@@ -67,10 +85,20 @@ class JSONResultsGeneratorBase(object):
     MIN_TIME = 1
     JSON_PREFIX = "ADD_RESULTS("
     JSON_SUFFIX = ");"
+
+    # Note that in non-chromium tests those chars are used to indicate
+    # test modifiers (FAILS, FLAKY, etc) but not actual test results.
     PASS_RESULT = "P"
     SKIP_RESULT = "X"
     FAIL_RESULT = "F"
+    FLAKY_RESULT = "L"
     NO_DATA_RESULT = "N"
+
+    MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+                        TestResult.DISABLED: SKIP_RESULT,
+                        TestResult.FAILS: FAIL_RESULT,
+                        TestResult.FLAKY: FLAKY_RESULT}
+
     VERSION = 3
     VERSION_KEY = "version"
     RESULTS = "results"
@@ -94,7 +122,8 @@ class JSONResultsGeneratorBase(object):
         test_results_map, svn_repositories=None,
         generate_incremental_results=False,
         test_results_server=None,
-        test_type=""):
+        test_type="",
+        master_name=""):
         """Modifies the results.json file. Grabs it off the archive directory
         if it is not found locally.
 
@@ -113,11 +142,14 @@ class JSONResultsGeneratorBase(object):
           generate_incremental_results: If true, generate incremental json file
               from current run results.
           test_results_server: server that hosts test results json.
+          test_type: test type string (e.g. 'layout-tests').
+          master_name: the name of the buildbot master.
         """
         self._builder_name = builder_name
         self._build_name = build_name
         self._build_number = build_number
         self._builder_base_url = builder_base_url
+        self._results_directory = results_file_base_path
         self._results_file_path = os.path.join(results_file_base_path,
             self.RESULTS_FILENAME)
         self._incremental_results_file_path = os.path.join(
@@ -133,6 +165,7 @@ class JSONResultsGeneratorBase(object):
 
         self._test_results_server = test_results_server
         self._test_type = test_type
+        self._master_name = master_name
 
         self._json = None
         self._archived_results = None
@@ -205,6 +238,36 @@ class JSONResultsGeneratorBase(object):
     def set_archived_results(self, archived_results):
         self._archived_results = archived_results
 
+    def upload_json_files(self, json_files):
+        """Uploads the given json_files to the test_results_server (if the
+        test_results_server is given)."""
+        if not self._test_results_server:
+            return
+
+        if not self._master_name:
+            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
+            return
+
+        _log.info("Uploading JSON files for builder: %s", self._builder_name)
+        attrs = [("builder", self._builder_name),
+                 ("testtype", self._test_type),
+                 ("master", self._master_name)]
+
+        files = [(file, os.path.join(self._results_directory, file))
+            for file in json_files]
+
+        uploader = test_results_uploader.TestResultsUploader(
+            self._test_results_server)
+        try:
+            # Set uploading timeout in case appengine server is having problem.
+            # 120 seconds are more than enough to upload test results.
+            uploader.upload(attrs, files, 120)
+        except Exception, err:
+            _log.error("Upload failed: %s" % err)
+            return
+
+        _log.info("JSON files uploaded.")
+
     def _generate_json_file(self, json, file_path):
         # Specify separators in order to get compact encoding.
         json_data = simplejson.dumps(json, separators=(',', ':'))
@@ -226,19 +289,17 @@ class JSONResultsGeneratorBase(object):
         """Returns a set of failed test names."""
         return set([r.name for r in self._test_results if r.failed])
 
-    def _get_result_type_char(self, test_name):
+    def _get_modifier_char(self, test_name):
         """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
-        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
         for the given test_name.
         """
         if test_name not in self._test_results_map:
             return JSONResultsGenerator.NO_DATA_RESULT
 
         test_result = self._test_results_map[test_name]
-        if test_result.skipped:
-            return JSONResultsGenerator.SKIP_RESULT
-        if test_result.failed:
-            return JSONResultsGenerator.FAIL_RESULT
+        if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
+            return self.MODIFIER_TO_CHAR[test_result.modifier]
 
         return JSONResultsGenerator.PASS_RESULT
 
@@ -344,10 +405,10 @@ class JSONResultsGeneratorBase(object):
         self._insert_item_into_raw_list(results_for_builder,
             fixable_count, self.FIXABLE_COUNT)
 
-        # Create a pass/skip/failure summary dictionary.
+        # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
         entry = {}
         for test_name in self._test_results_map.iterkeys():
-            result_char = self._get_result_type_char(test_name)
+            result_char = self._get_modifier_char(test_name)
             entry[result_char] = entry.get(result_char, 0) + 1
 
         # Insert the pass/skip/failure summary dictionary.
@@ -423,7 +484,7 @@ class JSONResultsGeneratorBase(object):
           tests: Dictionary containing test result entries.
         """
 
-        result = self._get_result_type_char(test_name)
+        result = self._get_modifier_char(test_name)
         time = self._get_test_timing(test_name)
 
         if test_name not in tests:
@@ -523,33 +584,10 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
     # The flag is for backward compatibility.
     output_json_in_init = True
 
-    def _upload_json_files(self):
-        if not self._test_results_server or not self._test_type:
-            return
-
-        _log.info("Uploading JSON files for %s to the server: %s",
-                  self._builder_name, self._test_results_server)
-        attrs = [("builder", self._builder_name), ("testtype", self._test_type)]
-        json_files = [self.INCREMENTAL_RESULTS_FILENAME]
-
-        files = [(file, os.path.join(self._results_directory, file))
-            for file in json_files]
-        uploader = test_results_uploader.TestResultsUploader(
-            self._test_results_server)
-        try:
-            # Set uploading timeout in case appengine server is having problem.
-            # 120 seconds are more than enough to upload test results.
-            uploader.upload(attrs, files, 120)
-        except Exception, err:
-            _log.error("Upload failed: %s" % err)
-            return
-
-        _log.info("JSON files uploaded.")
-
     def __init__(self, port, builder_name, build_name, build_number,
         results_file_base_path, builder_base_url,
         test_timings, failures, passed_tests, skipped_tests, all_tests,
-        test_results_server=None, test_type=None):
+        test_results_server=None, test_type=None, master_name=None):
         """Generates a JSON results file.
 
         Args
@@ -567,6 +605,7 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
               include skipped tests.
           test_results_server: server that hosts test results json.
           test_type: the test type.
+          master_name: the name of the buildbot master.
         """
 
         self._test_type = test_type
@@ -582,11 +621,9 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
             test_result.failed = True
         for test in skipped_tests:
             test_results_map[test] = test_result = get(test, TestResult(test))
-            test_result.skipped = True
         for test in passed_tests:
             test_results_map[test] = test_result = get(test, TestResult(test))
             test_result.failed = False
-            test_result.skipped = False
         for test in all_tests:
             if test not in test_results_map:
                 test_results_map[test] = TestResult(test)
@@ -599,8 +636,9 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
             svn_repositories=port.test_repository_paths(),
             generate_incremental_results=True,
             test_results_server=test_results_server,
-            test_type=test_type)
+            test_type=test_type,
+            master_name=master_name)
 
         if self.__class__.output_json_in_init:
             self.generate_json_output()
-            self._upload_json_files()
+            self.upload_json_files([self.INCREMENTAL_RESULTS_FILENAME])
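
To make the new public upload entry point concrete: upload_json_files() above is a no-op when no --test-results-server is configured, logs an error and returns when --master-name is missing, and otherwise pairs each JSON file name with its path under the results directory before handing everything to TestResultsUploader. A rough, self-contained restatement of that argument handling (the values are placeholders and plan_upload() is an illustrative helper, not part of the patch):

    import os

    def plan_upload(json_files, results_directory, builder_name,
                    test_type, master_name, test_results_server):
        # Mirrors the gating in upload_json_files(): no server means nothing
        # to do; a server without a master name is treated as an error.
        if not test_results_server or not master_name:
            return None
        attrs = [("builder", builder_name),
                 ("testtype", test_type),
                 ("master", master_name)]
        files = [(name, os.path.join(results_directory, name))
                 for name in json_files]
        return attrs, files

    # Placeholder values, not real builder configuration.
    print(plan_upload(["incremental_results.json"], "/tmp/layout-test-results",
                      "DummyBuilder", "layout-tests", "dummy.master",
                      "test-results.example.com"))

The real method additionally uploads with a 120-second timeout and logs and swallows upload exceptions, so a misbehaving appengine server cannot fail the test run.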
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
index 785cc1c..606a613 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -47,43 +47,68 @@ class JSONGeneratorTest(unittest.TestCase):
         self.build_number = 'DUMMY_BUILDER_NUMBER'
         self._json = None
         self._num_runs = 0
-        self._tests_list = set([])
+        self._tests_set = set([])
         self._test_timings = {}
-        self._failed_tests = {}
-        self._passed_tests = set([])
-        self._skipped_tests = set([])
-
-    def _test_json_generation(self, passed_tests, failed_tests, skipped_tests):
-        # Make sure we have sets (rather than lists).
-        passed_tests = set(passed_tests)
-        skipped_tests = set(skipped_tests)
-        tests_list = passed_tests | set(failed_tests.keys())
+        self._failed_tests = set([])
+
+        self._PASS_tests = set([])
+        self._DISABLED_tests = set([])
+        self._FLAKY_tests = set([])
+        self._FAILS_tests = set([])
+
+    def _get_test_modifier(self, test_name):
+        if test_name.startswith('DISABLED_'):
+            return json_results_generator.JSONResultsGenerator.SKIP_RESULT
+        elif test_name.startswith('FLAKY_'):
+            return json_results_generator.JSONResultsGenerator.FLAKY_RESULT
+        elif test_name.startswith('FAILS_'):
+            return json_results_generator.JSONResultsGenerator.FAIL_RESULT
+        return json_results_generator.JSONResultsGenerator.PASS_RESULT
+
+    def _test_json_generation(self, passed_tests_list, failed_tests_list):
+        tests_set = set(passed_tests_list) | set(failed_tests_list)
+
+        DISABLED_tests = set([t for t in tests_set
+                             if t.startswith('DISABLED_')])
+        FLAKY_tests = set([t for t in tests_set
+                           if t.startswith('FLAKY_')])
+        FAILS_tests = set([t for t in tests_set
+                           if t.startswith('FAILS_')])
+        PASS_tests = tests_set ^ (DISABLED_tests | FLAKY_tests | FAILS_tests)
+
+        passed_tests = set(passed_tests_list) ^ DISABLED_tests
+        failed_tests = set(failed_tests_list)
+
         test_timings = {}
         i = 0
-        for test in tests_list:
+        for test in tests_set:
             test_timings[test] = float(self._num_runs * 100 + i)
             i += 1
 
-        port_obj = port.get(None)
+        # For backward compatibility.
+        reason = test_expectations.TEXT
+        failed_tests_dict = dict([(name, reason) for name in failed_tests])
 
+        port_obj = port.get(None)
         generator = json_results_generator.JSONResultsGenerator(port_obj,
             self.builder_name, self.build_name, self.build_number,
             '',
             None,   # don't fetch past json results archive
             test_timings,
-            failed_tests,
+            failed_tests_dict,
             passed_tests,
-            skipped_tests,
-            tests_list)
+            (),
+            tests_set)
 
         # Test incremental json results
         incremental_json = generator.get_json(incremental=True)
         self._verify_json_results(
-            tests_list,
+            tests_set,
             test_timings,
-            passed_tests,
             failed_tests,
-            skipped_tests,
+            PASS_tests,
+            DISABLED_tests,
+            FLAKY_tests,
             incremental_json,
             1)
 
@@ -92,23 +117,25 @@ class JSONGeneratorTest(unittest.TestCase):
         json = generator.get_json(incremental=False)
         self._json = json
         self._num_runs += 1
-        self._tests_list |= tests_list
+        self._tests_set |= tests_set
         self._test_timings.update(test_timings)
         self._failed_tests.update(failed_tests)
-        self._passed_tests |= passed_tests
-        self._skipped_tests |= skipped_tests
+        self._PASS_tests |= PASS_tests
+        self._DISABLED_tests |= DISABLED_tests
+        self._FLAKY_tests |= FLAKY_tests
         self._verify_json_results(
-            self._tests_list,
+            self._tests_set,
             self._test_timings,
-            self._passed_tests,
             self._failed_tests,
-            self._skipped_tests,
+            self._PASS_tests,
+            self._DISABLED_tests,
+            self._FLAKY_tests,
             self._json,
             self._num_runs)
 
-    def _verify_json_results(self, tests_list, test_timings,
-                             passed_tests, failed_tests,
-                             skipped_tests, json, num_runs):
+    def _verify_json_results(self, tests_set, test_timings, failed_tests,
+                             PASS_tests, DISABLED_tests, FLAKY_tests,
+                             json, num_runs):
         # Aliasing to a short name for better access to its constants.
         JRG = json_results_generator.JSONResultsGenerator
 
@@ -118,10 +145,10 @@ class JSONGeneratorTest(unittest.TestCase):
         buildinfo = json[self.builder_name]
         self.assertTrue(JRG.FIXABLE in buildinfo)
         self.assertTrue(JRG.TESTS in buildinfo)
-        self.assertTrue(len(buildinfo[JRG.BUILD_NUMBERS]) == num_runs)
-        self.assertTrue(buildinfo[JRG.BUILD_NUMBERS][0] == self.build_number)
+        self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
+        self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
 
-        if tests_list or skipped_tests:
+        if tests_set or DISABLED_tests:
             fixable = {}
             for fixable_items in buildinfo[JRG.FIXABLE]:
                 for (type, count) in fixable_items.iteritems():
@@ -130,52 +157,58 @@ class JSONGeneratorTest(unittest.TestCase):
                     else:
                         fixable[type] = count
 
-            if passed_tests:
-                self.assertTrue(fixable[JRG.PASS_RESULT] == len(passed_tests))
+            if PASS_tests:
+                self.assertEqual(fixable[JRG.PASS_RESULT], len(PASS_tests))
             else:
                 self.assertTrue(JRG.PASS_RESULT not in fixable or
                                 fixable[JRG.PASS_RESULT] == 0)
-            if skipped_tests:
-                self.assertTrue(fixable[JRG.SKIP_RESULT] == len(skipped_tests))
+            if DISABLED_tests:
+                self.assertEqual(fixable[JRG.SKIP_RESULT], len(DISABLED_tests))
             else:
                 self.assertTrue(JRG.SKIP_RESULT not in fixable or
                                 fixable[JRG.SKIP_RESULT] == 0)
+            if FLAKY_tests:
+                self.assertEqual(fixable[JRG.FLAKY_RESULT], len(FLAKY_tests))
+            else:
+                self.assertTrue(JRG.FLAKY_RESULT not in fixable or
+                                fixable[JRG.FLAKY_RESULT] == 0)
 
         if failed_tests:
             tests = buildinfo[JRG.TESTS]
-            for test_name, failure in failed_tests.iteritems():
+            for test_name in failed_tests:
                 self.assertTrue(test_name in tests)
                 test = tests[test_name]
 
                 failed = 0
+                modifier = self._get_test_modifier(test_name)
                 for result in test[JRG.RESULTS]:
-                    if result[1] == JRG.FAIL_RESULT:
+                    if result[1] == modifier:
                         failed = result[0]
-                self.assertTrue(failed == 1)
+                self.assertEqual(1, failed)
 
                 timing_count = 0
                 for timings in test[JRG.TIMES]:
                     if timings[1] == test_timings[test_name]:
                         timing_count = timings[0]
-                self.assertTrue(timing_count == 1)
+                self.assertEqual(1, timing_count)
 
-        fixable_count = len(skipped_tests) + len(failed_tests.keys())
-        if skipped_tests or failed_tests:
-            self.assertTrue(sum(buildinfo[JRG.FIXABLE_COUNT]) == fixable_count)
+        fixable_count = len(DISABLED_tests | failed_tests)
+        if DISABLED_tests or failed_tests:
+            self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
 
     def test_json_generation(self):
-        reason = test_expectations.TEXT
-
-        self._test_json_generation([], {}, [])
-        self._test_json_generation(['A1', 'B1'], {}, [])
-        self._test_json_generation([], {'A2': reason, 'B2': reason}, [])
-        self._test_json_generation([], {}, ['A3', 'B3'])
-        self._test_json_generation(['A4'], {'B4': reason, 'C4': reason}, [])
+        self._test_json_generation([], [])
+        self._test_json_generation(['A1', 'B1'], [])
+        self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
+        self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
+        self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
+        self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
         self._test_json_generation(
-            [], {'A5': reason, 'B5': reason}, ['C5', 'D5'])
+            ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
+            ['FAILS_D6'])
         self._test_json_generation(
-            ['A6', 'B6', 'C6'], {'D6': reason}, ['E6', 'F6'])
-
+            ['A7', 'FLAKY_B7', 'DISABLED_C7'],
+            ['FAILS_D7', 'FLAKY_D8'])
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index f360adc..9fb684f 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -808,12 +808,10 @@ class TestRunner:
         self._printer.print_unexpected_results(unexpected_results)
 
         if self._options.record_results:
-            # Write the same data to log files.
-            self._write_json_files(unexpected_results, result_summary,
-                                   individual_test_timings)
-
-            # Upload generated JSON files to appengine server.
-            self._upload_json_files()
+            # Write the same data to log files and upload generated JSON files
+            # to appengine server.
+            self._upload_json_files(unexpected_results, result_summary,
+                                    individual_test_timings)
 
         # Write the summary to disk (results.html) and display it if requested.
         wrote_results = self._write_results_html_file(result_summary)
@@ -892,10 +890,10 @@ class TestRunner:
 
         return failed_results
 
-    def _write_json_files(self, unexpected_results, result_summary,
+    def _upload_json_files(self, unexpected_results, result_summary,
                         individual_test_timings):
         """Writes the results of the test run as JSON files into the results
-        dir.
+        dir and upload the files to the appengine server.
 
         There are three different files written into the results dir:
           unexpected_results.json: A short list of any unexpected results.
@@ -924,50 +922,25 @@ class TestRunner:
         with codecs.open(expectations_path, "w", "utf-8") as file:
             file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
 
-        json_layout_results_generator.JSONLayoutResultsGenerator(
+        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
             self._port, self._options.builder_name, self._options.build_name,
             self._options.build_number, self._options.results_directory,
             BUILDER_BASE_URL, individual_test_timings,
             self._expectations, result_summary, self._test_files_list,
             not self._options.upload_full_results,
-            self._options.test_results_server)
+            self._options.test_results_server,
+            "layout-tests",
+            self._options.master_name)
 
         _log.debug("Finished writing JSON files.")
 
-    def _upload_json_files(self):
-        if not self._options.test_results_server:
-            return
-
-        if not self._options.master_name:
-            _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
-            return
-
-        _log.info("Uploading JSON files for builder: %s",
-                   self._options.builder_name)
-
-        attrs = [("builder", self._options.builder_name), ("testtype", "layout-tests"),
-            ("master", self._options.master_name)]
-
         json_files = ["expectations.json"]
         if self._options.upload_full_results:
             json_files.append("results.json")
         else:
             json_files.append("incremental_results.json")
 
-        files = [(file, os.path.join(self._options.results_directory, file))
-            for file in json_files]
-
-        uploader = test_results_uploader.TestResultsUploader(
-            self._options.test_results_server)
-        try:
-            # Set uploading timeout in case appengine server is having problem.
-            # 120 seconds are more than enough to upload test results.
-            uploader.upload(attrs, files, 120)
-        except Exception, err:
-            _log.error("Upload failed: %s" % err)
-            return
-
-        _log.info("JSON files uploaded.")
+        generator.upload_json_files(json_files)
 
     def _print_config(self):
         """Prints the configuration for the test run."""

-- 
WebKit Debian packaging


