[SCM] WebKit Debian packaging branch, webkit-1.3, updated. upstream/1.3.7-4207-g178b198

dpranke at chromium.org dpranke at chromium.org
Mon Feb 21 00:28:15 UTC 2011


The following commit has been merged in the webkit-1.3 branch:
commit 9655ce455fae3f4a6217eed9c396054cce06584b
Author: dpranke at chromium.org <dpranke at chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Mon Jan 31 22:59:08 2011 +0000

    2011-01-31  Dirk Pranke  <dpranke at chromium.org>
    
            Reviewed by Mihai Parparita.
    
            The current modifier parsing code in test_expectations is
            fragile and hard-coded, so it's not easy to understand the logic
            or easily add new types of modifiers (like GPU vs. CPU testing
            for graphics tests, or 32-bit vs. 64-bit differences).
    
            This is the first of two patches that will add in more generic
            support and then eliminate the GPU-specific test expectations
            files for Chromium.
    
            This patch adds two standalone objects for handling modifiers. The
            rules for interpreting modifiers, precedence, and conflicts are
            given in the docstring to the ModifierMatcher class, which
            returns ModifierMatchResult objects.
    
            This patch also adds routines to the Port interface and a
            default set of values in the base object, in order to obtain the
            values needed on a given test run. These values are then passed
            to the expectation parser. This also allows us to clean up the
            logic used to lint all of the different configurations in a
            single test_expectations.txt file.
    
            The next patch will merge in the separate GPU expectations file.
    
            https://bugs.webkit.org/show_bug.cgi?id=51222
    
            * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py:
            * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
            * Scripts/webkitpy/layout_tests/port/base.py:
            * Scripts/webkitpy/layout_tests/port/base_unittest.py:
            * Scripts/webkitpy/layout_tests/port/chromium.py:
            * Scripts/webkitpy/layout_tests/port/port_testcase.py:
            * Scripts/webkitpy/layout_tests/port/test.py:
            * Scripts/webkitpy/layout_tests/port/webkit.py:
            * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py:
            * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py:
            * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
            * Tools/Scripts/webkitpy/style/checkers/test_expectations.py:
            * Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py:
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@77163 268f45cc-cd09-0410-ab3c-d52691b4dbfc

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index d238c31..c340243 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,48 @@
+2011-01-31  Dirk Pranke  <dpranke at chromium.org>
+
+        Reviewed by Mihai Parparita.
+
+        The current modifier parsing code in test_expectations is
+        fragile and hard-coded, so it's not easy to understand the logic
+        or easily add new types of modifiers (like GPU vs. CPU testing
+        for graphics tests, or 32-bit vs. 64-bit differences).
+
+        This is the first of two patches that will add in more generic
+        support and then eliminate the GPU-specific test expectations
+        files for Chromium.
+
+        This patch adds two standalone objects for handling modifiers. The
+        rules for interpreting modifiers, precedence, and conflicts are
+        given in the docstring to the ModifierMatcher class, which
+        returns ModifierMatchResult objects.
+        
+        This patch also adds routines to the Port interface and a
+        default set of values in the base object, in order to obtain the
+        values needed on a given test run. These values are then passed
+        to the expectation parser. This also allows us to clean up the
+        logic used to lint all of the different configurations in a
+        single test_expectations.txt file.
+
+        The next patch will merge in the separate GPU expectations file.
+
+        https://bugs.webkit.org/show_bug.cgi?id=51222
+
+        * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
+        * Scripts/webkitpy/layout_tests/port/base.py:
+        * Scripts/webkitpy/layout_tests/port/base_unittest.py:
+        * Scripts/webkitpy/layout_tests/port/chromium.py:
+        * Scripts/webkitpy/layout_tests/port/port_testcase.py:
+        * Scripts/webkitpy/layout_tests/port/test.py:
+        * Scripts/webkitpy/layout_tests/port/webkit.py:
+        * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py:
+        * Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        * Tools/Scripts/webkitpy/style/checkers/test_expectations.py:
+        * Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py:
+
 2011-01-31  Scott Cameron  <sccameron at rim.com>
 
         Reviewed by Daniel Bates.
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
index 12a786e..7ab6da8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -144,7 +144,7 @@ class  Testprinter(unittest.TestCase):
                       test in tests]
         expectations = test_expectations.TestExpectations(
             self._port, test_paths, expectations_str,
-            self._port.test_platform_name(), is_debug_mode=False,
+            self._port.test_configuration(),
             is_lint_mode=False)
 
         rs = result_summary.ResultSummary(expectations, test_paths)
@@ -363,7 +363,7 @@ class  Testprinter(unittest.TestCase):
     def test_print_progress__detailed(self):
         tests = ['passes/text.html', 'failures/expected/timeout.html',
                  'failures/expected/crash.html']
-        expectations = 'failures/expected/timeout.html = TIMEOUT'
+        expectations = 'BUGX : failures/expected/timeout.html = TIMEOUT'
 
         # first, test that it is disabled properly
         # should still print one-line-progress
@@ -569,8 +569,8 @@ class  Testprinter(unittest.TestCase):
         self.assertFalse(out.empty())
 
         expectations = """
-failures/expected/crash.html = CRASH
-failures/expected/timeout.html = TIMEOUT
+BUGX : failures/expected/crash.html = CRASH
+BUGX : failures/expected/timeout.html = TIMEOUT
 """
         err.reset()
         out.reset()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index acadcb3..54030d0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -31,6 +31,7 @@
 for layout tests.
 """
 
+import itertools
 import logging
 import re
 
@@ -84,18 +85,16 @@ def remove_pixel_failures(expected_results):
 class TestExpectations:
     TEST_LIST = "test_expectations.txt"
 
-    def __init__(self, port, tests, expectations, test_platform_name,
-                 is_debug_mode, is_lint_mode, overrides=None):
+    def __init__(self, port, tests, expectations, test_config,
+                 is_lint_mode, overrides=None):
         """Loads and parses the test expectations given in the string.
         Args:
             port: handle to object containing platform-specific functionality
             tests: list of all of the test files
             expectations: test expectations as a string
-            test_platform_name: name of the platform to match expectations
-                against. Note that this may be different than
-                port.test_platform_name() when is_lint_mode is True.
-            is_debug_mode: whether to use the DEBUG or RELEASE modifiers
-                in the expectations
+            test_config: specific values to check against when
+                parsing the file (usually port.test_configuration(),
+                but may be different when linting or doing other things).
             is_lint_mode: If True, just parse the expectations string
                 looking for errors.
             overrides: test expectations that are allowed to override any
@@ -104,7 +103,7 @@ class TestExpectations:
                 and downstream expectations).
         """
         self._expected_failures = TestExpectationsFile(port, expectations,
-            tests, test_platform_name, is_debug_mode, is_lint_mode,
+            tests, test_config, is_lint_mode,
             overrides=overrides)
 
     # TODO(ojan): Allow for removing skipped tests when getting the list of
@@ -197,7 +196,7 @@ class ParseError(Exception):
         return '\n'.join(map(str, self.errors))
 
     def __repr__(self):
-        return 'ParseError(fatal=%s, errors=%s)' % (fatal, errors)
+        return 'ParseError(fatal=%s, errors=%s)' % (self.fatal, self.errors)
 
 
 class ModifiersAndExpectations:
@@ -302,29 +301,15 @@ class TestExpectationsFile:
                     'fail': FAIL,
                     'flaky': FLAKY}
 
-    def __init__(self, port, expectations, full_test_list, test_platform_name,
-        is_debug_mode, is_lint_mode, overrides=None):
-        """
-        expectations: Contents of the expectations file
-        full_test_list: The list of all tests to be run pending processing of
-            the expections for those tests.
-        test_platform_name: name of the platform to match expectations
-            against. Note that this may be different than
-            port.test_platform_name() when is_lint_mode is True.
-        is_debug_mode: Whether we testing a test_shell built debug mode.
-        is_lint_mode: Whether this is just linting test_expecatations.txt.
-        overrides: test expectations that are allowed to override any
-            entries in |expectations|. This is used by callers
-            that need to manage two sets of expectations (e.g., upstream
-            and downstream expectations).
-        """
+    def __init__(self, port, expectations, full_test_list,
+                 test_config, is_lint_mode, overrides=None):
+        # See argument documentation in TestExpectation(), above.
 
         self._port = port
         self._fs = port._filesystem
         self._expectations = expectations
         self._full_test_list = full_test_list
-        self._test_platform_name = test_platform_name
-        self._is_debug_mode = is_debug_mode
+        self._test_config = test_config
         self._is_lint_mode = is_lint_mode
         self._overrides = overrides
         self._errors = []
@@ -332,7 +317,9 @@ class TestExpectationsFile:
 
         # Maps relative test paths as listed in the expectations file to a
         # list of maps containing modifiers and expectations for each time
-        # the test is listed in the expectations file.
+        # the test is listed in the expectations file. We use this to
+        # keep a representation of the entire list of expectations, even
+        # invalid ones.
         self._all_expectations = {}
 
         # Maps a test to its list of expectations.
@@ -345,7 +332,8 @@ class TestExpectationsFile:
         # the options minus any bug or platform strings
         self._test_to_modifiers = {}
 
-        # Maps a test to the base path that it was listed with in the list.
+        # Maps a test to the base path that it was listed with in the list and
+        # the number of matches that base path had.
         self._test_list_paths = {}
 
         self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
@@ -372,13 +360,7 @@ class TestExpectationsFile:
 
     def _handle_any_read_errors(self):
         if len(self._errors) or len(self._non_fatal_errors):
-            if self._is_debug_mode:
-                build_type = 'DEBUG'
-            else:
-                build_type = 'RELEASE'
-            _log.error('')
-            _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
-                       (self._test_platform_name.upper(), build_type))
+            _log.error("FAILURES FOR %s" % str(self._test_config))
 
             for error in self._errors:
                 _log.error(error)
@@ -394,11 +376,12 @@ class TestExpectationsFile:
         expectations = set([PASS])
         options = []
         modifiers = []
+        num_matches = 0
         if self._full_test_list:
             for test in self._full_test_list:
                 if not test in self._test_list_paths:
-                    self._add_test(test, modifiers, expectations, options,
-                        overrides_allowed=False)
+                    self._add_test(test, modifiers, num_matches, expectations,
+                                   options, overrides_allowed=False)
 
     def _dict_of_sets(self, strings_to_constants):
         """Takes a dict of strings->constants and returns a dict mapping
@@ -505,7 +488,8 @@ class TestExpectationsFile:
                 _log.info('  new: %s', new_line)
             elif action == ADD_PLATFORMS_EXCEPT_THIS:
                 parts = line.split(':')
-                new_options = parts[0]
+                _log.info('Test updated: ')
+                _log.info('  old: %s', line)
                 for p in self._port.test_platform_names():
                     p = p.upper()
                     # This is a temp solution for rebaselining tool.
@@ -515,13 +499,11 @@ class TestExpectationsFile:
                     # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
                     # reliable Win 7 and Win Vista buildbots setup.
                     if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
-                        new_options += p + ' '
-                new_line = ('%s:%s' % (new_options, parts[1]))
-                f_new.append(new_line)
+                        new_options = parts[0] + p + ' '
+                        new_line = ('%s:%s' % (new_options, parts[1]))
+                        f_new.append(new_line)
+                        _log.info('  new: %s', new_line)
                 tests_updated += 1
-                _log.info('Test updated: ')
-                _log.info('  old: %s', line)
-                _log.info('  new: %s', new_line)
 
         _log.info('Total tests removed: %d', tests_removed)
         _log.info('Total tests updated: %d', tests_updated)
@@ -537,12 +519,15 @@ class TestExpectationsFile:
 
         options = []
         if line.find(":") is -1:
-            test_and_expectation = line.split("=")
-        else:
-            parts = line.split(":")
-            options = self._get_options_list(parts[0])
-            test_and_expectation = parts[1].split('=')
+            self._add_error(lineno, "Missing a ':'", line)
+            return (None, None, None)
 
+        parts = line.split(':')
+
+        # FIXME: verify that there is exactly one colon in the line.
+
+        options = self._get_options_list(parts[0])
+        test_and_expectation = parts[1].split('=')
         test = test_and_expectation[0].strip()
         if (len(test_and_expectation) is not 2):
             self._add_error(lineno, "Missing expectations.",
@@ -588,69 +573,6 @@ class TestExpectationsFile:
 
         return REMOVE_TEST
 
-    def _has_valid_modifiers_for_current_platform(self, options, lineno,
-        test_and_expectations, modifiers):
-        """Returns true if the current platform is in the options list or if
-        no platforms are listed and if there are no fatal errors in the
-        options list.
-
-        Args:
-          options: List of lowercase options.
-          lineno: The line in the file where the test is listed.
-          test_and_expectations: The path and expectations for the test.
-          modifiers: The set to populate with modifiers.
-        """
-        has_any_platform = False
-        has_bug_id = False
-        for option in options:
-            if option in self.MODIFIERS:
-                modifiers.add(option)
-            elif option in self._port.test_platform_names():
-                has_any_platform = True
-            elif re.match(r'bug\d', option) != None:
-                self._add_error(lineno, 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: %s' %
-                                option, test_and_expectations)
-            elif option.startswith('bug'):
-                has_bug_id = True
-            elif option not in self.BUILD_TYPES:
-                self._add_error(lineno, 'Invalid modifier for test: %s' %
-                                option, test_and_expectations)
-
-        if has_any_platform and not self._match_platform(options):
-            return False
-
-        if not has_bug_id and 'wontfix' not in options:
-            # TODO(ojan): Turn this into an AddError call once all the
-            # tests have BUG identifiers.
-            self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
-                test_and_expectations)
-
-        if 'release' in options or 'debug' in options:
-            if self._is_debug_mode and 'debug' not in options:
-                return False
-            if not self._is_debug_mode and 'release' not in options:
-                return False
-
-        if self._is_lint_mode and 'rebaseline' in options:
-            self._add_error(lineno,
-                'REBASELINE should only be used for running rebaseline.py. '
-                'Cannot be checked in.', test_and_expectations)
-
-        return True
-
-    def _match_platform(self, options):
-        """Match the list of options against our specified platform. If any
-        of the options prefix-match self._platform, return True. This handles
-        the case where a test is marked WIN and the platform is WIN-VISTA.
-
-        Args:
-          options: list of options
-        """
-        for opt in options:
-            if self._test_platform_name.startswith(opt):
-                return True
-        return False
-
     def _add_to_all_expectations(self, test, options, expectations):
         # Make all paths unix-style so the dashboard doesn't need to.
         test = test.replace('\\', '/')
@@ -663,54 +585,43 @@ class TestExpectationsFile:
         """For each test in an expectations iterable, generate the
         expectations for it."""
         lineno = 0
+        matcher = ModifierMatcher(self._test_config)
         for line in expectations:
             lineno += 1
+            self._process_line(line, lineno, matcher, overrides_allowed)
 
-            test_list_path, options, expectations = \
-                self.parse_expectations_line(line, lineno)
-            if not expectations:
-                continue
+    def _process_line(self, line, lineno, matcher, overrides_allowed):
+        test_list_path, options, expectations = \
+            self.parse_expectations_line(line, lineno)
+        if not expectations:
+            return
 
-            self._add_to_all_expectations(test_list_path,
-                                          " ".join(options).upper(),
-                                          " ".join(expectations).upper())
+        self._add_to_all_expectations(test_list_path,
+                                        " ".join(options).upper(),
+                                        " ".join(expectations).upper())
 
-            modifiers = set()
-            if options and not self._has_valid_modifiers_for_current_platform(
-                options, lineno, test_list_path, modifiers):
-                continue
+        num_matches = self._check_options(matcher, options, lineno,
+                                          test_list_path)
+        if num_matches == ModifierMatcher.NO_MATCH:
+            return
 
-            expectations = self._parse_expectations(expectations, lineno,
-                test_list_path)
+        expectations = self._parse_expectations(expectations, lineno,
+            test_list_path)
 
-            if 'slow' in options and TIMEOUT in expectations:
-                self._add_error(lineno,
-                    'A test can not be both slow and timeout. If it times out '
-                    'indefinitely, then it should be just timeout.',
-                    test_list_path)
+        self._check_options_against_expectations(options, expectations,
+            lineno, test_list_path)
 
-            full_path = self._fs.join(self._port.layout_tests_dir(),
-                                      test_list_path)
-            full_path = self._fs.normpath(full_path)
-            # WebKit's way of skipping tests is to add a -disabled suffix.
-            # So we should consider the path existing if the path or the
-            # -disabled version exists.
-            if (not self._port.path_exists(full_path)
-                and not self._port.path_exists(full_path + '-disabled')):
-                # Log a non fatal error here since you hit this case any
-                # time you update test_expectations.txt without syncing
-                # the LayoutTests directory
-                self._log_non_fatal_error(lineno, 'Path does not exist.',
-                                       test_list_path)
-                continue
+        if self._check_path_does_not_exist(lineno, test_list_path):
+            return
 
-            if not self._full_test_list:
-                tests = [test_list_path]
-            else:
-                tests = self._expand_tests(test_list_path)
+        if not self._full_test_list:
+            tests = [test_list_path]
+        else:
+            tests = self._expand_tests(test_list_path)
 
-            self._add_tests(tests, expectations, test_list_path, lineno,
-                           modifiers, options, overrides_allowed)
+        modifiers = [o for o in options if o in self.MODIFIERS]
+        self._add_tests(tests, expectations, test_list_path, lineno,
+                        modifiers, num_matches, options, overrides_allowed)
 
     def _get_options_list(self, listString):
         return [part.strip().lower() for part in listString.strip().split(' ')]
@@ -726,6 +637,65 @@ class TestExpectationsFile:
             result.add(expectation)
         return result
 
+    def _check_options(self, matcher, options, lineno, test_list_path):
+        match_result = self._check_syntax(matcher, options, lineno,
+                                          test_list_path)
+        self._check_semantics(options, lineno, test_list_path)
+        return match_result.num_matches
+
+    def _check_syntax(self, matcher, options, lineno, test_list_path):
+        match_result = matcher.match(options)
+        for error in match_result.errors:
+            self._add_error(lineno, error, test_list_path)
+        for warning in match_result.warnings:
+            self._log_non_fatal_error(lineno, warning, test_list_path)
+        return match_result
+
+    def _check_semantics(self, options, lineno, test_list_path):
+        has_wontfix = 'wontfix' in options
+        has_bug = False
+        for opt in options:
+            if opt.startswith('bug'):
+                has_bug = True
+                if re.match('bug\d+', opt):
+                    self._add_error(lineno,
+                        'BUG\d+ is not allowed, must be one of '
+                        'BUGCR\d+, BUGWK\d+, BUGV8_\d+, '
+                        'or a non-numeric bug identifier.', test_list_path)
+
+        if not has_bug and not has_wontfix:
+            self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
+                                      test_list_path)
+
+        if self._is_lint_mode and 'rebaseline' in options:
+            self._add_error(lineno,
+                'REBASELINE should only be used for running rebaseline.py. '
+                'Cannot be checked in.', test_list_path)
+
+    def _check_options_against_expectations(self, options, expectations,
+                                            lineno, test_list_path):
+        if 'slow' in options and TIMEOUT in expectations:
+            self._add_error(lineno,
+                'A test can not be both SLOW and TIMEOUT. If it times out '
+                'indefinitely, then it should be just TIMEOUT.', test_list_path)
+
+    def _check_path_does_not_exist(self, lineno, test_list_path):
+        full_path = self._fs.join(self._port.layout_tests_dir(),
+                                  test_list_path)
+        full_path = self._fs.normpath(full_path)
+        # WebKit's way of skipping tests is to add a -disabled suffix.
+        # So we should consider the path existing if the path or the
+        # -disabled version exists.
+        if (not self._port.path_exists(full_path)
+            and not self._port.path_exists(full_path + '-disabled')):
+            # Log a non fatal error here since you hit this case any
+            # time you update test_expectations.txt without syncing
+            # the LayoutTests directory
+            self._log_non_fatal_error(lineno, 'Path does not exist.',
+                                      test_list_path)
+            return True
+        return False
+
     def _expand_tests(self, test_list_path):
         """Convert the test specification to an absolute, normalized
         path and make sure directories end with the OS path separator."""
@@ -751,17 +721,19 @@ class TestExpectationsFile:
         return result
 
     def _add_tests(self, tests, expectations, test_list_path, lineno,
-                   modifiers, options, overrides_allowed):
+                   modifiers, num_matches, options, overrides_allowed):
         for test in tests:
-            if self._already_seen_test(test, test_list_path, lineno,
-                                       overrides_allowed):
+            if self._already_seen_better_match(test, test_list_path,
+                num_matches, lineno, overrides_allowed):
                 continue
 
             self._clear_expectations_for_test(test, test_list_path)
-            self._add_test(test, modifiers, expectations, options,
+            self._test_list_paths[test] = (self._fs.normpath(test_list_path),
+                num_matches, lineno)
+            self._add_test(test, modifiers, num_matches, expectations, options,
                            overrides_allowed)
 
-    def _add_test(self, test, modifiers, expectations, options,
+    def _add_test(self, test, modifiers, num_matches, expectations, options,
                   overrides_allowed):
         """Sets the expected state for a given test.
 
@@ -772,6 +744,7 @@ class TestExpectationsFile:
         Args:
           test: test to add
           modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
+          num_matches: number of modifiers that matched the configuration
           expectations: sequence of expectations (PASS, IMAGE, etc.)
           options: sequence of keywords and bug identifiers.
           overrides_allowed: whether we're parsing the regular expectations
@@ -810,11 +783,12 @@ class TestExpectationsFile:
         than a previous listing.
         """
         if test in self._test_list_paths:
-            self._test_to_expectations.pop(test, '')
-            self._remove_from_sets(test, self._expectation_to_tests)
-            self._remove_from_sets(test, self._modifier_to_tests)
-            self._remove_from_sets(test, self._timeline_to_tests)
-            self._remove_from_sets(test, self._result_type_to_tests)
+            base_path = self._test_list_paths[test][0]
+            self._test_to_expectations.pop(base_path, '')
+            self._remove_from_sets(base_path, self._expectation_to_tests)
+            self._remove_from_sets(base_path, self._modifier_to_tests)
+            self._remove_from_sets(base_path, self._timeline_to_tests)
+            self._remove_from_sets(base_path, self._result_type_to_tests)
 
         self._test_list_paths[test] = self._fs.normpath(test_list_path)
 
@@ -828,32 +802,70 @@ class TestExpectationsFile:
             if test in set_of_tests:
                 set_of_tests.remove(test)
 
-    def _already_seen_test(self, test, test_list_path, lineno,
-                           allow_overrides):
-        """Returns true if we've already seen a more precise path for this test
-        than the test_list_path.
+    def _already_seen_better_match(self, test, test_list_path, num_matches,
+                                   lineno, overrides_allowed):
+        """Returns whether we've seen a better match already in the file.
+
+        Returns True if we've already seen a test_list_path that matches more of the test
+            than this path does
         """
+        # FIXME: See comment below about matching test configs and num_matches.
+
         if not test in self._test_list_paths:
+            # We've never seen this test before.
             return False
 
-        prev_base_path = self._test_list_paths[test]
-        if (prev_base_path == self._fs.normpath(test_list_path)):
-            if (not allow_overrides or test in self._overridding_tests):
-                if allow_overrides:
-                    expectation_source = "override"
-                else:
-                    expectation_source = "expectation"
-                self._add_error(lineno, 'Duplicate %s.' % expectation_source,
-                                   test)
-                return True
-            else:
-                # We have seen this path, but that's okay because its
-                # in the overrides and the earlier path was in the
-                # expectations.
-                return False
+        prev_base_path, prev_num_matches, prev_lineno = self._test_list_paths[test]
+        base_path = self._fs.normpath(test_list_path)
+
+        if len(prev_base_path) > len(base_path):
+            # The previous path matched more of the test.
+            return True
+
+        if len(prev_base_path) < len(base_path):
+            # This path matches more of the test.
+            return False
+
+        if overrides_allowed and test not in self._overridding_tests:
+            # We have seen this path, but that's okay because it is
+            # in the overrides and the earlier path was in the
+            # expectations (not the overrides).
+            return False
+
+        # At this point we know we have seen a previous exact match on this
+        # base path, so we need to check the two sets of modifiers.
 
-        # Check if we've already seen a more precise path.
-        return prev_base_path.startswith(self._fs.normpath(test_list_path))
+        if overrides_allowed:
+            expectation_source = "override"
+        else:
+            expectation_source = "expectation"
+
+        # FIXME: This code was originally designed to allow lines that matched
+        # more modifiers to override lines that matched fewer modifiers.
+        # However, we currently view these as errors. If we decide to make
+        # this policy permanent, we can probably simplify this code
+        # and the ModifierMatcher code a fair amount.
+        #
+        # To use the "more modifiers wins" policy, change the "_add_error" lines for overrides
+        # to _log_non_fatal_error() and change the commented-out "return False".
+
+        if prev_num_matches == num_matches:
+            self._add_error(lineno,
+                'Duplicate or ambiguous %s.' % expectation_source,
+                test)
+            return True
+
+        if prev_num_matches < num_matches:
+            self._add_error(lineno,
+                'More specific entry on line %d overrides line %d' %
+                (lineno, prev_lineno), test_list_path)
+            # FIXME: return False if we want more specific to win.
+            return True
+
+        self._add_error(lineno,
+            'More specific entry on line %d overrides line %d' %
+            (prev_lineno, lineno), test_list_path)
+        return True
 
     def _add_error(self, lineno, msg, path):
         """Reports an error that will prevent running the tests. Does not
@@ -865,3 +877,188 @@ class TestExpectationsFile:
         """Reports an error that will not prevent running the tests. These are
         still errors, but not bad enough to warrant breaking test running."""
         self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
+
+
+class ModifierMatchResult(object):
+    def __init__(self, options):
+        self.num_matches = ModifierMatcher.NO_MATCH
+        self.options = options
+        self.errors = []
+        self.warnings = []
+        self.modifiers = []
+        self._matched_regexes = set()
+        self._matched_macros = set()
+
+
+class ModifierMatcher(object):
+
+    """
+    This class manages the interpretation of the "modifiers" for a given
+    line in the expectations file. Modifiers are the tokens that appear to the
+    left of the colon on a line. For example, "BUG1234", "DEBUG", and "WIN" are
+    all modifiers. This class gets what the valid modifiers are, and which
+    modifiers are allowed to exist together on a line, from the
+    TestConfiguration object that is passed in to the call.
+
+    This class detects *intra*-line errors like unknown modifiers, but
+    does not detect *inter*-line errors like duplicate expectations.
+
+    More importantly, this class is also used to determine if a given line
+    matches the port in question. Matches are ranked according to the number
+    of modifiers that match on a line. A line with no modifiers matches
+    everything and has a score of zero. A line with one modifier matches only
+    ports that have that modifier and gets a score of 1, and so on. Ports
+    that don't match at all get a score of -1.
+
+    Given two lines in a file that apply to the same test, if both expectations
+    match the current config, then the expectation is considered ambiguous,
+    even if one expectation matches more of the config than the other. For
+    example, in:
+
+    BUG1 RELEASE : foo.html = FAIL
+    BUG1 WIN RELEASE : foo.html = PASS
+    BUG2 WIN : bar.html = FAIL
+    BUG2 DEBUG : bar.html = PASS
+
+    lines 1 and 2 would produce an error on a Win XP Release bot (the scores
+    would be 1 and 2, respectively), and lines 3 and 4 would produce
+    a duplicate expectation on a Win Debug bot since both the 'win' and the
+    'debug' expectations would apply (both had scores of 1).
+
+    In addition to the definitions of all of the modifiers, the class
+    supports "macros" that are expanded prior to interpretation, and "ignore
+    regexes" that can be used to skip over modifiers like the BUG* modifiers.
+    """
+    MACROS = {
+        'mac-snowleopard': ['mac', 'snowleopard'],
+        'mac-leopard': ['mac', 'leopard'],
+        'win-xp': ['win', 'xp'],
+        'win-vista': ['win', 'vista'],
+        'win-7': ['win', 'win7'],
+    }
+
+    # We don't include the "none" modifier because it isn't actually legal.
+    REGEXES_TO_IGNORE = (['bug\w+'] +
+                         TestExpectationsFile.MODIFIERS.keys()[:-1])
+    DUPLICATE_REGEXES_ALLOWED = ['bug\w+']
+
+    # Magic value returned when the options don't match.
+    NO_MATCH = -1
+
+    # FIXME: The code currently doesn't detect combinations of modifiers
+    # that are syntactically valid but semantically invalid, like
+    # 'MAC XP'. See ModifierMatchTest.test_invalid_combinations() in the
+    # _unittest.py file.
+
+    def __init__(self, test_config):
+        """Initialize a ModifierMatcher argument with the TestConfiguration it
+        should be matched against."""
+        self.test_config = test_config
+        self.allowed_configurations = test_config.all_test_configurations()
+        self.macros = self.MACROS
+
+        self.regexes_to_ignore = {}
+        for regex_str in self.REGEXES_TO_IGNORE:
+            self.regexes_to_ignore[regex_str] = re.compile(regex_str)
+
+        # Keep a set of all of the legal modifiers for quick checking.
+        self._all_modifiers = set()
+
+        # Keep a dict mapping values back to their categories.
+        self._categories_for_modifiers = {}
+        for config in self.allowed_configurations:
+            for category, modifier in config.items():
+                self._categories_for_modifiers[modifier] = category
+                self._all_modifiers.add(modifier)
+
+    def match(self, options):
+        """Checks a list of options against the config set in the constructor.
+        Options may be either actual modifier strings, "macro" strings
+        that get expanded to a list of modifiers, or strings that are allowed
+        to be ignored. All of the options must be passed in in lower case.
+
+        Returns the number of matching categories, or NO_MATCH (-1) if it
+        doesn't match or there were errors found. Matches are prioritized
+        by the number of matching categories, because the more specific
+        the options list, the more categories will match.
+
+        The results of the most recent match are available in the 'options',
+        'modifiers', 'num_matches', 'errors', and 'warnings' properties.
+        """
+        result = ModifierMatchResult(options)
+        self._parse(result)
+        if result.errors:
+            return result
+        self._count_matches(result)
+        return result
+
+    def _parse(self, result):
+        # FIXME: Should we warn about lines having every value in a category?
+        for option in result.options:
+            self._parse_one(option, result)
+
+    def _parse_one(self, option, result):
+        if option in self._all_modifiers:
+            self._add_modifier(option, result)
+        elif option in self.macros:
+            self._expand_macro(option, result)
+        elif not self._matches_any_regex(option, result):
+            result.errors.append("Unrecognized option '%s'" % option)
+
+    def _add_modifier(self, option, result):
+        if option in result.modifiers:
+            result.errors.append("More than one '%s'" % option)
+        else:
+            result.modifiers.append(option)
+
+    def _expand_macro(self, macro, result):
+        if macro in result._matched_macros:
+            result.errors.append("More than one '%s'" % macro)
+            return
+
+        mods = []
+        for modifier in self.macros[macro]:
+            if modifier in result.options:
+                result.errors.append("Can't specify both modifier '%s' and "
+                                     "macro '%s'" % (modifier, macro))
+            else:
+                mods.append(modifier)
+        result._matched_macros.add(macro)
+        result.modifiers.extend(mods)
+
+    def _matches_any_regex(self, option, result):
+        for regex_str, pattern in self.regexes_to_ignore.iteritems():
+            if pattern.match(option):
+                self._handle_regex_match(regex_str, result)
+                return True
+        return False
+
+    def _handle_regex_match(self, regex_str, result):
+        if (regex_str in result._matched_regexes and
+            regex_str not in self.DUPLICATE_REGEXES_ALLOWED):
+            result.errors.append("More than one option matching '%s'" %
+                                 regex_str)
+        else:
+            result._matched_regexes.add(regex_str)
+
+    def _count_matches(self, result):
+        """Returns the number of modifiers that match the test config."""
+        categorized_modifiers = self._group_by_category(result.modifiers)
+        result.num_matches = 0
+        for category, modifier in self.test_config.items():
+            if category in categorized_modifiers:
+                if modifier in categorized_modifiers[category]:
+                    result.num_matches += 1
+                else:
+                    result.num_matches = self.NO_MATCH
+                    return
+
+    def _group_by_category(self, modifiers):
+        # Returns a dict of category name -> list of modifiers.
+        modifiers_by_category = {}
+        for m in modifiers:
+            modifiers_by_category.setdefault(self._category(m), []).append(m)
+        return modifiers_by_category
+
+    def _category(self, modifier):
+        return self._categories_for_modifiers[modifier]
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
index 8f9e5dd..fd99d6b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -32,6 +32,7 @@
 import unittest
 
 from webkitpy.layout_tests import port
+from webkitpy.layout_tests.port import base
 from webkitpy.layout_tests.layout_package.test_expectations import *
 
 class FunctionsTest(unittest.TestCase):
@@ -78,8 +79,11 @@ class FunctionsTest(unittest.TestCase):
 
 
 class Base(unittest.TestCase):
+    # Note that all of these tests are written assuming the configuration
+    # being tested is Windows XP, Release build.
+
     def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
-        self._port = port.get('test', None)
+        self._port = port.get('test-win-xp', None)
         self._fs = self._port._filesystem
         self._exp = None
         unittest.TestCase.__init__(self, testFunc)
@@ -101,16 +105,15 @@ BUG_TEST : failures/expected/text.html = TEXT
 BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
 BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
 BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
-BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
+BUG_TEST WONTFIX MAC : failures/expected/image.html = IMAGE
 """
 
-    def parse_exp(self, expectations, overrides=None, is_lint_mode=False,
-                  is_debug_mode=False):
+    def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
+        test_config = self._port.test_configuration()
         self._exp = TestExpectations(self._port,
              tests=self.get_basic_tests(),
              expectations=expectations,
-             test_platform_name=self._port.test_platform_name(),
-             is_debug_mode=is_debug_mode,
+             test_config=test_config,
              is_lint_mode=is_lint_mode,
              overrides=overrides)
 
@@ -119,7 +122,7 @@ BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
                           set([result]))
 
 
-class TestExpectationsTest(Base):
+class BasicTests(Base):
     def test_basic(self):
         self.parse_exp(self.get_basic_expectations())
         self.assert_exp('failures/expected/text.html', TEXT)
@@ -127,23 +130,14 @@ class TestExpectationsTest(Base):
         self.assert_exp('passes/text.html', PASS)
         self.assert_exp('failures/expected/image.html', PASS)
 
+
+class MiscTests(Base):
     def test_multiple_results(self):
         self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH')
         self.assertEqual(self._exp.get_expectations(
             self.get_test('failures/expected/text.html')),
             set([TEXT, CRASH]))
 
-    def test_precedence(self):
-        # This tests handling precedence of specific lines over directories
-        # and tests expectations covering entire directories.
-        exp_str = """
-BUGX : failures/expected/text.html = TEXT
-BUGX WONTFIX : failures/expected = IMAGE
-"""
-        self.parse_exp(exp_str)
-        self.assert_exp('failures/expected/text.html', TEXT)
-        self.assert_exp('failures/expected/crash.html', IMAGE)
-
     def test_category_expectations(self):
         # This test checks unknown tests are not present in the
         # expectations and that known test part of a test category is
@@ -158,20 +152,6 @@ BUGX WONTFIX : failures/expected = IMAGE
                           unknown_test)
         self.assert_exp('failures/expected/crash.html', IMAGE)
 
-    def test_release_mode(self):
-        self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
-                       is_debug_mode=True)
-        self.assert_exp('failures/expected/text.html', TEXT)
-        self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
-                       is_debug_mode=True)
-        self.assert_exp('failures/expected/text.html', PASS)
-        self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
-                       is_debug_mode=False)
-        self.assert_exp('failures/expected/text.html', PASS)
-        self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
-                       is_debug_mode=False)
-        self.assert_exp('failures/expected/text.html', TEXT)
-
     def test_get_options(self):
         self.parse_exp(self.get_basic_expectations())
         self.assertEqual(self._exp.get_options(
@@ -216,7 +196,7 @@ SKIP : failures/expected/image.html""")
             self.assertFalse(True, "ParseError wasn't raised")
         except ParseError, e:
             self.assertTrue(e.fatal)
-            exp_errors = [u'Line:1 Invalid modifier for test: foo failures/expected/text.html',
+            exp_errors = [u"Line:1 Unrecognized option 'foo' failures/expected/text.html",
                           u"Line:2 Missing expectations. [' failures/expected/image.html']"]
             self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
             self.assertEqual(e.errors, exp_errors)
@@ -232,77 +212,160 @@ SKIP : failures/expected/image.html""")
             self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
             self.assertEqual(e.errors, exp_errors)
 
-    def test_syntax_missing_expectation(self):
+    def test_overrides(self):
+        self.parse_exp("BUG_EXP: failures/expected/text.html = TEXT",
+                       "BUG_OVERRIDE : failures/expected/text.html = IMAGE")
+        self.assert_exp('failures/expected/text.html', IMAGE)
+
+    def test_overrides__duplicate(self):
+        self.assertRaises(ParseError, self.parse_exp,
+             "BUG_EXP: failures/expected/text.html = TEXT",
+             """
+BUG_OVERRIDE : failures/expected/text.html = IMAGE
+BUG_OVERRIDE : failures/expected/text.html = CRASH
+""")
+
+    def test_pixel_tests_flag(self):
+        def match(test, result, pixel_tests_enabled):
+            return self._exp.matches_an_expected_result(
+                self.get_test(test), result, pixel_tests_enabled)
+
+        self.parse_exp(self.get_basic_expectations())
+        self.assertTrue(match('failures/expected/text.html', TEXT, True))
+        self.assertTrue(match('failures/expected/text.html', TEXT, False))
+        self.assertFalse(match('failures/expected/text.html', CRASH, True))
+        self.assertFalse(match('failures/expected/text.html', CRASH, False))
+        self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
+                              True))
+        self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+                              False))
+        self.assertTrue(match('failures/expected/crash.html', SKIP, False))
+        self.assertTrue(match('passes/text.html', PASS, False))
+
+
+class ExpectationSyntaxTests(Base):
+    def test_missing_expectation(self):
         # This is missing the expectation.
         self.assertRaises(ParseError, self.parse_exp,
-                          'BUG_TEST: failures/expected/text.html',
-                          is_debug_mode=True)
+                          'BUG_TEST: failures/expected/text.html')
 
-    def test_syntax_invalid_option(self):
+    def test_missing_colon(self):
+        # This is missing the modifiers and the ':'
         self.assertRaises(ParseError, self.parse_exp,
-                          'BUG_TEST FOO: failures/expected/text.html = PASS')
+                          'failures/expected/text.html = TEXT')
 
-    def test_syntax_invalid_expectation(self):
-        # This is missing the expectation.
+    def disabled_test_too_many_colons(self):
+        # FIXME: Enable this test and fix the underlying bug.
+        self.assertRaises(ParseError, self.parse_exp,
+                          'BUG_TEST: failures/expected/text.html = PASS :')
+
+    def test_too_many_equals_signs(self):
         self.assertRaises(ParseError, self.parse_exp,
-                          'BUG_TEST: failures/expected/text.html = FOO')
+                          'BUG_TEST: failures/expected/text.html = TEXT = IMAGE')
+
+    def test_unrecognized_expectation(self):
+        self.assertRaises(ParseError, self.parse_exp,
+                          'BUG_TEST: failures/expected/text.html = UNKNOWN')
+
+    def test_macro(self):
+        exp_str = """
+BUG_TEST WIN-XP : failures/expected/text.html = TEXT
+"""
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', TEXT)
+
+
+class SemanticTests(Base):
+    def test_bug_format(self):
+        self.assertRaises(ParseError, self.parse_exp, 'BUG1234 : failures/expected/text.html = TEXT')
 
-    def test_syntax_missing_bugid(self):
+    def test_missing_bugid(self):
         # This should log a non-fatal error.
         self.parse_exp('SLOW : failures/expected/text.html = TEXT')
         self.assertEqual(
             len(self._exp._expected_failures.get_non_fatal_errors()), 1)
 
-    def test_semantic_slow_and_timeout(self):
+    def test_slow_and_timeout(self):
         # A test cannot be SLOW and expected to TIMEOUT.
         self.assertRaises(ParseError, self.parse_exp,
             'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT')
 
-    def test_semantic_rebaseline(self):
+    def test_rebaseline(self):
         # Can't lint a file w/ 'REBASELINE' in it.
         self.assertRaises(ParseError, self.parse_exp,
             'BUG_TEST REBASELINE : failures/expected/text.html = TEXT',
             is_lint_mode=True)
 
-    def test_semantic_duplicates(self):
+    def test_duplicates(self):
         self.assertRaises(ParseError, self.parse_exp, """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+BUG_EXP : failures/expected/text.html = TEXT
+BUG_EXP : failures/expected/text.html = IMAGE""")
 
         self.assertRaises(ParseError, self.parse_exp,
-            self.get_basic_expectations(), """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+            self.get_basic_expectations(), overrides="""
+BUG_OVERRIDE : failures/expected/text.html = TEXT
+BUG_OVERRIDE : failures/expected/text.html = IMAGE""", )
 
-    def test_semantic_missing_file(self):
+    def test_missing_file(self):
         # This should log a non-fatal error.
         self.parse_exp('BUG_TEST : missing_file.html = TEXT')
         self.assertEqual(
             len(self._exp._expected_failures.get_non_fatal_errors()), 1)
 
 
-    def test_overrides(self):
-        self.parse_exp(self.get_basic_expectations(), """
-BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
-        self.assert_exp('failures/expected/text.html', IMAGE)
+class PrecedenceTests(Base):
+    def test_file_over_directory(self):
+        # This tests handling precedence of specific lines over directories
+        # and tests expectations covering entire directories.
+        exp_str = """
+BUGX : failures/expected/text.html = TEXT
+BUGX WONTFIX : failures/expected = IMAGE
+"""
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', TEXT)
+        self.assert_exp('failures/expected/crash.html', IMAGE)
 
-    def test_matches_an_expected_result(self):
+        exp_str = """
+BUGX WONTFIX : failures/expected = IMAGE
+BUGX : failures/expected/text.html = TEXT
+"""
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', TEXT)
+        self.assert_exp('failures/expected/crash.html', IMAGE)
 
-        def match(test, result, pixel_tests_enabled):
-            return self._exp.matches_an_expected_result(
-                self.get_test(test), result, pixel_tests_enabled)
+    def test_ambiguous(self):
+        self.assertRaises(ParseError, self.parse_exp, """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST WIN : passes/text.html = FAIL
+""")
 
-        self.parse_exp(self.get_basic_expectations())
-        self.assertTrue(match('failures/expected/text.html', TEXT, True))
-        self.assertTrue(match('failures/expected/text.html', TEXT, False))
-        self.assertFalse(match('failures/expected/text.html', CRASH, True))
-        self.assertFalse(match('failures/expected/text.html', CRASH, False))
-        self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
-                              True))
-        self.assertTrue(match('failures/expected/image_checksum.html', PASS,
-                              False))
-        self.assertTrue(match('failures/expected/crash.html', SKIP, False))
-        self.assertTrue(match('passes/text.html', PASS, False))
+    def test_more_modifiers(self):
+        exp_str = """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST WIN RELEASE : passes/text.html = TEXT
+"""
+        self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+    def test_order_in_file(self):
+        exp_str = """
+BUG_TEST WIN RELEASE : passes/text.html = TEXT
+BUG_TEST RELEASE : passes/text.html = PASS
+"""
+        self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+    def test_version_overrides(self):
+        exp_str = """
+BUG_TEST WIN : passes/text.html = PASS
+BUG_TEST WIN XP : passes/text.html = TEXT
+"""
+        self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+    def test_macro_overrides(self):
+        exp_str = """
+BUG_TEST WIN : passes/text.html = PASS
+BUG_TEST WIN-XP : passes/text.html = TEXT
+"""
+        self.assertRaises(ParseError, self.parse_exp, exp_str)
 
 
 class RebaseliningTest(Base):
@@ -327,7 +390,8 @@ BUG_TEST REBASELINE : failures/expected/text.html = TEXT
     def test_remove_expand(self):
         self.assertRemove('mac',
             'BUGX REBASELINE : failures/expected/text.html = TEXT\n',
-            'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
+            'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n'
+            'BUGX REBASELINE WIN-XP : failures/expected/text.html = TEXT\n')
 
     def test_remove_mac_win(self):
         self.assertRemove('mac',
@@ -345,5 +409,85 @@ BUG_TEST REBASELINE : failures/expected/text.html = TEXT
             '\n\n')
 
 
+class ModifierTests(unittest.TestCase):
+    def setUp(self):
+        port_obj = port.get('test-win-xp', None)
+        self.config = port_obj.test_configuration()
+        self.matcher = ModifierMatcher(self.config)
+
+    def match(self, modifiers, expected_num_matches=-1, values=None, num_errors=0):
+        matcher = self.matcher
+        if values:
+            matcher = ModifierMatcher(self.FakeTestConfiguration(values))
+        match_result = matcher.match(modifiers)
+        self.assertEqual(len(match_result.warnings), 0)
+        self.assertEqual(len(match_result.errors), num_errors)
+        self.assertEqual(match_result.num_matches, expected_num_matches,
+             'match(%s, %s) returned -> %d, expected %d' %
+             (modifiers, str(self.config.values()),
+              match_result.num_matches, expected_num_matches))
+
+    def test_bad_match_modifier(self):
+        self.match(['foo'], num_errors=1)
+
+    def test_none(self):
+        self.match([], 0)
+
+    def test_one(self):
+        self.match(['xp'], 1)
+        self.match(['win'], 1)
+        self.match(['release'], 1)
+        self.match(['cpu'], 1)
+        self.match(['x86'], 1)
+        self.match(['leopard'], -1)
+        self.match(['gpu'], -1)
+        self.match(['debug'], -1)
+
+    def test_two(self):
+        self.match(['xp', 'release'], 2)
+        self.match(['win7', 'release'], -1)
+        self.match(['win7', 'xp'], 1)
+
+    def test_three(self):
+        self.match(['win7', 'xp', 'release'], 2)
+        self.match(['xp', 'debug', 'x86'], -1)
+        self.match(['xp', 'release', 'x86'], 3)
+        self.match(['xp', 'cpu', 'release'], 3)
+
+    def test_four(self):
+        self.match(['xp', 'release', 'cpu', 'x86'], 4)
+        self.match(['win7', 'xp', 'release', 'cpu'], 3)
+        self.match(['win7', 'xp', 'debug', 'cpu'], -1)
+
+    def test_case_insensitivity(self):
+        self.match(['Win'], num_errors=1)
+        self.match(['WIN'], num_errors=1)
+        self.match(['win'], 1)
+
+    def test_duplicates(self):
+        self.match(['release', 'release'], num_errors=1)
+        self.match(['win-xp', 'xp'], num_errors=1)
+        self.match(['win-xp', 'win-xp'], num_errors=1)
+        self.match(['xp', 'release', 'xp', 'release'], num_errors=2)
+        self.match(['rebaseline', 'rebaseline'], num_errors=1)
+
+    def test_unknown_option(self):
+        self.match(['vms'], num_errors=1)
+
+    def test_duplicate_bugs(self):
+        # BUG* regexes can appear multiple times.
+        self.match(['bugfoo', 'bugbar'], 0)
+
+    def test_invalid_combinations(self):
+        # FIXME: This should probably raise an error instead of NO_MATCH.
+        self.match(['mac', 'xp'], num_errors=0)
+
+    def test_regexes_are_ignored(self):
+        self.match(['bug123xy', 'rebaseline', 'wontfix', 'slow', 'skip'], 0)
+
+    def test_none_is_invalid(self):
+        self.match(['none'], num_errors=1)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
index b3f0828..6e4f8c4 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -214,21 +214,13 @@ class TestRunner:
 
     def lint(self):
         lint_failed = False
-
-        # Creating the expecations for each platform/configuration pair does
-        # all the test list parsing and ensures it's correct syntax (e.g. no
-        # dupes).
-        for platform_name in self._port.test_platform_names():
-            try:
-                self.parse_expectations(platform_name, is_debug_mode=True)
-            except test_expectations.ParseError:
-                lint_failed = True
+        for test_configuration in self._port.all_test_configurations():
             try:
-                self.parse_expectations(platform_name, is_debug_mode=False)
+                self.lint_expectations(test_configuration)
             except test_expectations.ParseError:
                 lint_failed = True
+                self._printer.write("")
 
-        self._printer.write("")
         if lint_failed:
             _log.error("Lint failed.")
             return -1
@@ -236,22 +228,28 @@ class TestRunner:
         _log.info("Lint succeeded.")
         return 0
 
-    def parse_expectations(self, test_platform_name, is_debug_mode):
+    def lint_expectations(self, config):
+        port = self._port
+        test_expectations.TestExpectations(
+            port,
+            None,
+            port.test_expectations(),
+            config,
+            self._options.lint_test_files,
+            port.test_expectations_overrides())
+
+    def parse_expectations(self):
         """Parse the expectations from the test_list files and return a data
         structure holding them. Throws an error if the test_list files have
         invalid syntax."""
-        if self._options.lint_test_files:
-            test_files = None
-        else:
-            test_files = self._test_files
-
-        expectations_str = self._port.test_expectations()
-        overrides_str = self._port.test_expectations_overrides()
+        port = self._port
         self._expectations = test_expectations.TestExpectations(
-            self._port, test_files, expectations_str, test_platform_name,
-            is_debug_mode, self._options.lint_test_files,
-            overrides=overrides_str)
-        return self._expectations
+            port,
+            self._test_files,
+            port.test_expectations(),
+            port.test_configuration(),
+            self._options.lint_test_files,
+            port.test_expectations_overrides())
 
     # FIXME: This method is way too long and needs to be broken into pieces.
     def prepare_lists_and_print_output(self):
@@ -358,9 +356,7 @@ class TestRunner:
             self._test_files_list = files + skip_chunk_list
             self._test_files = set(self._test_files_list)
 
-            self._expectations = self.parse_expectations(
-                self._port.test_platform_name(),
-                self._options.configuration == 'Debug')
+            self.parse_expectations()
 
             self._test_files = set(files)
             self._test_files_list = files
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index e2257ce..05d737d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -123,6 +123,7 @@ class Port(object):
         self._pretty_patch_available = self._filesystem.exists(self._pretty_patch_path)
         if not hasattr(self._options, 'configuration') or self._options.configuration is None:
             self._options.configuration = self.default_configuration()
+        self._test_configuration = None
 
     def default_child_processes(self):
         """Return the number of DumpRenderTree instances to use for this
@@ -579,6 +580,19 @@ class Port(object):
         if self._http_lock:
             self._http_lock.cleanup_http_lock()
 
+    #
+    # TEST EXPECTATION-RELATED METHODS
+    #
+
+    def test_configuration(self):
+        """Returns the current TestConfiguration for the port."""
+        if not self._test_configuration:
+            self._test_configuration = TestConfiguration(self)
+        return self._test_configuration
+
+    def all_test_configurations(self):
+        return self.test_configuration().all_test_configurations()
+
     def test_expectations(self):
         """Returns the test expectations for this port.
 
@@ -595,18 +609,6 @@ class Port(object):
         sync up the two repos."""
         return None
 
-    def test_base_platform_names(self):
-        """Return a list of the 'base' platforms on your port. The base
-        platforms represent different architectures, operating systems,
-        or implementations (as opposed to different versions of a single
-        platform). For example, 'mac' and 'win' might be different base
-        platforms, wherease 'mac-tiger' and 'mac-leopard' might be
-        different platforms. This routine is used by the rebaselining tool
-        and the dashboards, and the strings correspond to the identifiers
-        in your test expectations (*not* necessarily the platform names
-        themselves)."""
-        raise NotImplementedError('Port.base_test_platforms')
-
     def test_platform_name(self):
         """Returns the string that corresponds to the given platform name
         in the test expectations. This may be the same as name(), or it
@@ -865,3 +867,68 @@ class Driver:
 
     def stop(self):
         raise NotImplementedError('Driver.stop')
+
+
+class TestConfiguration(object):
+    def __init__(self, port=None, os=None, version=None, architecture=None,
+                 build_type=None, graphics_type=None):
+
+        # FIXME: We can get the O/S and version from test_platform_name()
+        # and version() for now, but those should go away and be cleaned up
+        # with more generic methods like operating_system() and os_version()
+        # or something. Note that we need to strip the leading '-' off the
+        # version string if it is present.
+        if port:
+            port_version = port.version()
+        self.os = os or port.test_platform_name().replace(port_version, '')
+        self.version = version or port_version[1:]
+        self.architecture = architecture or 'x86'
+        self.build_type = build_type or port._options.configuration.lower()
+        self.graphics_type = graphics_type or 'cpu'
+
+    def items(self):
+        return self.__dict__.items()
+
+    def keys(self):
+        return self.__dict__.keys()
+
+    def __str__(self):
+        return ("<%(version)s, %(build_type)s, %(graphics_type)s>" %
+                self.__dict__)
+
+    def __repr__(self):
+        return "TestConfig(os='%(os)s', version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s', graphics_type='%(graphics_type)s')" % self.__dict__
+
+    def values(self):
+        """Returns the configuration values of this instance as a tuple."""
+        return self.__dict__.values()
+
+    def all_test_configurations(self):
+        """Returns a sequence of the TestConfigurations the port supports."""
+        # By default, we assume we want to test every graphics type in
+        # every configuration on every system.
+        test_configurations = []
+        for system in self.all_systems():
+            for build_type in self.all_build_types():
+                for graphics_type in self.all_graphics_types():
+                    test_configurations.append(TestConfiguration(
+                        os=system[0],
+                        version=system[1],
+                        architecture=system[2],
+                        build_type=build_type,
+                        graphics_type=graphics_type))
+        return test_configurations
+
+    def all_systems(self):
+        return (('mac', 'leopard', 'x86'),
+                ('mac', 'snowleopard', 'x86'),
+                ('win', 'xp', 'x86'),
+                ('win', 'vista', 'x86'),
+                ('win', 'win7', 'x86'),
+                ('linux', 'hardy', 'x86'))
+
+    def all_build_types(self):
+        return ('debug', 'release')
+
+    def all_graphics_types(self):
+        return ('cpu', 'gpu')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
index 18b568c..ef90484 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -250,7 +250,6 @@ class VirtualTest(unittest.TestCase):
         self.assertVirtual(port.test_platform_name)
         self.assertVirtual(port.results_directory)
         self.assertVirtual(port.test_expectations)
-        self.assertVirtual(port.test_base_platform_names)
         self.assertVirtual(port.test_platform_name)
         self.assertVirtual(port.test_platforms)
         self.assertVirtual(port.test_platform_name_to_name, None)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index ad1bea6..179a9a6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -241,9 +241,6 @@ class ChromiumPort(base.Port):
             # http://bugs.python.org/issue1731717
             self._helper.wait()
 
-    def test_base_platform_names(self):
-        return ('linux', 'mac', 'win')
-
     def test_expectations(self):
         """Returns the test expectations for this port.
 
@@ -273,15 +270,14 @@ class ChromiumPort(base.Port):
             all_test_files.update(extra_test_files)
 
         expectations = test_expectations.TestExpectations(
-            self, all_test_files, expectations_str, test_platform_name,
-            is_debug_mode, is_lint_mode=False, overrides=overrides_str)
+            self, all_test_files, expectations_str, self.test_configuration(),
+            is_lint_mode=False, overrides=overrides_str)
         tests_dir = self.layout_tests_dir()
         return [self.relative_test_filename(test)
                 for test in expectations.get_tests_with_result_type(test_expectations.SKIP)]
 
     def test_platform_names(self):
-        return self.test_base_platform_names() + ('win-xp',
-            'win-vista', 'win-7')
+        return ('mac', 'win', 'linux', 'win-xp', 'win-vista', 'win-7')
 
     def test_platform_name_to_name(self, test_platform_name):
         if test_platform_name in self.test_platform_names():
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
index 0b03b4c..4146d40 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -88,3 +88,15 @@ class PortTestCase(unittest.TestCase):
             return
         port.start_websocket_server()
         port.stop_websocket_server()
+
+    def test_test_configuration(self):
+        port = self.make_port()
+        if not port:
+            return
+        self.assertTrue(port.test_configuration())
+
+    def test_all_test_configurations(self):
+        port = self.make_port()
+        if not port:
+            return
+        self.assertTrue(len(port.all_test_configurations()) > 0)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index 66618a5..6d71a96 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -215,8 +215,8 @@ class TestPort(base.Port):
         return 'junk'
 
     def baseline_path(self):
-        return self._filesystem.join(self.layout_tests_dir(), 'platform',
-                                     self.name() + self.version())
+        # We don't bother with a fallback path.
+        return self._filesystem.join(self.layout_tests_dir(), 'platform', self.name())
 
     def baseline_search_path(self):
         return [self.baseline_path()]
@@ -224,6 +224,9 @@ class TestPort(base.Port):
     def check_build(self, needs_http):
         return True
 
+    def default_configuration(self):
+        return 'Release'
+
     def diff_image(self, expected_contents, actual_contents,
                    diff_filename=None):
         diffed = actual_contents != expected_contents
@@ -262,24 +265,25 @@ class TestPort(base.Port):
     def stop_websocket_server(self):
         pass
 
-    def test_base_platform_names(self):
-        return ('mac', 'win')
-
     def path_to_test_expectations_file(self):
         return self._expectations_path
 
     def test_platform_name(self):
-        if self._name == 'test-win':
-            return 'win'
-        return 'mac'
+        name_map = {
+            'test-mac': 'mac',
+            'test-win': 'win',
+            'test-win-xp': 'win-xp',
+        }
+        return name_map[self._name]
 
     def test_platform_names(self):
-        return self.test_base_platform_names()
+        return ('mac', 'win', 'win-xp')
 
     def test_platform_name_to_name(self, test_platform_name):
         name_map = {
             'mac': 'test-mac',
             'win': 'test-win',
+            'win-xp': 'test-win-xp',
         }
         return name_map[test_platform_name]
 
@@ -341,9 +345,18 @@ class TestPort(base.Port):
 
         raise NotImplementedError('unknown url type: %s' % uri)
 
-
     def version(self):
-        return ''
+        version_map = {
+            'test-win-xp': '-xp',
+            'test-win': '-7',
+            'test-mac': '-leopard',
+        }
+        return version_map[self._name]
+
+    def test_configuration(self):
+        if not self._test_configuration:
+            self._test_configuration = TestTestConfiguration(self)
+        return self._test_configuration
 
 
 class TestDriver(base.Driver):
@@ -378,3 +391,10 @@ class TestDriver(base.Driver):
 
     def stop(self):
         pass
+
+
+class TestTestConfiguration(base.TestConfiguration):
+    def all_systems(self):
+        return (('mac', 'leopard', 'x86'),
+                ('win', 'xp', 'x86'),
+                ('win', 'win7', 'x86'))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
index 3f00927..0ade755 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -180,11 +180,6 @@ class WebKitPort(base.Port):
     def create_driver(self, worker_number):
         return WebKitDriver(self, worker_number)
 
-    def test_base_platform_names(self):
-        # At the moment we don't use test platform names, but we have
-        # to return something.
-        return ('mac', 'win')
-
     def _tests_for_other_platforms(self):
         raise NotImplementedError('WebKitPort._tests_for_other_platforms')
         # The original run-webkit-tests builds up a "whitelist" of tests to
@@ -337,8 +332,7 @@ class WebKitPort(base.Port):
         return self._name + self.version()
 
     def test_platform_names(self):
-        return self.test_base_platform_names() + (
-            'mac-tiger', 'mac-leopard', 'mac-snowleopard')
+        return ('mac', 'win', 'mac-tiger', 'mac-leopard', 'mac-snowleopard')
 
     def _build_path(self, *comps):
         return self._filesystem.join(self._config.build_directory(
diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
index 21a1760..567975c 100644
--- a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
@@ -159,6 +159,7 @@ class Rebaseliner(object):
         self._port = running_port
         self._filesystem = running_port._filesystem
         self._target_port = target_port
+
         self._rebaseline_port = port.get(
             self._target_port.test_platform_name_to_name(platform), options,
             filesystem=self._filesystem)
@@ -174,8 +175,7 @@ class Rebaseliner(object):
             test_expectations.TestExpectations(self._rebaseline_port,
                                                None,
                                                expectations_str,
-                                               self._platform,
-                                               False,
+                                               self._rebaseline_port.test_configuration(),
                                                False)
         self._url_fetcher = url_fetcher
         self._zip_factory = zip_factory
diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py
index aace036..730220b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py
@@ -70,7 +70,7 @@ def test_options():
                                 target_platform='chromium',
                                 verbose=False,
                                 quiet=False,
-                                platforms='mac,win')
+                                platforms='mac,win,win-xp')
 
 
 def test_host_port_and_filesystem(options, expectations):
@@ -180,9 +180,11 @@ class TestRebaseliner(unittest.TestCase):
         # We expect to have written 12 files over the course of this rebaseline:
         # *) 3 files in /__im_tmp for the extracted archive members
         # *) 3 new baselines under '/test.checkout/LayoutTests'
-        # *) 1 updated test_expectations file
         # *) 4 files in /tmp for the new and old baselines in the result file
-        # *) 1 text diff in /tmp for the result file
+        #    (-{old,new}.{txt,png})
+        # *) 1 text diff in /tmp for the result file (-diff.txt). We don't
+        #    create image diffs (FIXME?) and don't display the checksums.
+        # *) 1 updated test_expectations file
         self.assertEqual(len(filesystem.written_files), 12)
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], '')
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.checksum'], 'new-image-checksum')
@@ -197,7 +199,9 @@ class TestRebaseliner(unittest.TestCase):
         # Note that even though the rebaseline is marked for all platforms, each
         # rebaseliner only ever does one.
         self.assertEqual(len(filesystem.written_files), 12)
-        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], 'BUGX REBASELINE WIN : failures/expected/image.html = IMAGE\n')
+        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'],
+            'BUGX REBASELINE WIN : failures/expected/image.html = IMAGE\n'
+            'BUGX REBASELINE WIN-XP : failures/expected/image.html = IMAGE\n')
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.checksum'], 'new-image-checksum')
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.png'], 'new-image-png')
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.txt'], 'new-image-txt')
@@ -235,9 +239,12 @@ class TestRealMain(unittest.TestCase):
             host_port_obj, url_fetcher, zip_factory, mock_scm)
         oc.restore_output()
 
-        # We expect to have written 24 files over the course of this rebaseline:
-        # 2 * test_all_platforms, above
-        self.assertEqual(len(filesystem.written_files), 24)
+        # We expect to have written 35 files over the course of this rebaseline:
+        # *) 11 files * 3 ports for the new baselines and the diffs (see breakdown
+        #    under test_one_platform, above)
+        # *) the updated test_expectations file
+        # *) the rebaseline results html file
+        self.assertEqual(len(filesystem.written_files), 35)
         self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], '')
 
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 136e995..074d9c4 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -98,11 +98,11 @@ def run(port, options, args, regular_output=sys.stderr,
                 return -1
             raise
 
-        printer.print_update("Parsing expectations ...")
         if options.lint_test_files:
             return runner.lint()
-        runner.parse_expectations(port.test_platform_name(),
-                                  options.configuration == 'Debug')
+
+        printer.print_update("Parsing expectations ...")
+        runner.parse_expectations()
 
         printer.print_update("Checking build ...")
         if not port.check_build(runner.needs_http()):
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
index c86b32c..e37f908 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
@@ -75,7 +75,6 @@ class TestExpectationsChecker(object):
                       "Using 'test' port, but platform-specific expectations "
                       "will fail the check." % self._file_path)
             self._port_obj = port.get('test')
-        self._port_to_check = self._port_obj.test_platform_name()
         # Suppress error messages of test_expectations module since they will be
         # reported later.
         log = logging.getLogger("webkitpy.layout_tests.layout_package."
@@ -91,7 +90,7 @@ class TestExpectationsChecker(object):
         try:
             expectations = test_expectations.TestExpectationsFile(
                 port=self._port_obj, expectations=expectations_str, full_test_list=tests,
-                test_platform_name=self._port_to_check, is_debug_mode=False,
+                test_config=self._port_obj.test_configuration(),
                 is_lint_mode=True, overrides=overrides)
         except test_expectations.ParseError, error:
             err = error
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
index 9817c5d..f0813e1 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -84,15 +84,6 @@ class TestExpectationsTestCase(unittest.TestCase):
 
     def test_valid_expectations(self):
         self.assert_lines_lint(
-            ["passes/text.html = PASS"],
-            "")
-        self.assert_lines_lint(
-            ["passes/text.html = FAIL PASS"],
-            "")
-        self.assert_lines_lint(
-            ["passes/text.html = CRASH TIMEOUT FAIL PASS"],
-            "")
-        self.assert_lines_lint(
             ["BUGCR1234 MAC : passes/text.html = PASS FAIL"],
             "")
         self.assert_lines_lint(
@@ -120,12 +111,12 @@ class TestExpectationsTestCase(unittest.TestCase):
     def test_modifier_errors(self):
         self.assert_lines_lint(
             ["BUG1234 : passes/text.html = FAIL"],
-            'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: bug1234 passes/text.html  [test/expectations] [5]')
+            "BUG\\d+ is not allowed, must be one of BUGCR\\d+, BUGWK\\d+, BUGV8_\\d+, or a non-numeric bug identifier. passes/text.html  [test/expectations] [5]")
 
     def test_valid_modifiers(self):
         self.assert_lines_lint(
             ["INVALID-MODIFIER : passes/text.html = PASS"],
-            "Invalid modifier for test: invalid-modifier "
+            "Unrecognized option 'invalid-modifier' "
             "passes/text.html  [test/expectations] [5]")
         self.assert_lines_lint(
             ["SKIP : passes/text.html = PASS"],
@@ -135,38 +126,38 @@ class TestExpectationsTestCase(unittest.TestCase):
     def test_expectation_errors(self):
         self.assert_lines_lint(
             ["missing expectations"],
-            "Missing expectations. ['missing expectations']  [test/expectations] [5]")
+            "Missing a ':' missing expectations  [test/expectations] [5]")
         self.assert_lines_lint(
             ["SLOW : passes/text.html = TIMEOUT"],
-            "A test can not be both slow and timeout. "
-            "If it times out indefinitely, then it should be just timeout. "
+            "A test can not be both SLOW and TIMEOUT. "
+            "If it times out indefinitely, then it should be just TIMEOUT. "
             "passes/text.html  [test/expectations] [5]")
         self.assert_lines_lint(
-            ["does/not/exist.html = FAIL"],
+            ["BUGWK1 : does/not/exist.html = FAIL"],
             "Path does not exist. does/not/exist.html  [test/expectations] [2]")
 
     def test_parse_expectations(self):
         self.assert_lines_lint(
-            ["passes/text.html = PASS"],
+            ["BUGWK1 : passes/text.html = PASS"],
             "")
         self.assert_lines_lint(
-            ["passes/text.html = UNSUPPORTED"],
+            ["BUGWK1 : passes/text.html = UNSUPPORTED"],
             "Unsupported expectation: unsupported "
             "passes/text.html  [test/expectations] [5]")
         self.assert_lines_lint(
-            ["passes/text.html = PASS UNSUPPORTED"],
+            ["BUGWK1 : passes/text.html = PASS UNSUPPORTED"],
             "Unsupported expectation: unsupported "
             "passes/text.html  [test/expectations] [5]")
 
     def test_already_seen_test(self):
         self.assert_lines_lint(
-            ["passes/text.html = PASS",
-             "passes/text.html = TIMEOUT"],
-            "Duplicate expectation. %s  [test/expectations] [5]" % self._test_file)
+            ["BUGWK1 : passes/text.html = PASS",
+             "BUGWK1 : passes/text.html = TIMEOUT"],
+            "Duplicate or ambiguous expectation. %s  [test/expectations] [5]" % self._test_file)
 
     def test_tab(self):
         self.assert_lines_lint(
-            ["\tpasses/text.html = PASS"],
+            ["\tBUGWK1 : passes/text.html = PASS"],
             "Line contains tab character.  [whitespace/tab] [5]")
 
 if __name__ == '__main__':

-- 
WebKit Debian packaging



More information about the Pkg-webkit-commits mailing list