Title: [91704] trunk/Tools
Revision: 91704
Author: [email protected]
Date: 2011-07-25 13:45:12 -0700 (Mon, 25 Jul 2011)

Log Message

stop generating expectations.json now that it's unused
https://bugs.webkit.org/show_bug.cgi?id=65130

Reviewed by Adam Barth.

* Scripts/webkitpy/layout_tests/controllers/manager.py:
* Scripts/webkitpy/layout_tests/models/test_expectations.py:
* Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py:
* TestResultServer/handlers/menu.py:
* TestResultServer/static-dashboards/dashboard_base.js:
* TestResultServer/static-dashboards/flakiness_dashboard.html:

Modified Paths

    trunk/Tools/ChangeLog
    trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
    trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
    trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
    trunk/Tools/TestResultServer/handlers/menu.py
    trunk/Tools/TestResultServer/static-dashboards/dashboard_base.js
    trunk/Tools/TestResultServer/static-dashboards/flakiness_dashboard.html

Diff

Modified: trunk/Tools/ChangeLog (91703 => 91704)


--- trunk/Tools/ChangeLog	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/ChangeLog	2011-07-25 20:45:12 UTC (rev 91704)
@@ -1,5 +1,19 @@
 2011-07-25  Ojan Vafai  <[email protected]>
 
+        stop generating expectations.json now that it's unused
+        https://bugs.webkit.org/show_bug.cgi?id=65130
+
+        Reviewed by Adam Barth.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        * Scripts/webkitpy/layout_tests/models/test_expectations.py:
+        * Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py:
+        * TestResultServer/handlers/menu.py:
+        * TestResultServer/static-dashboards/dashboard_base.js:
+        * TestResultServer/static-dashboards/flakiness_dashboard.html:
+
+2011-07-25  Ojan Vafai  <[email protected]>
+
         don't show builders in the flakiness dashboard that have stopped running a given test suite
         https://bugs.webkit.org/show_bug.cgi?id=65126
 

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (91703 => 91704)


--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py	2011-07-25 20:45:12 UTC (rev 91704)
@@ -1040,11 +1040,6 @@
         """Writes the results of the test run as JSON files into the results
         dir and upload the files to the appengine server.
 
-        There are two different files written into the results dir:
-          expectations.json: This is used by the flakiness dashboard.
-          results.json: A full list of the results - used by the flakiness
-            dashboard and the aggregate results dashboard.
-
         Args:
           unexpected_results: dict of unexpected results
           summarized_results: dict of results
@@ -1061,14 +1056,6 @@
         full_results_path = self._fs.join(self._results_directory, "full_results.json")
         json_results_generator.write_json(self._fs, summarized_results, full_results_path)
 
-        # Write a json file of the test_expectations.txt file for the layout
-        # tests dashboard.
-        expectations_path = self._fs.join(self._results_directory, "expectations.json")
-        expectations_json = \
-            self._expectations.get_expectations_json_for_all_platforms()
-        self._fs.write_text_file(expectations_path,
-                                 u"ADD_EXPECTATIONS(%s);" % expectations_json)
-
         generator = json_layout_results_generator.JSONLayoutResultsGenerator(
             self._port, self._options.builder_name, self._options.build_name,
             self._options.build_number, self._results_directory,
@@ -1080,7 +1067,7 @@
 
         _log.debug("Finished writing JSON files.")
 
-        json_files = ["expectations.json", "incremental_results.json", "full_results.json", "times_ms.json"]
+        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
 
         generator.upload_json_files(json_files)
 
@@ -1088,7 +1075,6 @@
 
         # Remove these files from the results directory so they don't take up too much space on the buildbot.
         # The tools use the version we uploaded to the results server anyway.
-        self._fs.remove(expectations_path)
         self._fs.remove(times_json_path)
         self._fs.remove(incremental_results_path)
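
[Editor's note] With expectations.json gone, the JSON write/upload flow above reduces to the three files left in the json_files list. A minimal standalone sketch of the post-patch flow (the fs and generator arguments here are hypothetical stand-ins for webkitpy's real filesystem and JSONLayoutResultsGenerator objects, not the actual API):

    import json

    def write_and_upload_json_files(fs, results_directory, summarized_results, generator):
        # Write the summarized results; this file feeds the flakiness and
        # aggregate results dashboards.
        full_results_path = fs.join(results_directory, "full_results.json")
        fs.write_text_file(full_results_path, json.dumps(summarized_results))

        # expectations.json no longer appears in the upload list after r91704.
        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
        generator.upload_json_files(json_files)

        # Drop the uploaded temporaries so they don't take up space on the
        # buildbot; full_results.json stays behind in the results directory.
        fs.remove(fs.join(results_directory, "times_ms.json"))
        fs.remove(fs.join(results_directory, "incremental_results.json"))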
 

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py (91703 => 91704)


--- trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py	2011-07-25 20:45:12 UTC (rev 91704)
@@ -121,27 +121,6 @@
         return 'ParseError(fatal=%s, errors=%s)' % (self.fatal, self.errors)
 
 
-class ModifiersAndExpectations:
-    """A holder for modifiers and expectations on a test that serializes to
-    JSON."""
-
-    def __init__(self, modifiers, expectations):
-        self.modifiers = modifiers
-        self.expectations = expectations
-
-
-class ExpectationsJsonEncoder(json.JSONEncoder):
-    """JSON encoder that can handle ModifiersAndExpectations objects."""
-    def default(self, obj):
-        # A ModifiersAndExpectations object has two fields, each of which
-        # is a dict. Since JSONEncoders handle all the builtin types directly,
-        # the only time this routine should be called is on the top level
-        # object (i.e., the encoder shouldn't recurse).
-        assert isinstance(obj, ModifiersAndExpectations)
-        return {"modifiers": obj.modifiers,
-                "expectations": obj.expectations}
-
-
 class TestExpectationSerializer:
     """Provides means of serializing TestExpectationLine instances."""
     @classmethod
@@ -663,13 +642,6 @@
         self._model = TestExpectationsModel()
         self._parser = TestExpectationParser(port, test_config, tests, is_lint_mode)
 
-        # Maps relative test paths as listed in the expectations file to a
-        # list of maps containing modifiers and expectations for each time
-        # the test is listed in the expectations file. We use this to
-        # keep a representation of the entire list of expectations, even
-        # invalid ones.
-        self._all_expectations = {}
-
         self._expectations = TestExpectationParser.tokenize_list(expectations)
         self._add_expectations(self._expectations, overrides_allowed=False)
 
@@ -776,11 +748,6 @@
                 if not self._model.has_test(test):
                     self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test), overrides_allowed=False)
 
-    def get_expectations_json_for_all_platforms(self):
-        # Specify separators in order to get compact encoding.
-        return ExpectationsJsonEncoder(separators=(',', ':')).encode(
-            self._all_expectations)
-
     def has_warnings(self):
         return self._has_warnings
 
@@ -791,21 +758,11 @@
 
         return TestExpectationSerializer.list_to_string(filter(without_rebaseline_modifier, self._expectations))
 
-    def _add_to_all_expectations(self, test, modifiers, expectations):
-        if not test in self._all_expectations:
-            self._all_expectations[test] = []
-        self._all_expectations[test].append(
-            ModifiersAndExpectations(modifiers, expectations))
-
     def _add_expectations(self, expectation_list, overrides_allowed):
         for expectation_line in expectation_list:
             if not expectation_line.expectations:
                 continue
 
-            self._add_to_all_expectations(expectation_line.name,
-                                            " ".join(expectation_line.modifiers).upper(),
-                                            " ".join(expectation_line.expectations).upper())
-
             self._parser.parse(expectation_line)
             self._model.add_expectation_line(expectation_line, overrides_allowed)
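
[Editor's note] For reference, the deleted ModifiersAndExpectations/ExpectationsJsonEncoder pair follows the standard json.JSONEncoder extension pattern: override default() to translate a non-builtin object into plain dicts, and pass compact separators. A self-contained sketch of that pattern, recreated from the removed code above rather than taken from webkitpy:

    import json

    class ModifiersAndExpectations(object):
        # Plain holder; json.dumps cannot serialize it without an encoder hook.
        def __init__(self, modifiers, expectations):
            self.modifiers = modifiers
            self.expectations = expectations

    class ExpectationsJsonEncoder(json.JSONEncoder):
        # default() is invoked only for objects the encoder cannot
        # serialize on its own.
        def default(self, obj):
            if isinstance(obj, ModifiersAndExpectations):
                return {"modifiers": obj.modifiers,
                        "expectations": obj.expectations}
            return json.JSONEncoder.default(self, obj)

    # separators=(',', ':') strips the spaces the encoder inserts by default,
    # which is what "compact encoding" meant in the removed comment.
    encoder = ExpectationsJsonEncoder(separators=(',', ':'))
    print(encoder.encode({"fast/foo.html": [ModifiersAndExpectations("MAC", "PASS FAIL")]}))
    # {"fast/foo.html":[{"modifiers":"MAC","expectations":"PASS FAIL"}]}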
 

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py (91703 => 91704)


--- trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py	2011-07-25 20:45:12 UTC (rev 91704)
@@ -156,12 +156,6 @@
         self.assertEqual(self._exp.get_modifiers(
                          self.get_test('passes/text.html')), [])
 
-    def test_expectations_json_for_all_platforms(self):
-        self.parse_exp(self.get_basic_expectations())
-        json_str = self._exp.get_expectations_json_for_all_platforms()
-        # FIXME: test actual content?
-        self.assertTrue(json_str)
-
     def test_get_expectations_string(self):
         self.parse_exp(self.get_basic_expectations())
         self.assertEquals(self._exp.get_expectations_string(

Modified: trunk/Tools/TestResultServer/handlers/menu.py (91703 => 91704)


--- trunk/Tools/TestResultServer/handlers/menu.py	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/TestResultServer/handlers/menu.py	2011-07-25 20:45:12 UTC (rev 91704)
@@ -40,7 +40,6 @@
 menu = [
     ["List of test files", "/testfile"],
     ["List of results.json files", "/testfile?name=results.json"],
-    ["List of expectations.json files", "/testfile?name=expectations.json"],
     ["Upload test file", "/testfile/uploadform"],
 ]
 

Modified: trunk/Tools/TestResultServer/static-dashboards/dashboard_base.js (91703 => 91704)


--- trunk/Tools/TestResultServer/static-dashboards/dashboard_base.js	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/TestResultServer/static-dashboards/dashboard_base.js	2011-07-25 20:45:12 UTC (rev 91704)
@@ -446,8 +446,7 @@
 function initBuilders(state)
 {
     if (state.buildDir) {
-        // If buildDir is set, point to the results.json and expectations.json in the
-        // local tree. Useful for debugging changes to the python JSON generator.
+        // If buildDir is set, point to the results.json in the local tree. Useful for debugging changes to the python JSON generator.
         g_defaultBuilderName = 'DUMMY_BUILDER_NAME';
         g_builders = {'DUMMY_BUILDER_NAME': ''};
         var loc = document.location.toString();

Modified: trunk/Tools/TestResultServer/static-dashboards/flakiness_dashboard.html (91703 => 91704)


--- trunk/Tools/TestResultServer/static-dashboards/flakiness_dashboard.html	2011-07-25 20:20:29 UTC (rev 91703)
+++ trunk/Tools/TestResultServer/static-dashboards/flakiness_dashboard.html	2011-07-25 20:45:12 UTC (rev 91704)
@@ -289,21 +289,6 @@
 <script src=""
 <script src=""
 <script>
-// @fileoverview Creates a dashboard for multiple runs of a given set of tests
-// on the buildbots. Pulls in JSONP-ish files with the results for running
-// tests on a given builder (i.e. ADD_RESULTS(json_here)) and the expectations
-// for all tests on all builders (i.e. ADD_EXPECTATIONS(json_here)).
-//
-// This shows flakiness of the tests as well as runtimes for slow tests.
-//
-// Also, each column in the dashboard is sortable.
-//
-// Currently, only webkit tests are supported, but adding other test types
-// should just require the following steps:
-//   -generate results.json and expectations.json for these tests
-//   -copy them to the appropriate location
-//   -add the builder name to the list of builders below.
-
 //////////////////////////////////////////////////////////////////////////////
 // CONSTANTS
 //////////////////////////////////////////////////////////////////////////////
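
[Editor's note] The "JSONP-ish" files mentioned in the deleted fileoverview are plain JSON wrapped in a function call, e.g. ADD_RESULTS(json_here); — the same shape as the u"ADD_EXPECTATIONS(%s);" string removed from manager.py above. A hedged sketch of how such a payload can be produced and unwrapped (the helper names are my own, not webkitpy's):

    import json

    def wrap_jsonp(function_name, payload):
        # Mirrors the "FUNC(json);" format the generator wrote, e.g.
        # ADD_RESULTS(...) or, before this patch, ADD_EXPECTATIONS(...).
        return "%s(%s);" % (function_name, json.dumps(payload, separators=(',', ':')))

    def unwrap_jsonp(text):
        # Slice out the body between the first '(' and the last ')' so the
        # payload can be parsed as JSON instead of executed as script.
        return json.loads(text[text.index("(") + 1:text.rindex(")")])

    wrapped = wrap_jsonp("ADD_RESULTS", {"tests": {"fast/foo.html": {"results": "P"}}})
    assert unwrap_jsonp(wrapped) == {"tests": {"fast/foo.html": {"results": "P"}}}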