Title: [97672] trunk/Tools
Revision
97672
Author
[email protected]
Date
2011-10-17 16:42:42 -0700 (Mon, 17 Oct 2011)

Log Message

[nrwt] Add new category for MISSING
https://bugs.webkit.org/show_bug.cgi?id=69990

Reviewed by Dirk Pranke.

Treat missing results as a new category in summarize_results.

Because Chromium port wants to turn bots red when there are tests with missing results,
extracted the logic to compute the exit code as exit_code_from_summarized_results in
base and chromium ports.

* Scripts/webkitpy/layout_tests/controllers/manager.py:
* Scripts/webkitpy/layout_tests/port/base.py:
* Scripts/webkitpy/layout_tests/port/chromium.py:
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:

Modified Paths

Diff

Modified: trunk/Tools/ChangeLog (97671 => 97672)


--- trunk/Tools/ChangeLog	2011-10-17 23:41:20 UTC (rev 97671)
+++ trunk/Tools/ChangeLog	2011-10-17 23:42:42 UTC (rev 97672)
@@ -1,3 +1,21 @@
+2011-10-17  Ryosuke Niwa  <[email protected]>
+
+        [nrwt] Add new category for MISSING
+        https://bugs.webkit.org/show_bug.cgi?id=69990
+
+        Reviewed by Dirk Pranke.
+
+        Treat missing results as a new category in summarize_results.
+
+        Because Chromium port wants to turn bots red when there are tests with missing results,
+        extracted the logic to compute the exit code as exit_code_from_summarized_results in
+        base and chromium ports.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        * Scripts/webkitpy/layout_tests/port/base.py:
+        * Scripts/webkitpy/layout_tests/port/chromium.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+
 2011-10-17  Dimitri Glazkov  <[email protected]>
 
         garden-o-matic operations fail because of CSP.

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (97671 => 97672)


--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py	2011-10-17 23:41:20 UTC (rev 97671)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py	2011-10-17 23:42:42 UTC (rev 97672)
@@ -88,11 +88,12 @@
         A dictionary containing a summary of the unexpected results from the
         run, with the following fields:
         'version': a version indicator
-        'fixable': # of fixable tests (NOW - PASS)
-        'skipped': # of skipped tests (NOW & SKIPPED)
-        'num_regressions': # of non-flaky failures
-        'num_flaky': # of flaky failures
-        'num_passes': # of unexpected passes
+        'fixable': The number of fixable tests (NOW - PASS)
+        'skipped': The number of skipped tests (NOW & SKIPPED)
+        'num_regressions': The number of non-flaky failures
+        'num_flaky': The number of flaky failures
+        'num_missing': The number of tests with missing results
+        'num_passes': The number of unexpected passes
         'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
     """
     results = {}
@@ -105,6 +106,7 @@
 
     num_passes = 0
     num_flaky = 0
+    num_missing = 0
     num_regressions = 0
     keywords = {}
     for expecation_string, expectation_enum in TestExpectations.EXPECTATIONS.iteritems():
@@ -139,11 +141,8 @@
         elif result_type == test_expectations.CRASH:
             num_regressions += 1
         elif result_type == test_expectations.MISSING:
-            # We count missing results as flaky not to turn buildbot red
-            # This is a huge hack should be fixed by adding new category for MISSING results.
-            # See also: https://bugs.webkit.org/show_bug.cgi?id=64812
             if test_name in result_summary.unexpected_results:
-                num_flaky += 1
+                num_missing += 1
         elif test_name in result_summary.unexpected_results:
             if test_name not in retry_summary.unexpected_results:
                 actual.extend(expectations.get_expectations_string(test_name).split(" "))
@@ -209,6 +208,7 @@
     results['tests'] = tests
     results['num_passes'] = num_passes
     results['num_flaky'] = num_flaky
+    results['num_missing'] = num_missing
     results['num_regressions'] = num_regressions
     results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
     results['interrupted'] = interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
@@ -927,9 +927,7 @@
             if self._options.show_results:
                 self._show_results_html_file(result_summary)
 
-        # Ignore flaky failures and unexpected passes so we don't turn the
-        # bot red for those.
-        return unexpected_results['num_regressions']
+        return self._port.exit_code_from_summarized_results(unexpected_results)
 
     def start_servers_with_lock(self):
         assert(self._options.http)

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py (97671 => 97672)


--- trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py	2011-10-17 23:41:20 UTC (rev 97671)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py	2011-10-17 23:42:42 UTC (rev 97672)
@@ -721,6 +721,13 @@
         if self._http_lock:
             self._http_lock.cleanup_http_lock()
 
+    def exit_code_from_summarized_results(self, unexpected_results):
+        """Given summarized results, compute the exit code to be returned by new-run-webkit-tests.
+        Bots turn red when this function returns a non-zero value. By default, return the number of regressions
+        to avoid turning bots red by flaky failures, unexpected passes, and missing results"""
+        # Don't turn bots red for flaky failures, unexpected passes, and missing results.
+        return unexpected_results['num_regressions']
+
     #
     # TEST EXPECTATION-RELATED METHODS
     #

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium.py (97671 => 97672)


--- trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium.py	2011-10-17 23:41:20 UTC (rev 97671)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium.py	2011-10-17 23:42:42 UTC (rev 97672)
@@ -274,6 +274,10 @@
             self._helper.stdin.close()
             self._helper.wait()
 
+    def exit_code_from_summarized_results(self, unexpected_results):
+        # Turn bots red for missing results.
+        return unexpected_results['num_regressions'] + unexpected_results['num_missing']
+
     def configuration_specifier_macros(self):
         return self.CONFIGURATION_SPECIFIER_MACROS
 

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (97671 => 97672)


--- trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py	2011-10-17 23:41:20 UTC (rev 97671)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py	2011-10-17 23:42:42 UTC (rev 97672)
@@ -441,8 +441,32 @@
         expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
         json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find(expected_token) != -1)
-        self.assertTrue(json_string.find('"num_flaky":1') != -1)
+        self.assertTrue(json_string.find('"num_regression":1') == -1)
+        self.assertTrue(json_string.find('"num_flaky":1') == -1)
+        self.assertTrue(json_string.find('"num_missing":1') != -1)
 
+    def test_missing_and_unexpected_results_with_custom_exit_code(self):
+        # Test that we update expectations in place. If the expectation
+        # is missing, update the expected generic location.
+        fs = unit_test_filesystem()
+
+        class CustomExitCodePort(TestPort):
+            def exit_code_from_summarized_results(self, unexpected_results):
+                return unexpected_results['num_regressions'] + unexpected_results['num_missing']
+
+        test_port = CustomExitCodePort(options=options, user=mocktool.MockUser())
+        res, out, err, _ = logging_run(['--no-show-results',
+            'failures/expected/missing_image.html',
+            'failures/unexpected/missing_text.html',
+            'failures/unexpected/text-image-checksum.html'],
+            tests_included=True, filesystem=fs, record_results=True, port_obj=test_port)
+        file_list = fs.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        self.assertEquals(res, 2)
+        self.assertTrue(json_string.find('"num_regression":1') == -1)
+        self.assertTrue(json_string.find('"num_flaky":1') == -1)
+        self.assertTrue(json_string.find('"num_missing":1') != -1)
+
     def test_crash_with_stderr(self):
         fs = unit_test_filesystem()
         res, buildbot_output, regular_output, user = logging_run([
_______________________________________________
webkit-changes mailing list
[email protected]
http://lists.webkit.org/mailman/listinfo.cgi/webkit-changes

Reply via email to