Title: [137183] trunk/Tools
Revision: 137183
Author: dpra...@chromium.org
Date: 2012-12-10 10:58:39 -0800 (Mon, 10 Dec 2012)

Log Message

nrwt: fix total number of tests computed in the result summary
https://bugs.webkit.org/show_bug.cgi?id=104061

Reviewed by Jochen Eisinger.

Good thing I left this open, because I actually broke it again
the same way in a merge. Fix the computation of the one_line_summary
again, and add tests for it (also clean up some redundant tests).

Also, add constants to the test port code so that the knowledge
of how many tests should fail, and how, lives next to the list of
tests rather than in a totally different place.
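
As a worked illustration of the arithmetic this restores (a minimal
sketch, not the actual webkitpy code; the field names mirror the ones
asserted in test_basic below, and the helper itself is hypothetical):

    # Hypothetical helper; the real logic lives in ResultSummary and the
    # printing code, not in a standalone function like this.
    def one_line_summary(total, expected_skips, unexpected_results):
        # 'total' must count every test, including the ones that are skipped.
        ran_as_expected = total - expected_skips - len(unexpected_results)
        return "%d tests ran as expected, %d didn't:\n" % (
            ran_as_expected, len(unexpected_results))

    # With the constants added to port/test.py below (104 tests, 25 skips,
    # 6 unexpected passes + 17 unexpected failures), this would produce
    # "56 tests ran as expected, 23 didn't:".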

* Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
(LayoutTestRunner.run_tests):
* Scripts/webkitpy/layout_tests/port/test.py:
(TestList.__getitem__):
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
(get_test_results):
(MainTest.setUp):
(MainTest.test_basic):
(MainTest.test_run_singly_actually_runs_tests):
(MainTest.test_test_list_with_prefix):
(MainTest.test_missing_and_unexpected_results_with_custom_exit_code.CustomExitCodePort):
(MainTest.test_tolerance.ImageDiffTestPort):
(EndToEndTest.parse_full_results):

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py

Diff

Modified: trunk/Tools/ChangeLog (137182 => 137183)


--- trunk/Tools/ChangeLog	2012-12-10 18:47:20 UTC (rev 137182)
+++ trunk/Tools/ChangeLog	2012-12-10 18:58:39 UTC (rev 137183)
@@ -1,3 +1,32 @@
+2012-12-10  Dirk Pranke  <dpra...@chromium.org>
+
+        nrwt: fix total number of tests computed in the result summary
+        https://bugs.webkit.org/show_bug.cgi?id=104061
+
+        Reviewed by Jochen Eisinger.
+
+        Good thing I left this open, because I actually broke it again
+        the same way in a merge. Fix the computation of the one_line_summary
+        again, and add tests for it (also clean up some redundant tests).
+
+        Also, add constants to the test port code so that the knowledge
+        of how many tests should fail, and how, lives next to the list of
+        tests rather than in a totally different place.
+
+        * Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
+        (LayoutTestRunner.run_tests):
+        * Scripts/webkitpy/layout_tests/port/test.py:
+        (TestList.__getitem__):
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+        (get_test_results):
+        (MainTest.setUp):
+        (MainTest.test_basic):
+        (MainTest.test_run_singly_actually_runs_tests):
+        (MainTest.test_test_list_with_prefix):
+        (MainTest.test_missing_and_unexpected_results_with_custom_exit_code.CustomExitCodePort):
+        (MainTest.test_tolerance.ImageDiffTestPort):
+        (EndToEndTest.parse_full_results):
+
 2012-12-10  Mike West  <mk...@chromium.org>
 
         Log to console when script is blocked by sandbox attributes.

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py (137182 => 137183)


--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py	2012-12-10 18:47:20 UTC (rev 137182)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py	2012-12-10 18:58:39 UTC (rev 137183)
@@ -87,7 +87,7 @@
         self._needs_websockets = needs_websockets
         self._retrying = retrying
 
-        result_summary = ResultSummary(self._expectations, len(test_inputs))
+        result_summary = ResultSummary(self._expectations, len(test_inputs) + len(tests_to_skip))
         self._current_result_summary = result_summary
         self._remaining_locked_shards = []
         self._has_http_lock = False
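
This one-line change is the heart of the fix: the summary's total has to
count the skipped tests as well, or the "ran as expected" figure in the
one-line summary ends up short by the number of skipped tests. A hypothetical
illustration (the real ResultSummary also takes an expectations object; only
the count matters here):

    # Hypothetical stand-in; not the webkitpy ResultSummary class.
    class FakeResultSummary(object):
        def __init__(self, total):
            self.total = total

    test_inputs = ['passes/%d.html' % i for i in range(79)]     # tests that will run
    tests_to_skip = ['skipped/%d.html' % i for i in range(25)]  # tests that will not

    summary = FakeResultSummary(len(test_inputs) + len(tests_to_skip))
    assert summary.total == 104  # matches TOTAL_TESTS in port/test.py below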

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py (137182 => 137183)


--- trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py	2012-12-10 18:47:20 UTC (rev 137182)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py	2012-12-10 18:58:39 UTC (rev 137183)
@@ -97,7 +97,16 @@
     def __getitem__(self, item):
         return self.tests[item]
 
+#
+# These numbers may need to be updated whenever we add or delete tests.
+#
+TOTAL_TESTS = 104
+TOTAL_SKIPS = 25
+TOTAL_RETRIES = 14
 
+UNEXPECTED_PASSES = 6
+UNEXPECTED_FAILURES = 17
+
 def unit_test_list():
     tests = TestList()
     tests.add('failures/expected/crash.html', crash=True)

Modified: trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (137182 => 137183)


--- trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py	2012-12-10 18:47:20 UTC (rev 137182)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py	2012-12-10 18:58:39 UTC (rev 137183)
@@ -51,7 +51,7 @@
 from webkitpy.layout_tests import port
 from webkitpy.layout_tests import run_webkit_tests
 from webkitpy.layout_tests.port import Port
-from webkitpy.layout_tests.port.test import TestPort, TestDriver
+from webkitpy.layout_tests.port import test
 from webkitpy.test.skip import skip_if
 from webkitpy.tool.mocktool import MockOptions
 
@@ -158,12 +158,6 @@
     return all_results
 
 
-# Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
-# FIXME: It's nice to have a routine in port/test.py that returns this number.
-unexpected_failures = 12
-unexpected_tests_count = unexpected_failures + 4
-
-
 class StreamTestingMixin(object):
     def assertContains(self, stream, string):
         self.assertTrue(string in stream.getvalue())
@@ -275,13 +269,39 @@
         # properly on cygwin (bug 63846).
         self.should_test_processes = not self._platform.is_win()
 
-    def test_all(self):
-        res, _, _ = logging_run([], tests_included=True)
-        self.assertEqual(res, unexpected_tests_count)
-
     def test_basic(self):
-        self.assertTrue(passing_run())
+        options, args = parse_args(tests_included=True)
+        logging_stream = StringIO.StringIO()
+        host = MockHost()
+        port_obj = host.port_factory.get(options.platform, options)
+        details = run_webkit_tests.run(port_obj, options, args, logging_stream)
 
+        # These numbers will need to be updated whenever we add new tests.
+        self.assertEqual(details.result_summary.total, test.TOTAL_TESTS)
+        self.assertEqual(details.result_summary.expected_skips, test.TOTAL_SKIPS)
+        self.assertEqual(len(details.result_summary.unexpected_results), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
+        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
+        self.assertEqual(details.retry_summary.total, test.TOTAL_RETRIES)
+
+        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
+            details.result_summary.total - details.result_summary.expected_skips - len(details.result_summary.unexpected_results),
+            len(details.result_summary.unexpected_results))
+        self.assertTrue(one_line_summary in logging_stream.buflist)
+
+        # Ensure the results were summarized properly.
+        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)
+
+        # Ensure the image diff percentage is in the results.
+        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
+
+        # Ensure the results were written out and displayed.
+        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+        self.assertEqual(json.loads(json_to_eval), details.summarized_results)
+
+        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
+
     def test_batch_size(self):
         batch_tests_run = get_test_batches(['--batch-size', '2'])
         for batch in batch_tests_run:
@@ -509,8 +529,8 @@
         self.assertTrue(has_passes_text)
 
     def test_run_singly_actually_runs_tests(self):
-        res, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
-        self.assertEqual(res, unexpected_failures)
+        res, _, _ = logging_run(['--run-singly'], tests_included=True)
+        self.assertEqual(res, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.
 
     def test_single_file(self):
         tests_run = get_tests_run(['passes/text.html'])
@@ -548,15 +568,6 @@
         tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
         self.assertEqual(['passes/text.html'], tests_run)
 
-    def test_unexpected_failures(self):
-        # Run tests including the unexpected failures.
-        self._url_opened = None
-        res, err, user = logging_run(tests_included=True)
-
-        self.assertEqual(res, unexpected_tests_count)
-        self.assertNotEmpty(err)
-        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
-
     def test_missing_and_unexpected_results(self):
         # Test that we update expectations in place. If the expectation
         # is missing, update the expected generic location.
@@ -593,7 +604,7 @@
     def test_missing_and_unexpected_results_with_custom_exit_code(self):
         # Test that we update expectations in place. If the expectation
         # is missing, update the expected generic location.
-        class CustomExitCodePort(TestPort):
+        class CustomExitCodePort(test.TestPort):
             def exit_code_from_summarized_results(self, unexpected_results):
                 return unexpected_results['num_regressions'] + unexpected_results['num_missing']
 
@@ -740,7 +751,7 @@
         self.assertEqual(tests_run, sorted(tests_run))
 
     def test_tolerance(self):
-        class ImageDiffTestPort(TestPort):
+        class ImageDiffTestPort(test.TestPort):
             def diff_image(self, expected_contents, actual_contents, tolerance=None):
                 self.tolerance_used_for_diff_image = self._options.tolerance
                 return (True, 1, None)
@@ -893,19 +904,6 @@
         compressed_results = json.loads(json_to_eval)
         return compressed_results
 
-    def test_end_to_end(self):
-        host = MockHost()
-        res, _, user = logging_run(tests_included=True, host=host)
-
-        self.assertEqual(res, unexpected_tests_count)
-        results = self.parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
-
-        # Check to ensure we're passing back image diff %age correctly.
-        self.assertEqual(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
-
-        # Check that we attempted to display the results page in a browser.
-        self.assertTrue(user.opened_urls)
-
     def test_reftest_with_two_notrefs(self):
         # Test that we update expectations in place. If the expectation
         # is missing, update the expected generic location.