Diff
Modified: trunk/Tools/ChangeLog (122658 => 122659)
--- trunk/Tools/ChangeLog 2012-07-14 03:05:06 UTC (rev 122658)
+++ trunk/Tools/ChangeLog 2012-07-14 03:45:48 UTC (rev 122659)
@@ -1,5 +1,34 @@
2012-07-13 Dirk Pranke <[email protected]>
+ run-webkit-tests outputs the wrong number of tests executed when some are skipped.
+ https://bugs.webkit.org/show_bug.cgi?id=89894
+
+ Reviewed by Ojan Vafai.
+
+ Fix the logging of the actual number of tests run so that tests
+ that are skipped aren't included.
+
+ Also revamp the 'expected' output so we distinguish the number
+ of tests found from the number of tests run (to account for
+ --repeat-each and --iterations).
+
+ Covered by existing tests.
+
+ * Scripts/webkitpy/layout_tests/controllers/manager.py:
+ (Manager.prepare_lists_and_print_output):
+ (Manager._log_num_workers):
+ (Manager.run):
+ (Manager._print_result_summary):
+ * Scripts/webkitpy/layout_tests/models/result_summary.py:
+ (ResultSummary.__init__):
+ (ResultSummary.add):
+ * Scripts/webkitpy/layout_tests/views/printing.py:
+ (Printer.print_one_line_summary):
+ * Scripts/webkitpy/layout_tests/views/printing_unittest.py:
+ (Testprinter.test_print_one_line_summary):
+
+2012-07-13 Dirk Pranke <[email protected]>
+
nrwt: actually print the exception name and message for otherwise unhandled exceptions
https://bugs.webkit.org/show_bug.cgi?id=91305
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (122658 => 122659)
--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2012-07-14 03:05:06 UTC (rev 122658)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2012-07-14 03:45:48 UTC (rev 122659)
@@ -480,20 +480,13 @@
# now make sure we're explicitly running any tests passed on the command line.
self._test_files.update(found_test_files.intersection(self._paths))
- if not num_all_test_files:
+ num_to_run = len(self._test_files)
+ num_skipped = num_all_test_files - num_to_run
+
+ if not num_to_run:
_log.critical('No tests to run.')
return None
- num_skipped = num_all_test_files - len(self._test_files)
- if num_skipped:
- self._printer.print_expected("Running %s (found %d, skipping %d)." % (
- grammar.pluralize('test', num_all_test_files - num_skipped),
- num_all_test_files, num_skipped))
- elif len(self._test_files) > 1:
- self._printer.print_expected("Running all %d tests." % len(self._test_files))
- else:
- self._printer.print_expected("Running 1 test.")
-
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
self._test_files_list = list(self._test_files)
@@ -518,6 +511,8 @@
(self._options.repeat_each if self._options.repeat_each else 1) * \
(self._options.iterations if self._options.iterations else 1)
result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
+
+ self._printer.print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky")
@@ -530,17 +525,16 @@
for test in skipped:
result = test_results.TestResult(test)
result.type = test_expectations.SKIP
- iterations = \
- (self._options.repeat_each if self._options.repeat_each else 1) * \
- (self._options.iterations if self._options.iterations else 1)
for iteration in range(iterations):
result_summary.add(result, expected=True)
self._printer.print_expected('')
- # Check to make sure we didn't filter out all of the tests.
- if not len(self._test_files):
- _log.info("All tests are being skipped")
- return None
+ if self._options.repeat_each > 1:
+ self._printer.print_expected('Running each test %d times.' % self._options.repeat_each)
+ if self._options.iterations > 1:
+ self._printer.print_expected('Running %d iterations of the tests.' % self._options.iterations)
+ if iterations > 1:
+ self._printer.print_expected('')
return result_summary
@@ -715,11 +709,12 @@
def _log_num_workers(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
- self._printer.print_config("Running 1 %s over %s" %
+ self._printer.print_config("Running 1 %s over %s." %
(driver_name, grammar.pluralize('shard', num_shards)))
else:
- self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)" %
+ self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)." %
(num_workers, driver_name, num_shards, num_locked_shards))
+ self._printer.print_config('')
def _run_tests(self, file_list, result_summary, num_workers):
"""Runs the tests in the file_list.
@@ -890,7 +885,7 @@
self._print_timing_statistics(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary)
self._print_result_summary(result_summary)
- self._printer.print_one_line_summary(result_summary.total, result_summary.expected, result_summary.unexpected)
+ self._printer.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, _only_unexpected_=True, interrupted=interrupted)
self._printer.print_unexpected_results(unexpected_results)
@@ -1331,9 +1326,8 @@
result_summary: information to log
"""
failed = result_summary.total_failures
- skipped = result_summary.total_tests_by_expectation[test_expectations.SKIP]
- total = result_summary.total
- passed = total - failed - skipped
+ total = result_summary.total - result_summary.expected_skips
+ passed = total - failed
pct_passed = 0.0
if total > 0:
pct_passed = float(passed) * 100 / total
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py (122658 => 122659)
--- trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py 2012-07-14 03:05:06 UTC (rev 122658)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py 2012-07-14 03:45:48 UTC (rev 122659)
@@ -47,6 +47,7 @@
self.unexpected_results = {}
self.failures = {}
self.total_failures = 0
+ self.expected_skips = 0
self.total_tests_by_expectation[SKIP] = 0
self.tests_by_expectation[SKIP] = set()
for expectation in TestExpectations.EXPECTATIONS.values():
@@ -65,6 +66,8 @@
self.failures[test_result.test_name] = test_result.failures
if expected:
self.expected += 1
+ if test_result.type == SKIP:
+ self.expected_skips += 1
else:
self.unexpected_results[test_result.test_name] = test_result
self.unexpected += 1
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/views/printing.py (122658 => 122659)
--- trunk/Tools/Scripts/webkitpy/layout_tests/views/printing.py 2012-07-14 03:05:06 UTC (rev 122658)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/views/printing.py 2012-07-14 03:45:48 UTC (rev 122659)
@@ -31,6 +31,7 @@
import optparse
+from webkitpy.tool import grammar
from webkitpy.common.net import resultsjsonparser
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.views.metered_stream import MeteredStream
@@ -217,23 +218,21 @@
return
incomplete = total - expected - unexpected
+ incomplete_str = ''
if incomplete:
self._write("")
incomplete_str = " (%d didn't run)" % incomplete
- expected_str = str(expected)
- else:
- incomplete_str = ""
- expected_str = "All %d" % expected
if unexpected == 0:
- self._write("%s tests ran as expected%s." %
- (expected_str, incomplete_str))
- elif expected == 1:
- self._write("1 test ran as expected, %d didn't%s:" %
- (unexpected, incomplete_str))
+ if expected == total:
+ if expected > 1:
+ self._write("All %d tests ran as expected." % expected)
+ else:
+ self._write("The test ran as expected.")
+ else:
+ self._write("%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str))
else:
- self._write("%d tests ran as expected, %d didn't%s:" %
- (expected, unexpected, incomplete_str))
+ self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str))
self._write("")
def print_test_result(self, result, expected, exp_str, got_str):
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py (122658 => 122659)
--- trunk/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py 2012-07-14 03:05:06 UTC (rev 122658)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py 2012-07-14 03:45:48 UTC (rev 122659)
@@ -206,11 +206,11 @@
printer, err, out = self.get_printer(['--print', 'one-line-summary'])
printer.print_one_line_summary(1, 1, 0)
- self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
+ self.assertWritten(err, ["The test ran as expected.\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(1, 1, 0)
- self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
+ self.assertWritten(err, ["The test ran as expected.\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(2, 1, 1)