Reviewers: Jakob,
Message:
PTAL
https://codereview.chromium.org/360113003/diff/20001/tools/testrunner/local/execution.py
File tools/testrunner/local/execution.py (right):
https://codereview.chromium.org/360113003/diff/20001/tools/testrunner/local/execution.py#newcode149
tools/testrunner/local/execution.py:149:
queue.append([self._GetJob(test)])
For simplicity, I put it all within the try block. Shouldn't make a big
difference as there is not much that can fail besides GetCommand.
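For reference, a stripped-down sketch of that control flow (make_job is a
made-up stand-in for self._GetJob; everything sits in the try block, and the
saved exception is re-raised only after the remaining tests have run):

queued_exception = None
queue = []
for test in tests:
  try:
    queue.append([make_job(test)])  # GetCommand is the realistic failure point
  except Exception as e:
    queued_exception = e  # remember it, keep queueing the other tests
    continue
# ... hand queue to the pool and drain the results ...
if queued_exception:
  raise queued_exception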
Description:
Let test runner rerun failures to test for flakes.
When enabled, this dynamically adds jobs that had failures back to the pool.
Special json output for flakes will be handled in a separate CL.
BUG=374134
LOG=n
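A note on the mechanism described above: "dynamically adds jobs back to the
pool" relies on the worker pool accepting new work while results are still
being consumed. Below is a minimal single-threaded stand-in illustrating just
that property; ToyPool and flaky_run are invented for this sketch, the real
multiprocess pool lives in tools/testrunner/local/pool.py.

import collections

class ToyPool(object):
  def __init__(self):
    self._queue = collections.deque()

  def add(self, jobs):
    # May be called while imap_unordered is already yielding results.
    self._queue.extend(jobs)

  def imap_unordered(self, fn, gen):
    for jobs in gen:
      self.add(jobs)
    while self._queue:  # picks up jobs added mid-iteration
      yield fn(self._queue.popleft())

def flaky_run(test_id, _fails_once={1}):
  ok = test_id not in _fails_once
  _fails_once.discard(test_id)  # test 1 fails only on its first attempt
  return test_id, ok

pool = ToyPool()
for test_id, ok in pool.imap_unordered(flaky_run, [[0], [1], [2]]):
  if not ok:
    pool.add([test_id])  # what _MaybeRerun does with a freshly built Job
  print(test_id, ok)     # test 1 fails once, then passes on the rerun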
Please review this at https://codereview.chromium.org/360113003/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+63, -18 lines):
M tools/run-deopt-fuzzer.py
M tools/run-tests.py
M tools/testrunner/local/execution.py
M tools/testrunner/objects/context.py
M tools/testrunner/objects/testcase.py
Index: tools/run-deopt-fuzzer.py
diff --git a/tools/run-deopt-fuzzer.py b/tools/run-deopt-fuzzer.py
index eafed016511e7434e8424ea0525173ade09d4b56..1e39e7705b092ce06e20529ba56694443a8b793c 100755
--- a/tools/run-deopt-fuzzer.py
+++ b/tools/run-deopt-fuzzer.py
@@ -371,7 +371,9 @@ def Execute(arch, mode, args, options, suites, workspace):
options.extra_flags,
False,
options.random_seed,
-                    True)
+                    True,
+                    0,  # rerun_failures_count: flake reruns disabled here.
+                    0)  # rerun_failures_max
# Find available test suites and read test cases from them.
variables = {
Index: tools/run-tests.py
diff --git a/tools/run-tests.py b/tools/run-tests.py
index fbbe41641f29fa975920edec2624d06e70134c71..ede2a8c77cef7b5a674e95adfd5bb106f2b90966 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -181,6 +181,13 @@ def BuildOptions():
default=False, action="store_true")
result.add_option("--json-test-results",
help="Path to a file for storing json results.")
+  result.add_option("--rerun-failures-count",
+                    help=("Number of times to rerun failures. Very slow tests "
+                          "will be rerun only once."),
+                    default=0, type="int")
+ result.add_option("--rerun-failures-max",
+ help="Maximum number of failures to rerun.",
+ default=100, type="int")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
@@ -416,7 +423,9 @@ def Execute(arch, mode, args, options, suites, workspace):
options.extra_flags,
options.no_i18n,
options.random_seed,
- options.no_sorting)
+ options.no_sorting,
+ options.rerun_failures_count,
+ options.rerun_failures_max)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
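For reference, the two new options compose like this; the snippet below is a
standalone optparse sketch mirroring the add_option calls above, not code
taken from run-tests.py:

import optparse

parser = optparse.OptionParser()
parser.add_option("--rerun-failures-count",
                  help=("Number of times to rerun failures. Very slow tests "
                        "will be rerun only once."),
                  default=0, type="int")
parser.add_option("--rerun-failures-max",
                  help="Maximum number of failures to rerun.",
                  default=100, type="int")

options, _ = parser.parse_args(["--rerun-failures-count=3"])
assert options.rerun_failures_count == 3  # each failure is retried up to 3x
assert options.rerun_failures_max == 100  # global cap keeps its default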
Index: tools/testrunner/local/execution.py
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 79f856c7d7da21a4319edbd5424af557623336ec..939995cd8f112b917953d25ce4c7800b968e5150 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -81,6 +81,7 @@ class Runner(object):
self.remaining = num_tests
self.failed = []
self.crashed = 0
+ self.reran_tests = 0
def _RunPerfSafe(self, fun):
try:
@@ -89,6 +90,42 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
+  def _GetJob(self, test):
+    command = self.GetCommand(test)
+    timeout = self.context.timeout
+    if ("--stress-opt" in test.flags or
+        "--stress-opt" in self.context.mode_flags or
+        "--stress-opt" in self.context.extra_flags):
+      timeout *= 4
+    if test.dependency is not None:
+      dep_command = [ c.replace(test.path, test.dependency) for c in
+                      command ]
+    else:
+      dep_command = None
+    return Job(command, dep_command, test.id, timeout, self.context.verbose)
+
+  def _MaybeRerun(self, pool, test):
+    if test.run <= self.context.rerun_failures_count:
+      # Possibly rerun this test if its run count is below the maximum per
+      # test.
+      if test.run == 1:
+        # Count the overall number of rerun tests on the first rerun.
+        if self.reran_tests < self.context.rerun_failures_max:
+          self.reran_tests += 1
+        else:
+          # Don't rerun this test if the overall rerun budget has already
+          # been exhausted.
+          return
+      if test.run >= 2 and test.duration > self.context.timeout / 20:
+        # Rerun slow tests at most once.
+        return
+
+      # Rerun this test.
+      test.duration = None
+      test.output = None
+      test.run += 1
+      pool.add([self._GetJob(test)])
+      self.remaining += 1
+
def Run(self, jobs):
self.indicator.Starting()
self._RunInternal(jobs)
@@ -109,23 +146,12 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
- command = self.GetCommand(test)
+ queue.append([self._GetJob(test)])
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
queued_exception = e
continue
- timeout = self.context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in self.context.mode_flags or
- "--stress-opt" in self.context.extra_flags):
- timeout *= 4
- if test.dependency is not None:
-        dep_command = [ c.replace(test.path, test.dependency) for c in
-                        command ]
- else:
- dep_command = None
-      job = Job(command, dep_command, test.id, timeout, self.context.verbose)
- queue.append([job])
try:
it = pool.imap_unordered(RunTest, queue)
for result in it:
@@ -143,6 +169,9 @@ class Runner(object):
self.succeeded += 1
self.remaining -= 1
self.indicator.HasRun(test, has_unexpected_output)
+ if has_unexpected_output:
+          # Rerun test failures after the indicator has processed the
+          # results.
+ self._MaybeRerun(pool, test)
finally:
pool.terminate()
self._RunPerfSafe(lambda: self.perf_data_manager.close())
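To make the gating in _MaybeRerun concrete: with --rerun-failures-count=3,
--rerun-failures-max=100 and a 60s timeout, anything slower than 60 / 20 = 3
seconds is rerun at most once. The predicate below restates that decision
(should_rerun is a made-up name and omits the budget-charging side effect):

def should_rerun(run, duration, reran_tests,
                 rerun_count=3, rerun_max=100, timeout=60):
  if run > rerun_count:
    return False  # per-test rerun limit reached
  if run == 1 and reran_tests >= rerun_max:
    return False  # overall rerun budget exhausted
  if run >= 2 and duration > timeout / 20.0:
    return False  # slow test: rerun at most once
  return True

assert should_rerun(run=1, duration=1.0, reran_tests=0)      # first failure
assert not should_rerun(run=2, duration=5.0, reran_tests=1)  # too slow
assert not should_rerun(run=4, duration=1.0, reran_tests=1)  # out of tries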
Index: tools/testrunner/objects/context.py
diff --git a/tools/testrunner/objects/context.py b/tools/testrunner/objects/context.py
index f8f764b3a6cf63e10a55b1758df824df07430cdc..d288e0e05304eab99acb1fd870968f0b1d1e256e 100644
--- a/tools/testrunner/objects/context.py
+++ b/tools/testrunner/objects/context.py
@@ -29,7 +29,7 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
- no_sorting):
+ no_sorting, rerun_failures_count, rerun_failures_max):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -42,15 +42,18 @@ class Context():
self.noi18n = noi18n
self.random_seed = random_seed
self.no_sorting = no_sorting
+ self.rerun_failures_count = rerun_failures_count
+ self.rerun_failures_max = rerun_failures_max
def Pack(self):
    return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
- self.random_seed, self.no_sorting]
+ self.random_seed, self.no_sorting, self.rerun_failures_count,
+ self.rerun_failures_max]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
- packed[8], packed[9])
+ packed[8], packed[9], packed[10], packed[11])
Index: tools/testrunner/objects/testcase.py
diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py
index cfc522ea7380c1fef996c4491de1f39352553a16..374e242e14662235124b256c6552201c4cbbb65e 100644
--- a/tools/testrunner/objects/testcase.py
+++ b/tools/testrunner/objects/testcase.py
@@ -38,6 +38,7 @@ class TestCase(object):
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
+ self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, flags):
copy = TestCase(self.suite, self.path, self.flags + flags,
self.dependency)
@@ -51,7 +52,7 @@ class TestCase(object):
"""
assert self.id is not None
return [self.suitename(), self.path, self.flags,
- self.dependency, list(self.outcomes or []), self.id]
+ self.dependency, list(self.outcomes or []), self.id, self.run]
@staticmethod
def UnpackTask(task):
@@ -60,6 +61,7 @@ class TestCase(object):
test = TestCase(str(task[0]), task[1], task[2], task[3])
test.outcomes = set(task[4])
test.id = task[5]
+ test.run = task[6]
return test
def SetSuiteObject(self, suites):
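The context.py and testcase.py hunks follow the same wire-format rule: new
fields are appended at the end of the packed list so existing indices keep
their meaning when a worker process unpacks the task. A toy round trip with
invented names:

def pack_task(test):
  return [test["path"], test["id"], test["run"]]  # run rides along last

def unpack_task(task):
  return {"path": task[0], "id": task[1], "run": task[2]}

test = {"path": "mjsunit/foo", "id": 7, "run": 2}
assert unpack_task(pack_task(test)) == test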