Reviewers: Jakob,

Message:
PTAL

Description:
Add flaky test classification feature to test suites.

Test expectations can now include the outcome FLAKY.
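
For illustration, a minimal sketch of such an expectation, assuming the
new-style status file syntax parsed by tools/testrunner/local/statusfile.py
(the test name is hypothetical):

  [
  [ALWAYS, {
    # Hypothetical entry: the test usually passes but is known to be flaky.
    'regress/my-flaky-test': [PASS, FLAKY],
  }],
  ]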

The test runner can now run one class of tests (flaky|non-flaky|all). All tests
that are not marked as FLAKY fall into the non-flaky class.
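
Example invocations (illustrative; other flags such as --arch and --mode
are omitted):

  # Run only tests marked FLAKY:
  tools/run-tests.py --classification=flaky
  # Run everything except tests marked FLAKY:
  tools/run-tests.py --classification=non-flaky
  # The default, runs both classes:
  tools/run-tests.py --classification=all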

The slash correction for Windows is now pulled into the test name method.
Until now, the progress output on Windows contained a mixture of / and \.
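
A quick sketch of the normalization now done in CommonTestName() (the path
is a made-up example):

  # On Windows, testcase.path uses backslashes; normalize so that status
  # file matching and progress output consistently use forward slashes.
  path = "mjsunit\\regress\\regress-123"  # hypothetical Windows test path
  print path.replace("\\", "/")           # -> mjsunit/regress/regress-123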

Please review this at https://codereview.chromium.org/22381003/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files:
  M tools/run-tests.py
  M tools/testrunner/local/old_statusfile.py
  M tools/testrunner/local/statusfile.py
  M tools/testrunner/local/testsuite.py
  M tools/testrunner/local/verbose.py


Index: tools/run-tests.py
diff --git a/tools/run-tests.py b/tools/run-tests.py
index 761d03fe3351f4a15a46c3745d3561bc5abac86e..03722a5ca3caa29dbf222d31757e692760dcdf35 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -94,6 +94,9 @@ def BuildOptions():
                     default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
+  result.add_option("--classification",
+                    help="Class of tests to run (all|flaky|non-flaky)",
+                    default="all")
   result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
                     default="")
@@ -204,6 +207,9 @@ def ProcessOptions(options):
    # This is OK for distributed running, so we don't need to set no_network.
     options.command_prefix = (["python", "-u", run_valgrind] +
                               options.command_prefix)
+  if options.classification not in ["all", "flaky", "non-flaky"]:
+    print "Unknown classification %s" % options.classification
+    return False
   return True


@@ -315,7 +321,7 @@ def Execute(arch, mode, args, options, suites, workspace):
     if len(args) > 0:
       s.FilterTestCasesByArgs(args)
     all_tests += s.tests
-    s.FilterTestCasesByStatus(options.warn_unused)
+    s.FilterTestCasesByStatus(options.warn_unused, options.classification)
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue
Index: tools/testrunner/local/old_statusfile.py
diff --git a/tools/testrunner/local/old_statusfile.py b/tools/testrunner/local/old_statusfile.py
index a9a62036ec48fcfcd68135e8c93358a163122a6c..d634e3ec955b9f278fb6a09df2b0e03df2c8df6c 100644
--- a/tools/testrunner/local/old_statusfile.py
+++ b/tools/testrunner/local/old_statusfile.py
@@ -37,6 +37,7 @@ OKAY = 'OKAY'
 TIMEOUT = 'TIMEOUT'
 CRASH = 'CRASH'
 SLOW = 'SLOW'
+FLAKY = 'FLAKY'
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = 'FAIL_OK'
 PASS_OR_FAIL = 'PASS_OR_FAIL'
@@ -48,6 +49,7 @@ KEYWORDS = {SKIP: SKIP,
             TIMEOUT: TIMEOUT,
             CRASH: CRASH,
             SLOW: SLOW,
+            FLAKY: FLAKY,
             FAIL_OK: FAIL_OK,
             PASS_OR_FAIL: PASS_OR_FAIL}

Index: tools/testrunner/local/statusfile.py
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index 634fe6a08a82ac639f87a4dd14161d1699d10836..1d30fe3d3c1edbe4e167822fd62e11eb0e458f62 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -42,6 +42,7 @@ OKAY = "OKAY"
 TIMEOUT = "TIMEOUT"
 CRASH = "CRASH"
 SLOW = "SLOW"
+FLAKY = "FLAKY"
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = "FAIL_OK"
 PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -49,7 +50,7 @@ PASS_OR_FAIL = "PASS_OR_FAIL"
 ALWAYS = "ALWAYS"

 KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
             PASS_OR_FAIL, ALWAYS]:
   KEYWORDS[key] = key

@@ -68,6 +69,10 @@ def DoSkip(outcomes):


 def IsFlaky(outcomes):
+  return FLAKY in outcomes
+
+
+def IsPassOrFail(outcomes):
   return ((PASS in outcomes) and (FAIL in outcomes) and
           (not CRASH in outcomes) and (not OKAY in outcomes))

Index: tools/testrunner/local/testsuite.py
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index 473e8b1efed2e4b860df31707b436b784f7b3654..42890b4429079d00a4a46a677589df312b191ddd 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -66,7 +66,10 @@ class TestSuite(object):

   # Used in the status file and for stdout printing.
   def CommonTestName(self, testcase):
-    return testcase.path
+    if utils.IsWindows():
+      return testcase.path.replace("\\", "/")
+    else:
+      return testcase.path

   def ListTests(self, context):
     raise NotImplementedError
@@ -84,32 +87,36 @@ class TestSuite(object):
   def ReadTestCases(self, context):
     self.tests = self.ListTests(context)

-  def FilterTestCasesByStatus(self, warn_unused_rules):
+  @staticmethod
+  def _OutsideClass(flaky, cls):
+    return (cls == "flaky" and not flaky) or (cls == "non-flaky" and flaky)
+
+  def FilterTestCasesByStatus(self, warn_unused_rules, classification="all"):
     filtered = []
     used_rules = set()
     for t in self.tests:
+      flaky = False
       testname = self.CommonTestName(t)
-      if utils.IsWindows():
-        testname = testname.replace("\\", "/")
       if testname in self.rules:
         used_rules.add(testname)
-        outcomes = self.rules[testname]
-        t.outcomes = outcomes  # Even for skipped tests, as the TestCase
-        # object stays around and PrintReport() uses it.
-        if statusfile.DoSkip(outcomes):
+        # Even for skipped tests, as the TestCase object stays around and
+        # PrintReport() uses it.
+        t.outcomes = self.rules[testname]
+        if statusfile.DoSkip(t.outcomes):
           continue  # Don't add skipped tests to |filtered|.
-      if len(self.wildcards) != 0:
-        skip = False
-        for rule in self.wildcards:
-          assert rule[-1] == '*'
-          if testname.startswith(rule[:-1]):
-            used_rules.add(rule)
-            outcomes = self.wildcards[rule]
-            t.outcomes = outcomes
-            if statusfile.DoSkip(outcomes):
-              skip = True
-              break  # "for rule in self.wildcards"
-        if skip: continue  # "for t in self.tests"
+        flaky = statusfile.IsFlaky(t.outcomes)
+      skip = False
+      for rule in self.wildcards:
+        assert rule[-1] == '*'
+        if testname.startswith(rule[:-1]):
+          used_rules.add(rule)
+          t.outcomes = self.wildcards[rule]
+          if statusfile.DoSkip(t.outcomes):
+            skip = True
+            break  # "for rule in self.wildcards"
+          flaky = flaky or statusfile.IsFlaky(t.outcomes)
+      if skip or self._OutsideClass(flaky, classification):
+        continue  # "for t in self.tests"
       filtered.append(t)
     self.tests = filtered
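
For reference, the new _OutsideClass() helper decides whether a test falls
outside the requested class; a few illustrative cases, just restating the
logic in the hunk above:

  # flaky: True iff the test's outcomes include FLAKY.
  # cls:   the --classification value.
  assert not TestSuite._OutsideClass(False, "all")   # "all" keeps everything
  assert TestSuite._OutsideClass(False, "flaky")     # drop non-flaky test
  assert TestSuite._OutsideClass(True, "non-flaky")  # drop flaky test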

Index: tools/testrunner/local/verbose.py
diff --git a/tools/testrunner/local/verbose.py b/tools/testrunner/local/verbose.py
index f693467523100acdc794d7a34409519174f1813c..00c330d2d9c24c46ea957c2f5228c755f327876b 100644
--- a/tools/testrunner/local/verbose.py
+++ b/tools/testrunner/local/verbose.py
@@ -54,7 +54,7 @@ def PrintReport(tests):
       skipped += 1
       continue
     if statusfile.TIMEOUT in o: timeout += 1
-    if statusfile.IsFlaky(o): nocrash += 1
+    if statusfile.IsPassOrFail(o): nocrash += 1
     if list(o) == [statusfile.PASS]: passes += 1
     if statusfile.IsFailOk(o): fail_ok += 1
     if list(o) == [statusfile.FAIL]: fail += 1

