This revision was automatically updated to reflect the committed changes.
Closed by commit rG99d6e05e7144: [lit] Improve naming of test result categories 
(authored by yln).
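
For context: the patch collapses lit's per-category (status code, summary
label, group label) triple into a single (status code, label) pair, and the
one label now feeds both the grouped listing and the summary count. A minimal
sketch of registering a custom category under the new scheme, mirroring what
the updated custom-result-category.py test checks (the config wiring and the
ResultCode constructor signature below reflect lit as of this change and are
illustrative, not part of the patch itself):

  import lit.Test
  import lit.main

  # Out-of-tree result codes; the second argument marks whether the
  # code counts as a failure.
  CUSTOM_PASS = lit.Test.ResultCode('CUSTOM_PASS', False)
  CUSTOM_FAILURE = lit.Test.ResultCode('CUSTOM_FAILURE', True)

  # After this patch a category is just (code, label); the same label
  # yields "My Passed Tests (1)" in the grouped listing and
  # "My Passed: 1" in the summary.
  lit.main.add_result_category(CUSTOM_PASS, 'My Passed')
  lit.main.add_result_category(CUSTOM_FAILURE, 'My Failed')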
Herald added subscribers: cfe-commits, msifontes, jurahul, Kayjukh, frgossen, 
grosul1, Joonsoo, stephenneuendorffer, liufengdb, lucyrfox, mgester, 
arpith-jacob, nicolasvasilache, antiagainst, shauheen, jpienaar, rriddle, 
mehdi_amini.
Herald added projects: clang, MLIR.

Changed prior to commit:
  https://reviews.llvm.org/D77708?vs=264307&id=268809#toc

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77708/new/

https://reviews.llvm.org/D77708

Files:
  clang/www/hacking.html
  llvm/utils/lit/lit/main.py
  llvm/utils/lit/tests/allow-retries.py
  llvm/utils/lit/tests/custom-result-category.py
  llvm/utils/lit/tests/googletest-discovery-failed.py
  llvm/utils/lit/tests/googletest-format.py
  llvm/utils/lit/tests/googletest-timeout.py
  llvm/utils/lit/tests/googletest-upstream-format.py
  llvm/utils/lit/tests/lit-opts.py
  llvm/utils/lit/tests/max-failures.py
  llvm/utils/lit/tests/max-time.py
  llvm/utils/lit/tests/parallelism-groups.py
  llvm/utils/lit/tests/selecting.py
  llvm/utils/lit/tests/shtest-env.py
  llvm/utils/lit/tests/shtest-format.py
  llvm/utils/lit/tests/shtest-inject.py
  llvm/utils/lit/tests/shtest-not.py
  llvm/utils/lit/tests/shtest-shell.py
  llvm/utils/lit/tests/shtest-timeout.py
  mlir/test/Examples/standalone/test.toy

Index: mlir/test/Examples/standalone/test.toy
===================================================================
--- mlir/test/Examples/standalone/test.toy
+++ mlir/test/Examples/standalone/test.toy
@@ -1,4 +1,4 @@
 # RUN: %cmake %mlir_src_root/examples/standalone -DCMAKE_CXX_COMPILER=%host_cxx -DCMAKE_C_COMPILER=%host_cc -DMLIR_DIR=%llvm_lib_dir/cmake/mlir ; %cmake --build . --target check-standalone | tee %t | FileCheck %s
 
-# CHECK: Expected Passes: 3
+# CHECK: Passed: 3
 # UNSUPPORTED: windows, android
Index: llvm/utils/lit/tests/shtest-timeout.py
===================================================================
--- llvm/utils/lit/tests/shtest-timeout.py
+++ llvm/utils/lit/tests/shtest-timeout.py
@@ -50,8 +50,8 @@
 
 # CHECK-OUT-COMMON: PASS: per_test_timeout :: short.py
 
-# CHECK-OUT-COMMON: Expected Passes{{ *}}: 1
-# CHECK-OUT-COMMON: Individual Timeouts{{ *}}: 1
+# CHECK-OUT-COMMON: Passed   : 1
+# CHECK-OUT-COMMON: Timed Out: 1
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.
@@ -71,5 +71,5 @@
 
 # CHECK-CMDLINE-OVERRIDE-OUT: PASS: per_test_timeout :: short.py
 
-# CHECK-CMDLINE-OVERRIDE-OUT: Expected Passes{{ *}}: 1
-# CHECK-CMDLINE-OVERRIDE-OUT: Individual Timeouts{{ *}}: 1
+# CHECK-CMDLINE-OVERRIDE-OUT: Passed   : 1
+# CHECK-CMDLINE-OVERRIDE-OUT: Timed Out: 1
Index: llvm/utils/lit/tests/shtest-shell.py
===================================================================
--- llvm/utils/lit/tests/shtest-shell.py
+++ llvm/utils/lit/tests/shtest-shell.py
@@ -583,4 +583,4 @@
 # CHECK: ***
 
 # CHECK: PASS: shtest-shell :: valid-shell.txt
-# CHECK: Failing Tests (35)
+# CHECK: Failed Tests (35)
Index: llvm/utils/lit/tests/shtest-not.py
===================================================================
--- llvm/utils/lit/tests/shtest-not.py
+++ llvm/utils/lit/tests/shtest-not.py
@@ -110,6 +110,6 @@
 # CHECK: Error: 'not --crash' cannot call 'rm'
 # CHECK: error: command failed with exit status: {{.*}}
 
-# CHECK: Expected Passes : 1
-# CHECK: Unexpected Failures: 12
+# CHECK: Passed:  1
+# CHECK: Failed: 12
 # CHECK-NOT: {{.}}
Index: llvm/utils/lit/tests/shtest-inject.py
===================================================================
--- llvm/utils/lit/tests/shtest-inject.py
+++ llvm/utils/lit/tests/shtest-inject.py
@@ -11,7 +11,7 @@
 # CHECK-TEST1: THIS WAS
 # CHECK-TEST1: INJECTED
 #
-# CHECK-TEST1: Expected Passes: 1
+# CHECK-TEST1: Passed: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
 #
@@ -26,7 +26,7 @@
 # CHECK-TEST2: INJECTED
 # CHECK-TEST2: IN THE FILE
 #
-# CHECK-TEST2: Expected Passes: 1
+# CHECK-TEST2: Passed: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
 #
@@ -45,4 +45,4 @@
 # CHECK-TEST3: IF IT WORKS
 # CHECK-TEST3: AS EXPECTED
 #
-# CHECK-TEST3: Expected Passes: 1
+# CHECK-TEST3: Passed: 1
Index: llvm/utils/lit/tests/shtest-format.py
===================================================================
--- llvm/utils/lit/tests/shtest-format.py
+++ llvm/utils/lit/tests/shtest-format.py
@@ -69,21 +69,21 @@
 # CHECK-NEXT: true
 # CHECK-NEXT: --
 
-# CHECK: Failing Tests (3)
+# CHECK: Failed Tests (3)
 # CHECK: shtest-format :: external_shell/fail.txt
 # CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
 # CHECK: shtest-format :: fail.txt
 
-# CHECK: Unexpected Passing Tests (1)
+# CHECK: Unexpectedly Passed Tests (1)
 # CHECK: shtest-format :: xpass.txt
 
 # CHECK: Testing Time:
-# CHECK: Unsupported Tests  : 4
-# CHECK: Expected Passes    : 7
-# CHECK: Expected Failures  : 4
-# CHECK: Unresolved Tests   : 3
-# CHECK: Unexpected Failures: 3
-# CHECK: Unexpected Passes  : 1
+# CHECK: Unsupported        : 4
+# CHECK: Passed             : 7
+# CHECK: Expectedly Failed  : 4
+# CHECK: Unresolved         : 3
+# CHECK: Failed             : 3
+# CHECK: Unexpectedly Passed: 1
 
 
 # XUNIT: <?xml version="1.0" encoding="UTF-8"?>
Index: llvm/utils/lit/tests/shtest-env.py
===================================================================
--- llvm/utils/lit/tests/shtest-env.py
+++ llvm/utils/lit/tests/shtest-env.py
@@ -93,6 +93,6 @@
 # CHECK: $ "env" "A_FOO=1" "-u" "FOO" "B_BAR=2" "-u" "BAR" "C_OOF=3" "{{[^"]*}}" "print_environment.py"
 # CHECK-NOT: ${{.*}}print_environment.py
 
-# CHECK: Expected Passes : 4
-# CHECK: Unexpected Failures: 12
+# CHECK: Passed:  4
+# CHECK: Failed: 12
 # CHECK-NOT: {{.}}
Index: llvm/utils/lit/tests/selecting.py
===================================================================
--- llvm/utils/lit/tests/selecting.py
+++ llvm/utils/lit/tests/selecting.py
@@ -22,14 +22,14 @@
 # RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # CHECK-FILTER: Testing: 2 of 5 tests
-# CHECK-FILTER: Excluded Tests : 3
+# CHECK-FILTER: Excluded: 3
 
 
 # Check that maximum counts work
 #
 # RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
 # CHECK-MAX: Testing: 3 of 5 tests
-# CHECK-MAX: Excluded Tests : 2
+# CHECK-MAX: Excluded: 2
 
 
 # Check that sharding partitions the testsuite in a way that distributes the
@@ -40,7 +40,7 @@
 # RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
 # CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
 # CHECK-SHARD0-OUT: Testing: 2 of 5 tests
-# CHECK-SHARD0-OUT: Excluded Tests : 3
+# CHECK-SHARD0-OUT: Excluded: 3
 #
 # RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
Index: llvm/utils/lit/tests/parallelism-groups.py
===================================================================
--- llvm/utils/lit/tests/parallelism-groups.py
+++ llvm/utils/lit/tests/parallelism-groups.py
@@ -15,4 +15,4 @@
 # CHECK:     -- Testing: 2 tests, 2 workers --
 # CHECK-DAG: PASS: parallelism-groups :: test1.txt
 # CHECK-DAG: PASS: parallelism-groups :: test2.txt
-# CHECK:     Expected Passes: 2
+# CHECK:     Passed: 2
Index: llvm/utils/lit/tests/max-time.py
===================================================================
--- llvm/utils/lit/tests/max-time.py
+++ llvm/utils/lit/tests/max-time.py
@@ -5,5 +5,5 @@
 # RUN: %{lit} %{inputs}/max-time --max-time=5 2>&1  |  FileCheck %s
 
 # CHECK: reached timeout, skipping remaining tests
-# CHECK: Skipped Tests  : 1
-# CHECK: Expected Passes: 1
+# CHECK: Skipped: 1
+# CHECK: Passed : 1
Index: llvm/utils/lit/tests/max-failures.py
===================================================================
--- llvm/utils/lit/tests/max-failures.py
+++ llvm/utils/lit/tests/max-failures.py
@@ -10,15 +10,15 @@
 #
 
 # CHECK-NOT: reached maximum number of test failures
-# CHECK-NOT: Skipped Tests
-# CHECK: Unexpected Failures: 3
+# CHECK-NOT: Skipped
+# CHECK: Failed: 3
 
 # CHECK: reached maximum number of test failures, skipping remaining tests
-# CHECK: Skipped Tests      : 2
-# CHECK: Unexpected Failures: 1
+# CHECK: Skipped: 2
+# CHECK: Failed : 1
 
 # CHECK: reached maximum number of test failures, skipping remaining tests
-# CHECK: Skipped Tests      : 1
-# CHECK: Unexpected Failures: 2
+# CHECK: Skipped: 1
+# CHECK: Failed : 2
 
 # CHECK: error: argument --max-failures: requires positive integer, but found '0'
Index: llvm/utils/lit/tests/lit-opts.py
===================================================================
--- llvm/utils/lit/tests/lit-opts.py
+++ llvm/utils/lit/tests/lit-opts.py
@@ -24,10 +24,10 @@
 
 # CHECK:      Testing: 1 tests
 # CHECK-NOT:  PASS
-# CHECK:      Expected Passes: 1
+# CHECK:      Passed: 1
 
 # SHOW-ALL:     Testing: 1 tests
 # SHOW-ALL:     PASS: lit-opts :: test.txt (1 of 1)
 # SHOW-ALL:     {{^}}[[VAR]]
 # SHOW-ALL-NOT: PASS
-# SHOW-ALL:     Expected Passes: 1
+# SHOW-ALL:     Passed: 1
Index: llvm/utils/lit/tests/googletest-upstream-format.py
===================================================================
--- llvm/utils/lit/tests/googletest-upstream-format.py
+++ llvm/utils/lit/tests/googletest-upstream-format.py
@@ -15,6 +15,6 @@
 # CHECK: ***
 # CHECK: PASS: googletest-upstream-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/0.subTest
 # CHECK: PASS: googletest-upstream-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/1.subTest
-# CHECK: Failing Tests (1)
-# CHECK: Expected Passes    : 3
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed Tests (1)
+# CHECK: Passed: 3
+# CHECK: Failed: 1
Index: llvm/utils/lit/tests/googletest-timeout.py
===================================================================
--- llvm/utils/lit/tests/googletest-timeout.py
+++ llvm/utils/lit/tests/googletest-timeout.py
@@ -16,8 +16,8 @@
 # CHECK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestA
 # CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestB
 # CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestC
-# CHECK: Expected Passes    : 1
-# CHECK: Individual Timeouts: 2
+# CHECK: Passed   : 1
+# CHECK: Timed Out: 2
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.
Index: llvm/utils/lit/tests/googletest-format.py
===================================================================
--- llvm/utils/lit/tests/googletest-format.py
+++ llvm/utils/lit/tests/googletest-format.py
@@ -17,7 +17,7 @@
 # CHECK: ***
 # CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/0.subTest
 # CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/1.subTest
-# CHECK: Failing Tests (1)
-# CHECK: Expected Passes    : 3
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed Tests (1)
+# CHECK: Passed: 3
+# CHECK: Failed: 1
 
Index: llvm/utils/lit/tests/googletest-discovery-failed.py
===================================================================
--- llvm/utils/lit/tests/googletest-discovery-failed.py
+++ llvm/utils/lit/tests/googletest-discovery-failed.py
@@ -5,6 +5,6 @@
 
 
 # CHECK: -- Testing:
-# CHECK: Failing Tests (1):
+# CHECK: Failed Tests (1):
 # CHECK:   googletest-discovery-failed :: subdir/OneTest.py/failed_to_discover_tests_from_gtest
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed: 1
Index: llvm/utils/lit/tests/custom-result-category.py
===================================================================
--- llvm/utils/lit/tests/custom-result-category.py
+++ llvm/utils/lit/tests/custom-result-category.py
@@ -6,10 +6,10 @@
 # CHECK: CUSTOM_PASS: custom-result-category :: test1.txt
 # CHECK: CUSTOM_FAILURE: custom-result-category :: test2.txt
 
-# TODO(yln): Passing tests shouldn't be printed by default.
+# TODO(yln): Passed tests shouldn't be printed by default.
 # CHECK: My Passed Tests (1)
 # CHECK: My Failed Tests (1)
 # CHECK:   custom-result-category :: test2.txt
 
-# CHECK: My Passed Tests: 1
-# CHECK: My Failed Tests: 1
+# CHECK: My Passed: 1
+# CHECK: My Failed: 1
Index: llvm/utils/lit/tests/allow-retries.py
===================================================================
--- llvm/utils/lit/tests/allow-retries.py
+++ llvm/utils/lit/tests/allow-retries.py
@@ -5,18 +5,18 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
-# CHECK-TEST1: Passes With Retry: 1
+# CHECK-TEST1: Passed With Retry: 1
 
 # Test that a per-file ALLOW_RETRIES overwrites the config-wide test_retry_attempts property, if any.
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
-# CHECK-TEST2: Passes With Retry: 1
+# CHECK-TEST2: Passed With Retry: 1
 
 # This test does not succeed within the allowed retry limit
 #
 # RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
-# CHECK-TEST3: Failing Tests (1):
+# CHECK-TEST3: Failed Tests (1):
 # CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
 
 # This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
@@ -38,4 +38,4 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
-# CHECK-TEST6: Passes With Retry: 1
+# CHECK-TEST6: Passed With Retry: 1
Index: llvm/utils/lit/lit/main.py
===================================================================
--- llvm/utils/lit/lit/main.py
+++ llvm/utils/lit/lit/main.py
@@ -265,34 +265,33 @@
 
 def add_result_category(result_code, label):
     assert isinstance(result_code, lit.Test.ResultCode)
-    category = (result_code, "%s Tests" % label, label)
+    category = (result_code, label)
     result_codes.append(category)
 
 
-# Status code, summary label, group label
 result_codes = [
     # Passes
-    (lit.Test.EXCLUDED,    'Excluded Tests',      'Excluded'),
-    (lit.Test.SKIPPED,     'Skipped Tests',       'Skipped'),
-    (lit.Test.UNSUPPORTED, 'Unsupported Tests',   'Unsupported'),
-    (lit.Test.PASS,        'Expected Passes',     ''),
-    (lit.Test.FLAKYPASS,   'Passes With Retry',   ''),
-    (lit.Test.XFAIL,       'Expected Failures',   'Expected Failing'),
+    (lit.Test.EXCLUDED,    'Excluded'),
+    (lit.Test.SKIPPED,     'Skipped'),
+    (lit.Test.UNSUPPORTED, 'Unsupported'),
+    (lit.Test.PASS,        'Passed'),
+    (lit.Test.FLAKYPASS,   'Passed With Retry'),
+    (lit.Test.XFAIL,       'Expectedly Failed'),
     # Failures
-    (lit.Test.UNRESOLVED,  'Unresolved Tests',    'Unresolved'),
-    (lit.Test.TIMEOUT,     'Individual Timeouts', 'Timed Out'),
-    (lit.Test.FAIL,        'Unexpected Failures', 'Failing'),
-    (lit.Test.XPASS,       'Unexpected Passes',   'Unexpected Passing')
+    (lit.Test.UNRESOLVED,  'Unresolved'),
+    (lit.Test.TIMEOUT,     'Timed Out'),
+    (lit.Test.FAIL,        'Failed'),
+    (lit.Test.XPASS,       'Unexpectedly Passed')
 ]
 
 
 def print_results(tests, elapsed, opts):
-    tests_by_code = {code: [] for (code, _, _) in result_codes}
+    tests_by_code = {code: [] for code, _ in result_codes}
     for test in tests:
         tests_by_code[test.result.code].append(test)
 
-    for (code, _, group_label) in result_codes:
-        print_group(code, group_label, tests_by_code[code], opts)
+    for (code, label) in result_codes:
+        print_group(code, label, tests_by_code[code], opts)
 
     print_summary(tests_by_code, opts.quiet, elapsed)
 
@@ -318,7 +317,7 @@
         print('\nTesting Time: %.2fs' % elapsed)
 
     codes = [c for c in result_codes if not quiet or c.isFailure]
-    groups = [(label, len(tests_by_code[code])) for code, label, _ in codes]
+    groups = [(label, len(tests_by_code[code])) for code, label in codes]
     groups = [(label, count) for label, count in groups if count]
     if not groups:
         return
Index: clang/www/hacking.html
===================================================================
--- clang/www/hacking.html
+++ clang/www/hacking.html
@@ -264,12 +264,12 @@
 -- Testing: Testing: 2534 tests, 4 threads --
 Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
 Testing Time: 81.52s
-  Expected Passes    : 2503
-  Expected Failures  : 28
-  Unsupported Tests  : 3
+  Passed           : 2503
+  Expectedly Failed:   28
+  Unsupported      :    3
 </pre>
 
-  <p>The statistic, "Unexpected Failures" (not shown if all tests pass), is the important one.</p>
+  <p>The statistic, "Failed" (not shown if all tests pass), is the important one.</p>
 
   <!--=====================================================================-->
   <h2 id="patches">Creating Patch Files</h2>