This is an automated email from the ASF dual-hosted git repository.

bcall pushed a commit to branch parallel-autest
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit a7ebae6de7ed74ca56358778492af82560c2785b
Author: Bryan Call <[email protected]>
AuthorDate: Sun Feb 8 08:10:05 2026 -0800

    Fix test counting and update serial tests list
    
    - Progress line and summary now count top-level tests, not autest
      sub-test results. Previously, thread_config (12 sub-tests) inflated
      the count, causing progress to exceed 100%.
    - Deduplicate failed test names in summary output
    - Remove tls_sni_with_port, redirect_to_same_origin_on_cache, and
      parent-retry from the serial list -- all use dynamic ports via
      get_port() and run fine in parallel
    - Add thread_config to the serial list -- it spins up 12 ATS instances
      and fails under parallel load due to resource contention
---
 tests/autest-parallel.py | 22 ++++++++++++++--------
 tests/serial_tests.txt   | 14 ++++----------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/tests/autest-parallel.py b/tests/autest-parallel.py
index 80bb094800..4b6d5e6f67 100755
--- a/tests/autest-parallel.py
+++ b/tests/autest-parallel.py
@@ -556,11 +556,17 @@ def print_summary(results: List[TestResult], total_duration: float,
     total_warnings = sum(r.warnings for r in results)
     total_exceptions = sum(r.exceptions for r in results)
     total_unknown = sum(r.unknown for r in results)
-    total_tests = total_passed + total_failed + total_skipped + total_warnings + total_exceptions + total_unknown
+    # Use actual test count (top-level tests), not sub-test counts from autest
+    total_tests = sum(len(r.tests) for r in results)
 
+    # Deduplicate failed test names (a test may appear in sub-test output multiple times)
+    seen = set()
     all_failed_tests = []
     for r in results:
-        all_failed_tests.extend(r.failed_tests)
+        for t in r.failed_tests:
+            if t not in seen:
+                seen.add(t)
+                all_failed_tests.append(t)
 
     # Collect actual timings from results
     actual_timings = {}
@@ -827,9 +833,8 @@ Examples:
     sandbox_base = Path(args.sandbox)
     sandbox_base.mkdir(parents=True, exist_ok=True)
 
-    # Progress tracking state
+    # Progress tracking state (counts top-level tests, not autest sub-tests)
     tests_done = 0
-    tests_passed = 0
     tests_failed = 0
     tests_skipped = 0
     workers_done = 0
@@ -892,9 +897,11 @@ Examples:
                     result = future.result()
                     results.append(result)
                     workers_done += 1
-                    tests_done += result.passed + result.failed + result.skipped
-                    tests_passed += result.passed
-                    tests_failed += result.failed
+                    # Use actual test count (top-level), not autest sub-test counts
+                    tests_done += len(result.tests)
+                    # Count top-level tests that failed (from failed_tests list)
+                    tests_failed += len(result.failed_tests)
+                    # Skipped count still comes from autest; kept here for visibility
                     tests_skipped += result.skipped
 
                     if args.verbose:
@@ -950,7 +957,6 @@ Examples:
 
             if status == "PASS":
                 serial_result.passed += 1
-                tests_passed += 1
             elif status == "SKIP":
                 serial_result.skipped += 1
                 tests_skipped += 1
diff --git a/tests/serial_tests.txt b/tests/serial_tests.txt
index 43675a3b9d..d6eff1d294 100644
--- a/tests/serial_tests.txt
+++ b/tests/serial_tests.txt
@@ -1,14 +1,8 @@
-# Tests that must run serially (not in parallel) due to hardcoded ports or
-# other constraints that prevent parallel execution.
+# Tests that must run serially (not in parallel) due to resource conflicts
+# or other constraints that prevent parallel execution.
 #
 # Format: One test file path per line, relative to tests/gold_tests/
 # Lines starting with # are comments and ignored.
 
-# Tests that intentionally use select_ports=False for specific port testing
-tls/tls_sni_with_port.test.py
-
-# Tests using select_ports=False without dynamic ports
-redirect/redirect_to_same_origin_on_cache.test.py
-
-# Tests with hardcoded ports that are difficult to make dynamic
-parent_proxy/parent-retry.test.py
+# Spins up 12 ATS instances with varying thread configs; fails under parallel load
+thread_config/thread_config.test.py
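
The serial list format is easy to consume on its own; as a minimal sketch,
a reader for it might look like this (the function name and parsing code
below are illustrative assumptions, not the actual loader in
autest-parallel.py):

    from pathlib import Path

    def load_serial_tests(path):
        """Return test paths, one per line, skipping blanks and # comments."""
        tests = []
        for raw in Path(path).read_text().splitlines():
            line = raw.strip()
            if line and not line.startswith("#"):
                tests.append(line)
        return tests

    # With the file above this prints ['thread_config/thread_config.test.py']
    print(load_serial_tests("tests/serial_tests.txt"))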
