https://github.com/python/cpython/commit/212448b1623b45f19c5529595e081da72a6521a0
commit: 212448b1623b45f19c5529595e081da72a6521a0
branch: main
author: Hugo van Kemenade <[email protected]>
committer: hugovk <[email protected]>
date: 2024-12-10T09:44:15+02:00
summary:
gh-127718: Add colour to `test.regrtest` output (#127719)
files:
A Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst
M Doc/library/test.rst
M Lib/test/libregrtest/main.py
M Lib/test/libregrtest/result.py
M Lib/test/libregrtest/results.py
M Lib/test/libregrtest/single.py
M Lib/test/test_regrtest.py
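
Every file touched below follows one pattern, so it is worth sketching once up front. This is a minimal illustration, assuming `_colorize.get_colors()` returns an object whose colour attributes are ANSI escape sequences when colour is enabled and empty strings otherwise, so the same f-strings degrade to plain text:

    # Minimal sketch of the pattern used throughout this commit.
    # Assumption: with colour disabled, every attribute on the returned
    # object is an empty string, so the output is the old plain text.
    from _colorize import get_colors

    ansi = get_colors()
    red, reset = ansi.BOLD_RED, ansi.RESET
    print(f"{red}2 tests failed again:{reset}")
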
diff --git a/Doc/library/test.rst b/Doc/library/test.rst
index 04d28aee0f8672..b5b6e442e218fd 100644
--- a/Doc/library/test.rst
+++ b/Doc/library/test.rst
@@ -192,6 +192,10 @@ top-level directory where Python was built. On Windows,
executing :program:`rt.bat` from your :file:`PCbuild` directory will run all
regression tests.
+.. versionadded:: 3.14
+ Output is colorized by default and can be
+ :ref:`controlled using environment variables <using-on-controlling-color>`.
+
:mod:`test.support` --- Utilities for the Python test suite
===========================================================
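
The doc addition above points at CPython's standard colour controls. As a sketch, assuming the usual PYTHON_COLORS semantics ("0" disables, "1" forces), colour can be switched off for a whole regrtest run from Python:

    # Sketch: run a test with colour disabled via the environment.
    # Assumes PYTHON_COLORS is among the variables the linked section
    # documents.
    import os
    import subprocess
    import sys

    env = dict(os.environ, PYTHON_COLORS="0")
    subprocess.run([sys.executable, "-m", "test", "test_os"], env=env)
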
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 49209b0cec756e..dcbcc6790c68d8 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -6,6 +6,7 @@
import sysconfig
import time
import trace
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import NoReturn
from test.support import os_helper, MS_WINDOWS, flush_std_streams
@@ -270,6 +271,9 @@ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
return runtests
def rerun_failed_tests(self, runtests: RunTests) -> None:
+ ansi = get_colors()
+ red, reset = ansi.BOLD_RED, ansi.RESET
+
if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
@@ -284,7 +288,10 @@ def rerun_failed_tests(self, runtests: RunTests) -> None:
rerun_runtests = self._rerun_failed_tests(runtests)
if self.results.bad:
- print(count(len(self.results.bad), 'test'), "failed again:")
+ print(
+ f"{red}{count(len(self.results.bad), 'test')} "
+ f"failed again:{reset}"
+ )
printlist(self.results.bad)
self.display_result(rerun_runtests)
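
The count() helper used in the recoloured message comes from test.libregrtest.utils; a rough, hypothetical equivalent for readers without the source at hand:

    # Approximation of test.libregrtest.utils.count(), not the source:
    # naive pluralisation for messages like "2 tests failed again:".
    def count(n: int, word: str) -> str:
        if n == 1:
            return f"{n} {word}"
        return f"{n} {word}s"

    assert count(1, "test") == "1 test"
    assert count(2, "test") == "2 tests"
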
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
index 7553efe5e8abeb..daf7624366ee20 100644
--- a/Lib/test/libregrtest/result.py
+++ b/Lib/test/libregrtest/result.py
@@ -1,5 +1,6 @@
import dataclasses
import json
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import Any
from .utils import (
@@ -105,54 +106,71 @@ def is_failed(self, fail_env_changed: bool) -> bool:
return State.is_failed(self.state)
def _format_failed(self):
+ ansi = get_colors()
+ red, reset = ansi.BOLD_RED, ansi.RESET
if self.errors and self.failures:
le = len(self.errors)
lf = len(self.failures)
error_s = "error" + ("s" if le > 1 else "")
failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.test_name} failed ({le} {error_s}, {lf}
{failure_s})"
+ return (
+ f"{red}{self.test_name} failed "
+ f"({le} {error_s}, {lf} {failure_s}){reset}"
+ )
if self.errors:
le = len(self.errors)
error_s = "error" + ("s" if le > 1 else "")
- return f"{self.test_name} failed ({le} {error_s})"
+ return f"{red}{self.test_name} failed ({le} {error_s}){reset}"
if self.failures:
lf = len(self.failures)
failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.test_name} failed ({lf} {failure_s})"
+ return f"{red}{self.test_name} failed ({lf} {failure_s}){reset}"
- return f"{self.test_name} failed"
+ return f"{red}{self.test_name} failed{reset}"
def __str__(self) -> str:
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
+
match self.state:
case State.PASSED:
- return f"{self.test_name} passed"
+ return f"{green}{self.test_name} passed{reset}"
case State.FAILED:
- return self._format_failed()
+ return f"{red}{self._format_failed()}{reset}"
case State.SKIPPED:
- return f"{self.test_name} skipped"
+ return f"{yellow}{self.test_name} skipped{reset}"
case State.UNCAUGHT_EXC:
- return f"{self.test_name} failed (uncaught exception)"
+ return (
+ f"{red}{self.test_name} failed (uncaught exception){reset}"
+ )
case State.REFLEAK:
- return f"{self.test_name} failed (reference leak)"
+ return f"{red}{self.test_name} failed (reference leak){reset}"
case State.ENV_CHANGED:
- return f"{self.test_name} failed (env changed)"
+ return f"{red}{self.test_name} failed (env changed){reset}"
case State.RESOURCE_DENIED:
- return f"{self.test_name} skipped (resource denied)"
+ return f"{yellow}{self.test_name} skipped (resource
denied){reset}"
case State.INTERRUPTED:
- return f"{self.test_name} interrupted"
+ return f"{yellow}{self.test_name} interrupted{reset}"
case State.WORKER_FAILED:
- return f"{self.test_name} worker non-zero exit code"
+ return (
+ f"{red}{self.test_name} worker non-zero exit code{reset}"
+ )
case State.WORKER_BUG:
- return f"{self.test_name} worker bug"
+ return f"{red}{self.test_name} worker bug{reset}"
case State.DID_NOT_RUN:
- return f"{self.test_name} ran no tests"
+ return f"{yellow}{self.test_name} ran no tests{reset}"
case State.TIMEOUT:
assert self.duration is not None, "self.duration is None"
return f"{self.test_name} timed out
({format_duration(self.duration)})"
case _:
- raise ValueError("unknown result state: {state!r}")
+ raise ValueError(
+ f"{red}unknown result state: {{state!r}}{reset}"
+ )
def has_meaningful_duration(self):
return State.has_meaningful_duration(self.state)
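
A consequence of calling get_colors() inside __str__ is that the colour decision happens at render time, not when the result is created. A sketch, assuming State and TestResult are importable as shown in the hunk above:

    # Illustrative only: the same TestResult prints plain or coloured
    # depending on the environment when str() is taken.
    from test.libregrtest.result import State, TestResult

    result = TestResult("test_os")
    result.state = State.SKIPPED
    print(result)  # "test_os skipped", wrapped in yellow when enabled
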
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
index 9eda926966dc7e..a35934fc2c9ca8 100644
--- a/Lib/test/libregrtest/results.py
+++ b/Lib/test/libregrtest/results.py
@@ -1,5 +1,6 @@
import sys
import trace
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import TYPE_CHECKING
from .runtests import RunTests
@@ -59,19 +60,24 @@ def no_tests_run(self) -> bool:
def get_state(self, fail_env_changed: bool) -> str:
state = []
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
if self.bad:
- state.append("FAILURE")
+ state.append(f"{red}FAILURE{reset}")
elif fail_env_changed and self.env_changed:
- state.append("ENV CHANGED")
+ state.append(f"{yellow}ENV CHANGED{reset}")
elif self.no_tests_run():
- state.append("NO TESTS RAN")
+ state.append(f"{yellow}NO TESTS RAN{reset}")
if self.interrupted:
- state.append("INTERRUPTED")
+ state.append(f"{yellow}INTERRUPTED{reset}")
if self.worker_bug:
- state.append("WORKER BUG")
+ state.append(f"{red}WORKER BUG{reset}")
if not state:
- state.append("SUCCESS")
+ state.append(f"{green}SUCCESS{reset}")
return ', '.join(state)
@@ -197,27 +203,51 @@ def write_junit(self, filename: StrPath) -> None:
f.write(s)
def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
+
if print_slowest:
self.test_times.sort(reverse=True)
print()
- print("10 slowest tests:")
+ print(f"{yellow}10 slowest tests:{reset}")
for test_time, test in self.test_times[:10]:
- print("- %s: %s" % (test, format_duration(test_time)))
+ print(f"- {test}: {format_duration(test_time)}")
all_tests = []
omitted = set(tests) - self.get_executed()
# less important
- all_tests.append((sorted(omitted), "test", "{} omitted:"))
+ all_tests.append(
+ (sorted(omitted), "test", f"{yellow}{{}} omitted:{reset}")
+ )
if not quiet:
- all_tests.append((self.skipped, "test", "{} skipped:"))
- all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
- all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+ all_tests.append(
+ (self.skipped, "test", f"{yellow}{{}} skipped:{reset}")
+ )
+ all_tests.append(
+ (
+ self.resource_denied,
+ "test",
+ f"{yellow}{{}} skipped (resource denied):{reset}",
+ )
+ )
+ all_tests.append(
+ (self.run_no_tests, "test", f"{yellow}{{}} run no tests:{reset}")
+ )
# more important
- all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
- all_tests.append((self.rerun, "re-run test", "{}:"))
- all_tests.append((self.bad, "test", "{} failed:"))
+ all_tests.append(
+ (
+ self.env_changed,
+ "test",
+ f"{yellow}{{}} altered the execution environment (env
changed):{reset}",
+ )
+ )
+ all_tests.append((self.rerun, "re-run test", f"{yellow}{{}}:{reset}"))
+ all_tests.append((self.bad, "test", f"{red}{{}} failed:{reset}"))
for tests_list, count_text, title_format in all_tests:
if tests_list:
@@ -229,26 +259,29 @@ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
if self.good and not quiet:
print()
text = count(len(self.good), "test")
- text = f"{text} OK."
- if (self.is_all_good() and len(self.good) > 1):
+ text = f"{green}{text} OK.{reset}"
+ if self.is_all_good() and len(self.good) > 1:
text = f"All {text}"
print(text)
if self.interrupted:
print()
- print("Test suite interrupted by signal SIGINT.")
+ print(f"{yellow}Test suite interrupted by signal SIGINT.{reset}")
def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
# Total tests
+ ansi = get_colors()
+ red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
+
stats = self.stats
text = f'run={stats.tests_run:,}'
if filtered:
text = f"{text} (filtered)"
report = [text]
if stats.failures:
- report.append(f'failures={stats.failures:,}')
+ report.append(f'{red}failures={stats.failures:,}{reset}')
if stats.skipped:
- report.append(f'skipped={stats.skipped:,}')
+ report.append(f'{yellow}skipped={stats.skipped:,}{reset}')
print(f"Total tests: {' '.join(report)}")
# Total test files
@@ -263,14 +296,14 @@ def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
if filtered:
text = f"{text} (filtered)"
report = [text]
- for name, tests in (
- ('failed', self.bad),
- ('env_changed', self.env_changed),
- ('skipped', self.skipped),
- ('resource_denied', self.resource_denied),
- ('rerun', self.rerun),
- ('run_no_tests', self.run_no_tests),
+ for name, tests, color in (
+ ('failed', self.bad, red),
+ ('env_changed', self.env_changed, yellow),
+ ('skipped', self.skipped, yellow),
+ ('resource_denied', self.resource_denied, yellow),
+ ('rerun', self.rerun, yellow),
+ ('run_no_tests', self.run_no_tests, yellow),
):
if tests:
- report.append(f'{name}={len(tests)}')
+ report.append(f'{color}{name}={len(tests)}{reset}')
print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index 17323e7f9cf730..0e174f82abed28 100644
--- a/Lib/test/libregrtest/single.py
+++ b/Lib/test/libregrtest/single.py
@@ -7,6 +7,7 @@
import traceback
import unittest
+from _colorize import get_colors # type: ignore[import-not-found]
from test import support
from test.support import threading_helper
@@ -161,6 +162,8 @@ def test_func():
def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
display_failure: bool = True) -> None:
# Handle exceptions, detect environment changes.
+ ansi = get_colors()
+ red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
# Reset the environment_altered flag to detect if a test altered
# the environment
@@ -181,18 +184,18 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
_load_run_test(result, runtests)
except support.ResourceDenied as exc:
if not quiet and not pgo:
- print(f"{test_name} skipped -- {exc}", flush=True)
+ print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.RESOURCE_DENIED
return
except unittest.SkipTest as exc:
if not quiet and not pgo:
- print(f"{test_name} skipped -- {exc}", flush=True)
+ print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.SKIPPED
return
except support.TestFailedWithDetails as exc:
- msg = f"test {test_name} failed"
+ msg = f"{red}test {test_name} failed{reset}"
if display_failure:
- msg = f"{msg} -- {exc}"
+ msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.errors = exc.errors
@@ -200,9 +203,9 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
result.stats = exc.stats
return
except support.TestFailed as exc:
- msg = f"test {test_name} failed"
+ msg = f"{red}test {test_name} failed{reset}"
if display_failure:
- msg = f"{msg} -- {exc}"
+ msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.stats = exc.stats
@@ -217,7 +220,7 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
except:
if not pgo:
msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
+ print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
return
@@ -300,6 +303,9 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
If runtests.use_junit, xml_data is a list containing each generated
testsuite element.
"""
+ ansi = get_colors()
+ red, reset, yellow = ansi.BOLD_RED, ansi.RESET, ansi.YELLOW
+
start_time = time.perf_counter()
result = TestResult(test_name)
pgo = runtests.pgo
@@ -308,7 +314,7 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
except:
if not pgo:
msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
+ print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
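
The crash path above appears twice in single.py; here is a self-contained sketch of the same pattern (the test name is hypothetical):

    import sys
    import traceback
    from _colorize import get_colors

    ansi = get_colors()
    red, reset = ansi.BOLD_RED, ansi.RESET
    try:
        raise RuntimeError("boom")  # stand-in for a crashing test
    except Exception:
        msg = traceback.format_exc()
        print(f"{red}test test_demo crashed -- {msg}{reset}",
              file=sys.stderr, flush=True)
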
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 0ab7a23aca1df8..ab46ccbf004a3a 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -4,6 +4,7 @@
Note: test_regrtest cannot be run twice in parallel.
"""
+import _colorize
import contextlib
import dataclasses
import glob
@@ -21,6 +22,7 @@
import tempfile
import textwrap
import unittest
+import unittest.mock
from xml.etree import ElementTree
from test import support
@@ -2487,5 +2489,49 @@ def test_sanitize_xml(self):
'valid t\xe9xt \u20ac')
+from test.libregrtest.results import TestResults
+
+
+class TestColorized(unittest.TestCase):
+ def test_test_result_get_state(self):
+ # Arrange
+ green = _colorize.ANSIColors.GREEN
+ red = _colorize.ANSIColors.BOLD_RED
+ reset = _colorize.ANSIColors.RESET
+ yellow = _colorize.ANSIColors.YELLOW
+
+ good_results = TestResults()
+ good_results.good = ["good1", "good2"]
+ bad_results = TestResults()
+ bad_results.bad = ["bad1", "bad2"]
+ no_results = TestResults()
+ no_results.bad = []
+ interrupted_results = TestResults()
+ interrupted_results.interrupted = True
+ interrupted_worker_bug = TestResults()
+ interrupted_worker_bug.interrupted = True
+ interrupted_worker_bug.worker_bug = True
+
+ for results, expected in (
+ (good_results, f"{green}SUCCESS{reset}"),
+ (bad_results, f"{red}FAILURE{reset}"),
+ (no_results, f"{yellow}NO TESTS RAN{reset}"),
+ (interrupted_results, f"{yellow}INTERRUPTED{reset}"),
+ (
+ interrupted_worker_bug,
+ f"{yellow}INTERRUPTED{reset}, {red}WORKER BUG{reset}",
+ ),
+ ):
+ with self.subTest(results=results, expected=expected):
+ # Act
+ with unittest.mock.patch(
+ "_colorize.can_colorize", return_value=True
+ ):
+ result = results.get_state(fail_env_changed=False)
+
+ # Assert
+ self.assertEqual(result, expected)
+
+
if __name__ == '__main__':
unittest.main()
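
The new test forces colour on by patching _colorize.can_colorize. The complementary direction, assuming get_colors() consults can_colorize() at call time, pins the plain output:

    # Sketch: force colour *off* to assert on the uncoloured string.
    import unittest.mock
    from test.libregrtest.results import TestResults

    with unittest.mock.patch("_colorize.can_colorize", return_value=False):
        state = TestResults().get_state(fail_env_changed=False)
    assert state == "SUCCESS"
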
diff --git a/Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst b/Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst
new file mode 100644
index 00000000000000..6c1b7bef610e8c
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst
@@ -0,0 +1 @@
+Add colour to :mod:`test.regrtest` output. Patch by Hugo van Kemenade.