https://github.com/python/cpython/commit/d864a9150bb2a2594a389538b8ee3187ad34afbc
commit: d864a9150bb2a2594a389538b8ee3187ad34afbc
branch: 3.13
author: Serhiy Storchaka <storch...@gmail.com>
committer: serhiy-storchaka <storch...@gmail.com>
date: 2025-06-06T13:16:50Z
summary:

[3.13] gh-135120: Add test.support.subTests() (GH-135121) (GH-135210)

(cherry picked from commit 6ef06fad84244261c695ec337c7d2734277054db)

files:
A Misc/NEWS.d/next/Tests/2025-06-04-13-07-44.gh-issue-135120.NapnZT.rst
M Lib/test/support/__init__.py
M Lib/test/test_http_cookiejar.py
M Lib/test/test_ntpath.py
M Lib/test/test_posixpath.py
M Lib/test/test_urlparse.py

diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 7543c833ddb3df..3d1e17d8aa2497 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -920,6 +920,31 @@ def check_sizeof(test, o, size):
             % (type(o), result, size)
     test.assertEqual(result, size, msg)
 
+def subTests(arg_names, arg_values, /, *, _do_cleanups=False):
+    """Run multiple subtests with different parameters.
+    """
+    single_param = False
+    if isinstance(arg_names, str):
+        arg_names = arg_names.replace(',',' ').split()
+        if len(arg_names) == 1:
+            single_param = True
+    arg_values = tuple(arg_values)
+    def decorator(func):
+        if isinstance(func, type):
+            raise TypeError('subTests() can only decorate methods, not classes')
+        @functools.wraps(func)
+        def wrapper(self, /, *args, **kwargs):
+            for values in arg_values:
+                if single_param:
+                    values = (values,)
+                subtest_kwargs = dict(zip(arg_names, values))
+                with self.subTest(**subtest_kwargs):
+                    func(self, *args, **kwargs, **subtest_kwargs)
+                if _do_cleanups:
+                    self.doCleanups()
+        return wrapper
+    return decorator
+
 #=======================================================================
 # Decorator/context manager for running a code in a different locale,
 # correctly resetting it afterwards.
diff --git a/Lib/test/test_http_cookiejar.py b/Lib/test/test_http_cookiejar.py
index 15f04362e31693..51fa4a3d4137d9 100644
--- a/Lib/test/test_http_cookiejar.py
+++ b/Lib/test/test_http_cookiejar.py
@@ -4,6 +4,7 @@
 import stat
 import sys
 import re
+from test import support
 from test.support import os_helper
 from test.support import warnings_helper
 from test.support.testcase import ExtraAssertions
@@ -106,8 +107,7 @@ def test_http2time_formats(self):
             self.assertEqual(http2time(s.lower()), test_t, s.lower())
             self.assertEqual(http2time(s.upper()), test_t, s.upper())
 
-    def test_http2time_garbage(self):
-        for test in [
+    @support.subTests('test', [
             '',
             'Garbage',
             'Mandag 16. September 1996',
@@ -122,10 +122,9 @@ def test_http2time_garbage(self):
             '08-01-3697739',
             '09 Feb 19942632 22:23:32 GMT',
             'Wed, 09 Feb 1994834 22:23:32 GMT',
-            ]:
-            self.assertIsNone(http2time(test),
-                              "http2time(%s) is not None\n"
-                              "http2time(test) %s" % (test, http2time(test)))
+        ])
+    def test_http2time_garbage(self, test):
+        self.assertIsNone(http2time(test))
 
     def test_http2time_redos_regression_actually_completes(self):
         # LOOSE_HTTP_DATE_RE was vulnerable to malicious input which caused catastrophic backtracking (REDoS).
@@ -150,9 +149,7 @@ def parse_date(text):
         self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"),
                          (1994, 2, 3, 14, 15, 29))
 
-    def test_iso2time_formats(self):
-        # test iso2time for supported dates.
-        tests = [
+    @support.subTests('s', [
             '1994-02-03 00:00:00 -0000', # ISO 8601 format
             '1994-02-03 00:00:00 +0000', # ISO 8601 format
             '1994-02-03 00:00:00',       # zone is optional
@@ -165,16 +162,15 @@ def test_iso2time_formats(self):
             # A few tests with extra space at various places
             '  1994-02-03 ',
             '  1994-02-03T00:00:00  ',
-        ]
-
+        ])
+    def test_iso2time_formats(self, s):
+        # test iso2time for supported dates.
         test_t = 760233600  # assume broken POSIX counting of seconds
-        for s in tests:
-            self.assertEqual(iso2time(s), test_t, s)
-            self.assertEqual(iso2time(s.lower()), test_t, s.lower())
-            self.assertEqual(iso2time(s.upper()), test_t, s.upper())
+        self.assertEqual(iso2time(s), test_t, s)
+        self.assertEqual(iso2time(s.lower()), test_t, s.lower())
+        self.assertEqual(iso2time(s.upper()), test_t, s.upper())
 
-    def test_iso2time_garbage(self):
-        for test in [
+    @support.subTests('test', [
             '',
             'Garbage',
             'Thursday, 03-Feb-94 00:00:00 GMT',
@@ -187,9 +183,9 @@ def test_iso2time_garbage(self):
             '01-01-1980 00:00:62',
             '01-01-1980T00:00:62',
             '19800101T250000Z',
-            ]:
-            self.assertIsNone(iso2time(test),
-                              "iso2time(%r)" % test)
+        ])
+    def test_iso2time_garbage(self, test):
+        self.assertIsNone(iso2time(test))
 
     def test_iso2time_performance_regression(self):
         # If ISO_DATE_RE regresses to quadratic complexity, this test will take a very long time to succeed.
@@ -200,24 +196,23 @@ def test_iso2time_performance_regression(self):
 
 class HeaderTests(unittest.TestCase):
 
-    def test_parse_ns_headers(self):
-        # quotes should be stripped
-        expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
-        for hdr in [
+    @support.subTests('hdr', [
             'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
             'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
-            ]:
-            self.assertEqual(parse_ns_headers([hdr]), expected)
-
-    def test_parse_ns_headers_version(self):
-
+        ])
+    def test_parse_ns_headers(self, hdr):
         # quotes should be stripped
-        expected = [[('foo', 'bar'), ('version', '1')]]
-        for hdr in [
+        expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
+        self.assertEqual(parse_ns_headers([hdr]), expected)
+
+    @support.subTests('hdr', [
             'foo=bar; version="1"',
             'foo=bar; Version="1"',
-            ]:
-            self.assertEqual(parse_ns_headers([hdr]), expected)
+        ])
+    def test_parse_ns_headers_version(self, hdr):
+        # quotes should be stripped
+        expected = [[('foo', 'bar'), ('version', '1')]]
+        self.assertEqual(parse_ns_headers([hdr]), expected)
 
     def test_parse_ns_headers_special_names(self):
         # names such as 'expires' are not special in first name=value pair
@@ -233,8 +228,7 @@ def test_join_header_words(self):
 
         self.assertEqual(join_header_words([[]]), "")
 
-    def test_split_header_words(self):
-        tests = [
+    @support.subTests('arg,expect', [
             ("foo", [[("foo", None)]]),
             ("foo=bar", [[("foo", "bar")]]),
             ("   foo   ", [[("foo", None)]]),
@@ -251,24 +245,22 @@ def test_split_header_words(self):
             (r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
              [[("foo", None), ("bar", "baz")],
               [("spam", "")], [("foo", ',;"')], [("bar", "")]]),
-            ]
-
-        for arg, expect in tests:
-            try:
-                result = split_header_words([arg])
-            except:
-                import traceback, io
-                f = io.StringIO()
-                traceback.print_exc(None, f)
-                result = "(error -- traceback follows)\n\n%s" % f.getvalue()
-            self.assertEqual(result,  expect, """
+        ])
+    def test_split_header_words(self, arg, expect):
+        try:
+            result = split_header_words([arg])
+        except:
+            import traceback, io
+            f = io.StringIO()
+            traceback.print_exc(None, f)
+            result = "(error -- traceback follows)\n\n%s" % f.getvalue()
+        self.assertEqual(result,  expect, """
 When parsing: '%s'
 Expected:     '%s'
 Got:          '%s'
 """ % (arg, expect, result))
 
-    def test_roundtrip(self):
-        tests = [
+    @support.subTests('arg,expect', [
             ("foo", "foo"),
             ("foo=bar", "foo=bar"),
             ("   foo   ", "foo"),
@@ -301,12 +293,11 @@ def test_roundtrip(self):
 
             ('n; foo="foo;_", bar="foo,_"',
              'n; foo="foo;_", bar="foo,_"'),
-            ]
-
-        for arg, expect in tests:
-            input = split_header_words([arg])
-            res = join_header_words(input)
-            self.assertEqual(res, expect, """
+        ])
+    def test_roundtrip(self, arg, expect):
+        input = split_header_words([arg])
+        res = join_header_words(input)
+        self.assertEqual(res, expect, """
 When parsing: '%s'
 Expected:     '%s'
 Got:          '%s'
@@ -508,14 +499,7 @@ class CookieTests(unittest.TestCase):
 ##   just the 7 special TLD's listed in their spec. And folks rely on
 ##   that...
 
-    def test_domain_return_ok(self):
-        # test optimization: .domain_return_ok() should filter out most
-        # domains in the CookieJar before we try to access them (because that
-        # may require disk access -- in particular, with MSIECookieJar)
-        # This is only a rough check for performance reasons, so it's not too
-        # critical as long as it's sufficiently liberal.
-        pol = DefaultCookiePolicy()
-        for url, domain, ok in [
+    @support.subTests('url,domain,ok', [
+            ("http://foo.bar.com/", "blah.com", False),
+            ("http://foo.bar.com/", "rhubarb.blah.com", False),
+            ("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
@@ -535,11 +519,18 @@ def test_domain_return_ok(self):
+            ("http://foo/", ".local", True),
+            ("http://barfoo.com", ".foo.com", False),
+            ("http://barfoo.com", "foo.com", False),
-            ]:
-            request = urllib.request.Request(url)
-            r = pol.domain_return_ok(domain, request)
-            if ok: self.assertTrue(r)
-            else: self.assertFalse(r)
+        ])
+    def test_domain_return_ok(self, url, domain, ok):
+        # test optimization: .domain_return_ok() should filter out most
+        # domains in the CookieJar before we try to access them (because that
+        # may require disk access -- in particular, with MSIECookieJar)
+        # This is only a rough check for performance reasons, so it's not too
+        # critical as long as it's sufficiently liberal.
+        pol = DefaultCookiePolicy()
+        request = urllib.request.Request(url)
+        r = pol.domain_return_ok(domain, request)
+        if ok: self.assertTrue(r)
+        else: self.assertFalse(r)
 
     def test_missing_value(self):
         # missing = sign in Cookie: header is regarded by Mozilla as a missing
@@ -573,10 +564,7 @@ def test_missing_value(self):
         self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"),
                          '"spam"; eggs')
 
-    def test_rfc2109_handling(self):
-        # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
-        # dependent on policy settings
-        for rfc2109_as_netscape, rfc2965, version in [
+    @support.subTests('rfc2109_as_netscape,rfc2965,version', [
             # default according to rfc2965 if not explicitly specified
             (None, False, 0),
             (None, True, 1),
@@ -585,24 +573,27 @@ def test_rfc2109_handling(self):
             (False, True, 1),
             (True, False, 0),
             (True, True, 0),
-            ]:
-            policy = DefaultCookiePolicy(
-                rfc2109_as_netscape=rfc2109_as_netscape,
-                rfc2965=rfc2965)
-            c = CookieJar(policy)
+            interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
-            try:
-                cookie = c._cookies["www.example.com"]["/"]["ni"]
-            except KeyError:
-                self.assertIsNone(version)  # didn't expect a stored cookie
-            else:
-                self.assertEqual(cookie.version, version)
-                # 2965 cookies are unaffected
-                interact_2965(c, "http://www.example.com/",
-                              "foo=bar; Version=1")
-                if rfc2965:
-                    cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
-                    self.assertEqual(cookie2965.version, 1)
+        ])
+    def test_rfc2109_handling(self, rfc2109_as_netscape, rfc2965, version):
+        # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
+        # dependent on policy settings
+        policy = DefaultCookiePolicy(
+            rfc2109_as_netscape=rfc2109_as_netscape,
+            rfc2965=rfc2965)
+        c = CookieJar(policy)
+        interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
+        try:
+            cookie = c._cookies["www.example.com"]["/"]["ni"]
+        except KeyError:
+            self.assertIsNone(version)  # didn't expect a stored cookie
+        else:
+            self.assertEqual(cookie.version, version)
+            # 2965 cookies are unaffected
+        interact_2965(c, "http://www.example.com/",
+                            "foo=bar; Version=1")
+            if rfc2965:
+                cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
+                self.assertEqual(cookie2965.version, 1)
 
     def test_ns_parser(self):
         c = CookieJar()
@@ -770,8 +761,7 @@ def test_default_path_with_query(self):
         # Cookie is sent back to the same URI.
         self.assertEqual(interact_netscape(cj, uri), value)
 
-    def test_escape_path(self):
-        cases = [
+    @support.subTests('arg,result', [
             # quoted safe
             ("/foo%2f/bar", "/foo%2F/bar"),
             ("/foo%2F/bar", "/foo%2F/bar"),
@@ -791,9 +781,9 @@ def test_escape_path(self):
             ("/foo/bar\u00fc", "/foo/bar%C3%BC"),     # UTF-8 encoded
             # unicode
             ("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"),  # UTF-8 encoded
-            ]
-        for arg, result in cases:
-            self.assertEqual(escape_path(arg), result)
+        ])
+    def test_escape_path(self, arg, result):
+        self.assertEqual(escape_path(arg), result)
 
     def test_request_path(self):
         # with parameters
diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py
index 7fa46fe84a7304..e1982dfd0bdfd9 100644
--- a/Lib/test/test_ntpath.py
+++ b/Lib/test/test_ntpath.py
@@ -7,6 +7,7 @@
 import unittest
 import warnings
 from ntpath import ALLOW_MISSING
+from test import support
 from test.support import cpython_only, os_helper
 from test.support import TestFailed, is_emscripten
 from test.support.os_helper import FakePath
@@ -79,24 +80,7 @@ def tester(fn, wantResult):
 
 
 def _parameterize(*parameters):
-    """Simplistic decorator to parametrize a test
-
-    Runs the decorated test multiple times in subTest, with a value from
-    'parameters' passed as an extra positional argument.
-    Calls doCleanups() after each run.
-
-    Not for general use. Intended to avoid indenting for easier backports.
-
-    See https://discuss.python.org/t/91827 for discussing generalizations.
-    """
-    def _parametrize_decorator(func):
-        def _parameterized(self, *args, **kwargs):
-            for parameter in parameters:
-                with self.subTest(parameter):
-                    func(self, *args, parameter, **kwargs)
-                self.doCleanups()
-        return _parameterized
-    return _parametrize_decorator
+    return support.subTests('kwargs', parameters, _do_cleanups=True)
 
 
 class NtpathTestCase(unittest.TestCase):
diff --git a/Lib/test/test_posixpath.py b/Lib/test/test_posixpath.py
index c45ce6d3ef7820..21f06712548d88 100644
--- a/Lib/test/test_posixpath.py
+++ b/Lib/test/test_posixpath.py
@@ -36,23 +36,7 @@ def skip_if_ABSTFN_contains_backslash(test):
 
 
 def _parameterize(*parameters):
-    """Simplistic decorator to parametrize a test
-
-    Runs the decorated test multiple times in subTest, with a value from
-    'parameters' passed as an extra positional argument.
-    Does *not* call doCleanups() after each run.
-
-    Not for general use. Intended to avoid indenting for easier backports.
-
-    See https://discuss.python.org/t/91827 for discussing generalizations.
-    """
-    def _parametrize_decorator(func):
-        def _parameterized(self, *args, **kwargs):
-            for parameter in parameters:
-                with self.subTest(parameter):
-                    func(self, *args, parameter, **kwargs)
-        return _parameterized
-    return _parametrize_decorator
+    return support.subTests('kwargs', parameters)
 
 
 class PosixPathTest(unittest.TestCase):
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 1fa27257c3c423..77aeadfcc3418f 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -2,6 +2,7 @@
 import unicodedata
 import unittest
 import urllib.parse
+from test import support
 from test.support.testcase import ExtraAssertions
 
 RFC1808_BASE = "http://a/b/c/d;p?q#f"
@@ -157,27 +158,25 @@ def checkRoundtrips(self, url, parsed, split, url2=None):
         self.assertEqual(result3.hostname, result.hostname)
         self.assertEqual(result3.port,     result.port)
 
-    def test_qsl(self):
-        for orig, expect in parse_qsl_test_cases:
-            result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
-            self.assertEqual(result, expect, "Error parsing %r" % orig)
-            expect_without_blanks = [v for v in expect if len(v[1])]
-            result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
-            self.assertEqual(result, expect_without_blanks,
-                            "Error parsing %r" % orig)
-
-    def test_qs(self):
-        for orig, expect in parse_qs_test_cases:
-            result = urllib.parse.parse_qs(orig, keep_blank_values=True)
-            self.assertEqual(result, expect, "Error parsing %r" % orig)
-            expect_without_blanks = {v: expect[v]
-                                     for v in expect if len(expect[v][0])}
-            result = urllib.parse.parse_qs(orig, keep_blank_values=False)
-            self.assertEqual(result, expect_without_blanks,
-                            "Error parsing %r" % orig)
-
-    def test_roundtrips(self):
-        str_cases = [
+    @support.subTests('orig,expect', parse_qsl_test_cases)
+    def test_qsl(self, orig, expect):
+        result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
+        self.assertEqual(result, expect)
+        expect_without_blanks = [v for v in expect if len(v[1])]
+        result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
+        self.assertEqual(result, expect_without_blanks)
+
+    @support.subTests('orig,expect', parse_qs_test_cases)
+    def test_qs(self, orig, expect):
+        result = urllib.parse.parse_qs(orig, keep_blank_values=True)
+        self.assertEqual(result, expect)
+        expect_without_blanks = {v: expect[v]
+                                 for v in expect if len(expect[v][0])}
+        result = urllib.parse.parse_qs(orig, keep_blank_values=False)
+        self.assertEqual(result, expect_without_blanks)
+
+    @support.subTests('bytes', (False, True))
+    @support.subTests('url,parsed,split', [
             ('path/to/file',
              ('', '', 'path/to/file', '', '', ''),
              ('', '', 'path/to/file', '', '')),
@@ -264,23 +263,21 @@ def test_roundtrips(self):
             ('sch_me:path/to/file',
              ('', '', 'sch_me:path/to/file', '', '', ''),
              ('', '', 'sch_me:path/to/file', '', '')),
-            ]
-        def _encode(t):
-            return (t[0].encode('ascii'),
-                    tuple(x.encode('ascii') for x in t[1]),
-                    tuple(x.encode('ascii') for x in t[2]))
-        bytes_cases = [_encode(x) for x in str_cases]
-        str_cases += [
             ('schème:path/to/file',
              ('', '', 'schème:path/to/file', '', '', ''),
              ('', '', 'schème:path/to/file', '', '')),
-            ]
-        for url, parsed, split in str_cases + bytes_cases:
-            with self.subTest(url):
-                self.checkRoundtrips(url, parsed, split)
-
-    def test_roundtrips_normalization(self):
-        str_cases = [
+            ])
+    def test_roundtrips(self, bytes, url, parsed, split):
+        if bytes:
+            if not url.isascii():
+                self.skipTest('non-ASCII bytes')
+            url = str_encode(url)
+            parsed = tuple_encode(parsed)
+            split = tuple_encode(split)
+        self.checkRoundtrips(url, parsed, split)
+
+    @support.subTests('bytes', (False, True))
+    @support.subTests('url,url2,parsed,split', [
             ('///path/to/file',
              '/path/to/file',
              ('', '', '/path/to/file', '', '', ''),
@@ -301,22 +298,18 @@ def test_roundtrips_normalization(self):
              'https:///tmp/junk.txt',
              ('https', '', '/tmp/junk.txt', '', '', ''),
              ('https', '', '/tmp/junk.txt', '', '')),
-        ]
-        def _encode(t):
-            return (t[0].encode('ascii'),
-                    t[1].encode('ascii'),
-                    tuple(x.encode('ascii') for x in t[2]),
-                    tuple(x.encode('ascii') for x in t[3]))
-        bytes_cases = [_encode(x) for x in str_cases]
-        for url, url2, parsed, split in str_cases + bytes_cases:
-            with self.subTest(url):
-                self.checkRoundtrips(url, parsed, split, url2)
-
-    def test_http_roundtrips(self):
-        # urllib.parse.urlsplit treats 'http:' as an optimized special case,
-        # so we test both 'http:' and 'https:' in all the following.
-        # Three cheers for white box knowledge!
-        str_cases = [
+        ])
+    def test_roundtrips_normalization(self, bytes, url, url2, parsed, split):
+        if bytes:
+            url = str_encode(url)
+            url2 = str_encode(url2)
+            parsed = tuple_encode(parsed)
+            split = tuple_encode(split)
+        self.checkRoundtrips(url, parsed, split, url2)
+
+    @support.subTests('bytes', (False, True))
+    @support.subTests('scheme', ('http', 'https'))
+    @support.subTests('url,parsed,split', [
             ('://www.python.org',
              ('www.python.org', '', '', '', ''),
              ('www.python.org', '', '', '')),
@@ -332,23 +325,20 @@ def test_http_roundtrips(self):
             ('://a/b/c/d;p?q#f',
              ('a', '/b/c/d', 'p', 'q', 'f'),
              ('a', '/b/c/d;p', 'q', 'f')),
-            ]
-        def _encode(t):
-            return (t[0].encode('ascii'),
-                    tuple(x.encode('ascii') for x in t[1]),
-                    tuple(x.encode('ascii') for x in t[2]))
-        bytes_cases = [_encode(x) for x in str_cases]
-        str_schemes = ('http', 'https')
-        bytes_schemes = (b'http', b'https')
-        str_tests = str_schemes, str_cases
-        bytes_tests = bytes_schemes, bytes_cases
-        for schemes, test_cases in (str_tests, bytes_tests):
-            for scheme in schemes:
-                for url, parsed, split in test_cases:
-                    url = scheme + url
-                    parsed = (scheme,) + parsed
-                    split = (scheme,) + split
-                    self.checkRoundtrips(url, parsed, split)
+            ])
+    def test_http_roundtrips(self, bytes, scheme, url, parsed, split):
+        # urllib.parse.urlsplit treats 'http:' as an optimized special case,
+        # so we test both 'http:' and 'https:' in all the following.
+        # Three cheers for white box knowledge!
+        if bytes:
+            scheme = str_encode(scheme)
+            url = str_encode(url)
+            parsed = tuple_encode(parsed)
+            split = tuple_encode(split)
+        url = scheme + url
+        parsed = (scheme,) + parsed
+        split = (scheme,) + split
+        self.checkRoundtrips(url, parsed, split)
 
     def checkJoin(self, base, relurl, expected):
         with self.subTest(base=base, relurl=relurl):
@@ -363,12 +353,13 @@ def checkJoin(self, base, relurl, expected):
             relurlb = urllib.parse.urlunsplit(urllib.parse.urlsplit(relurlb))
             self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
 
-    def test_unparse_parse(self):
-        str_cases = ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]
-        bytes_cases = [x.encode('ascii') for x in str_cases]
-        for u in str_cases + bytes_cases:
-            self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
-            self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
+    @support.subTests('bytes', (False, True))
+    @support.subTests('u', ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',])
+    def test_unparse_parse(self, bytes, u):
+        if bytes:
+            u = str_encode(u)
+        self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
+        self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
 
     def test_RFC1808(self):
         # "normal" cases from RFC 1808:
@@ -580,8 +571,8 @@ def test_urljoins(self):
         # issue 23703: don't duplicate filename
         self.checkJoin('a', 'b', 'b')
 
-    def test_RFC2732(self):
-        str_cases = [
+    @support.subTests('bytes', (False, True))
+    @support.subTests('url,hostname,port', [
             ('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
             ('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
             ('http://[::1]:5432/foo/', '::1', 5432),
@@ -612,26 +603,28 @@ def test_RFC2732(self):
             ('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
             ('http://[::ffff:12.34.56.78]:/foo/',
              '::ffff:12.34.56.78', None),
-            ]
-        def _encode(t):
-            return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
-        bytes_cases = [_encode(x) for x in str_cases]
-        for url, hostname, port in str_cases + bytes_cases:
-            urlparsed = urllib.parse.urlparse(url)
-            self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, 
port))
-
-        str_cases = [
+            ])
+    def test_RFC2732(self, bytes, url, hostname, port):
+        if bytes:
+            url = str_encode(url)
+            hostname = str_encode(hostname)
+        urlparsed = urllib.parse.urlparse(url)
+        self.assertEqual((urlparsed.hostname, urlparsed.port), (hostname, 
port))
+
+    @support.subTests('bytes', (False, True))
+    @support.subTests('invalid_url', [
                 'http://::12.34.56.78]/',
                 'http://[::1/foo/',
                 'ftp://[::1/foo/bad]/bad',
                 'http://[::1/foo/bad]/bad',
-                'http://[::ffff:12.34.56.78']
-        bytes_cases = [x.encode('ascii') for x in str_cases]
-        for invalid_url in str_cases + bytes_cases:
-            self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
-
-    def test_urldefrag(self):
-        str_cases = [
+                'http://[::ffff:12.34.56.78'])
+    def test_RFC2732_invalid(self, bytes, invalid_url):
+        if bytes:
+            invalid_url = str_encode(invalid_url)
+        self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
+
+    @support.subTests('bytes', (False, True))
+    @support.subTests('url,defrag,frag', [
             ('http://python.org#frag', 'http://python.org', 'frag'),
             ('http://python.org', 'http://python.org', ''),
             ('http://python.org/#frag', 'http://python.org/', 'frag'),
@@ -642,16 +635,18 @@ def test_urldefrag(self):
             ('http://python.org/p?q', 'http://python.org/p?q', ''),
             (RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
             (RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
-        ]
-        def _encode(t):
-            return type(t)(x.encode('ascii') for x in t)
-        bytes_cases = [_encode(x) for x in str_cases]
-        for url, defrag, frag in str_cases + bytes_cases:
-            result = urllib.parse.urldefrag(url)
-            self.assertEqual(result.geturl(), url)
-            self.assertEqual(result, (defrag, frag))
-            self.assertEqual(result.url, defrag)
-            self.assertEqual(result.fragment, frag)
+        ])
+    def test_urldefrag(self, bytes, url, defrag, frag):
+        if bytes:
+            url = str_encode(url)
+            defrag = str_encode(defrag)
+            frag = str_encode(frag)
+        result = urllib.parse.urldefrag(url)
+        hash = '#' if isinstance(url, str) else b'#'
+        self.assertEqual(result.geturl(), url.rstrip(hash))
+        self.assertEqual(result, (defrag, frag))
+        self.assertEqual(result.url, defrag)
+        self.assertEqual(result.fragment, frag)
 
     def test_urlsplit_scoped_IPv6(self):
         p = urllib.parse.urlsplit('http://[FE80::822a:a8ff:fe49:470c%tESt]:1234')
@@ -851,42 +846,35 @@ def test_urlsplit_strip_url(self):
             self.assertEqual(p.scheme, "https")
             self.assertEqual(p.geturl(), "https://www.python.org/")
 
-    def test_attributes_bad_port(self):
+    @support.subTests('bytes', (False, True))
+    @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+    @support.subTests('port', ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "६"))
+    def test_attributes_bad_port(self, bytes, parse, port):
         """Check handling of invalid ports."""
-        for bytes in (False, True):
-            for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
-                for port in ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "६"):
-                    with self.subTest(bytes=bytes, parse=parse, port=port):
-                        netloc = "www.example.net:" + port
-                        url = "http://" + netloc + "/"
-                        if bytes:
-                            if netloc.isascii() and port.isascii():
-                                netloc = netloc.encode("ascii")
-                                url = url.encode("ascii")
-                            else:
-                                continue
-                        p = parse(url)
-                        self.assertEqual(p.netloc, netloc)
-                        with self.assertRaises(ValueError):
-                            p.port
+        netloc = "www.example.net:" + port
+        url = "http://" + netloc + "/"
+        if bytes:
+            if not (netloc.isascii() and port.isascii()):
+                self.skipTest('non-ASCII bytes')
+            netloc = str_encode(netloc)
+            url = str_encode(url)
+        p = parse(url)
+        self.assertEqual(p.netloc, netloc)
+        with self.assertRaises(ValueError):
+            p.port
 
-    def test_attributes_bad_scheme(self):
+    @support.subTests('bytes', (False, True))
+    @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+    @support.subTests('scheme', (".", "+", "-", "0", "http&", "६http"))
+    def test_attributes_bad_scheme(self, bytes, parse, scheme):
         """Check handling of invalid schemes."""
-        for bytes in (False, True):
-            for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
-                for scheme in (".", "+", "-", "0", "http&", "६http"):
-                    with self.subTest(bytes=bytes, parse=parse, scheme=scheme):
-                        url = scheme + "://www.example.net"
-                        if bytes:
-                            if url.isascii():
-                                url = url.encode("ascii")
-                            else:
-                                continue
-                        p = parse(url)
-                        if bytes:
-                            self.assertEqual(p.scheme, b"")
-                        else:
-                            self.assertEqual(p.scheme, "")
+        url = scheme + "://www.example.net"
+        if bytes:
+            if not url.isascii():
+                self.skipTest('non-ASCII bytes')
+            url = url.encode("ascii")
+        p = parse(url)
+        self.assertEqual(p.scheme, b"" if bytes else "")
 
     def test_attributes_without_netloc(self):
         # This example is straight from RFC 3261.  It looks like it
@@ -998,24 +986,21 @@ def test_anyscheme(self):
         self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"),
                          (b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b''))
 
-    def test_default_scheme(self):
+    @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+    def test_default_scheme(self, func):
         # Exercise the scheme parameter of urlparse() and urlsplit()
-        for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
-            with self.subTest(function=func):
-                result = func("http://example.net/", "ftp")
-                self.assertEqual(result.scheme, "http")
-                result = func(b"http://example.net/", b"ftp")
-                self.assertEqual(result.scheme, b"http")
-                self.assertEqual(func("path", "ftp").scheme, "ftp")
-                self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
-                self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
-                self.assertEqual(func("path").scheme, "")
-                self.assertEqual(func(b"path").scheme, b"")
-                self.assertEqual(func(b"path", "").scheme, b"")
-
-    def test_parse_fragments(self):
-        # Exercise the allow_fragments parameter of urlparse() and urlsplit()
-        tests = (
+        result = func("http://example.net/", "ftp")
+        self.assertEqual(result.scheme, "http")
+        result = func(b"http://example.net/", b"ftp")
+        self.assertEqual(result.scheme, b"http")
+        self.assertEqual(func("path", "ftp").scheme, "ftp")
+        self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
+        self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
+        self.assertEqual(func("path").scheme, "")
+        self.assertEqual(func(b"path").scheme, b"")
+        self.assertEqual(func(b"path", "").scheme, b"")
+
+    @support.subTests('url,attr,expected_frag', (
             ("http:#frag", "path", "frag"),
             ("//example.net#frag", "path", "frag"),
             ("index.html#frag", "path", "frag"),
@@ -1026,24 +1011,24 @@ def test_parse_fragments(self):
             ("//abc#@frag", "path", "@frag"),
             ("//abc:80#@frag", "path", "@frag"),
             ("//abc#@frag:80", "path", "@frag:80"),
-        )
-        for url, attr, expected_frag in tests:
-            for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
-                if attr == "params" and func is urllib.parse.urlsplit:
-                    attr = "path"
-                with self.subTest(url=url, function=func):
-                    result = func(url, allow_fragments=False)
-                    self.assertEqual(result.fragment, "")
-                    self.assertEndsWith(getattr(result, attr),
-                                        "#" + expected_frag)
-                    self.assertEqual(func(url, "", False).fragment, "")
-
-                    result = func(url, allow_fragments=True)
-                    self.assertEqual(result.fragment, expected_frag)
-                    self.assertNotEndsWith(getattr(result, attr), expected_frag)
-                    self.assertEqual(func(url, "", True).fragment,
-                                     expected_frag)
-                    self.assertEqual(func(url).fragment, expected_frag)
+        ))
+    @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+    def test_parse_fragments(self, url, attr, expected_frag, func):
+        # Exercise the allow_fragments parameter of urlparse() and urlsplit()
+        if attr == "params" and func is urllib.parse.urlsplit:
+            attr = "path"
+        result = func(url, allow_fragments=False)
+        self.assertEqual(result.fragment, "")
+        self.assertEndsWith(getattr(result, attr),
+                            "#" + expected_frag)
+        self.assertEqual(func(url, "", False).fragment, "")
+
+        result = func(url, allow_fragments=True)
+        self.assertEqual(result.fragment, expected_frag)
+        self.assertNotEndsWith(getattr(result, attr), expected_frag)
+        self.assertEqual(func(url, "", True).fragment,
+                            expected_frag)
+        self.assertEqual(func(url).fragment, expected_frag)
 
     def test_mixed_types_rejected(self):
         # Several functions that process either strings or ASCII encoded bytes
@@ -1069,7 +1054,14 @@ def test_mixed_types_rejected(self):
         with self.assertRaisesRegex(TypeError, "Cannot mix str"):
             urllib.parse.urljoin(b"http://python.org";, "http://python.org";)
 
-    def _check_result_type(self, str_type):
+    @support.subTests('result_type', [
+          urllib.parse.DefragResult,
+          urllib.parse.SplitResult,
+          urllib.parse.ParseResult,
+        ])
+    def test_result_pairs(self, result_type):
+        # Check encoding and decoding between result pairs
+        str_type = result_type
         num_args = len(str_type._fields)
         bytes_type = str_type._encoded_counterpart
         self.assertIs(bytes_type._decoded_counterpart, str_type)
@@ -1094,16 +1086,6 @@ def _check_result_type(self, str_type):
         self.assertEqual(str_result.encode(encoding, errors), bytes_args)
         self.assertEqual(str_result.encode(encoding, errors), bytes_result)
 
-    def test_result_pairs(self):
-        # Check encoding and decoding between result pairs
-        result_types = [
-          urllib.parse.DefragResult,
-          urllib.parse.SplitResult,
-          urllib.parse.ParseResult,
-        ]
-        for result_type in result_types:
-            self._check_result_type(result_type)
-
     def test_parse_qs_encoding(self):
         result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
         self.assertEqual(result, {'key': ['\u0141\xE9']})
@@ -1135,8 +1117,7 @@ def test_parse_qsl_max_num_fields(self):
             urllib.parse.parse_qsl('&'.join(['a=a']*11), max_num_fields=10)
         urllib.parse.parse_qsl('&'.join(['a=a']*10), max_num_fields=10)
 
-    def test_parse_qs_separator(self):
-        parse_qs_semicolon_cases = [
+    @support.subTests('orig,expect', [
             (";", {}),
             (";;", {}),
             (";a=b", {'a': ['b']}),
@@ -1147,17 +1128,14 @@ def test_parse_qs_separator(self):
             (b";a=b", {b'a': [b'b']}),
             (b"a=a+b;b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
             (b"a=1;a=2", {b'a': [b'1', b'2']}),
-        ]
-        for orig, expect in parse_qs_semicolon_cases:
-            with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
-                result = urllib.parse.parse_qs(orig, separator=';')
-                self.assertEqual(result, expect, "Error parsing %r" % orig)
-                result_bytes = urllib.parse.parse_qs(orig, separator=b';')
-                self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
-
-
-    def test_parse_qsl_separator(self):
-        parse_qsl_semicolon_cases = [
+        ])
+    def test_parse_qs_separator(self, orig, expect):
+        result = urllib.parse.parse_qs(orig, separator=';')
+        self.assertEqual(result, expect)
+        result_bytes = urllib.parse.parse_qs(orig, separator=b';')
+        self.assertEqual(result_bytes, expect)
+
+    @support.subTests('orig,expect', [
             (";", []),
             (";;", []),
             (";a=b", [('a', 'b')]),
@@ -1168,13 +1146,12 @@ def test_parse_qsl_separator(self):
             (b";a=b", [(b'a', b'b')]),
             (b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
             (b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
-        ]
-        for orig, expect in parse_qsl_semicolon_cases:
-            with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
-                result = urllib.parse.parse_qsl(orig, separator=';')
-                self.assertEqual(result, expect, "Error parsing %r" % orig)
-                result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
-                self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
+        ])
+    def test_parse_qsl_separator(self, orig, expect):
+        result = urllib.parse.parse_qsl(orig, separator=';')
+        self.assertEqual(result, expect)
+        result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
+        self.assertEqual(result_bytes, expect)
 
     def test_parse_qsl_bytes(self):
         self.assertEqual(urllib.parse.parse_qsl(b'a=b'), [(b'a', b'b')])
@@ -1557,11 +1534,12 @@ def test_to_bytes(self):
         self.assertRaises(UnicodeError, urllib.parse._to_bytes,
                           'http://www.python.org/medi\u00e6val')
 
-    def test_unwrap(self):
-        for wrapped_url in ('<URL:scheme://host/path>', '<scheme://host/path>',
-                            'URL:scheme://host/path', 'scheme://host/path'):
-            url = urllib.parse.unwrap(wrapped_url)
-            self.assertEqual(url, 'scheme://host/path')
+    @support.subTests('wrapped_url',
+                          ('<URL:scheme://host/path>', '<scheme://host/path>',
+                           'URL:scheme://host/path', 'scheme://host/path'))
+    def test_unwrap(self, wrapped_url):
+        url = urllib.parse.unwrap(wrapped_url)
+        self.assertEqual(url, 'scheme://host/path')
 
 
 class DeprecationTest(unittest.TestCase):
@@ -1649,5 +1627,11 @@ def test_to_bytes_deprecation(self):
                          'urllib.parse.to_bytes() is deprecated as of 3.8')
 
 
+def str_encode(s):
+    return s.encode('ascii')
+
+def tuple_encode(t):
+    return tuple(str_encode(x) for x in t)
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/Misc/NEWS.d/next/Tests/2025-06-04-13-07-44.gh-issue-135120.NapnZT.rst b/Misc/NEWS.d/next/Tests/2025-06-04-13-07-44.gh-issue-135120.NapnZT.rst
new file mode 100644
index 00000000000000..772173774b1ac1
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2025-06-04-13-07-44.gh-issue-135120.NapnZT.rst
@@ -0,0 +1 @@
+Add :func:`!test.support.subTests`.

_______________________________________________
Python-checkins mailing list -- python-checkins@python.org
To unsubscribe send an email to python-checkins-le...@python.org
https://mail.python.org/mailman3//lists/python-checkins.python.org
Member address: arch...@mail-archive.com

Reply via email to