Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-packaging for 
openSUSE:Factory checked in at 2023-04-17 17:40:56
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-packaging (Old)
 and      /work/SRC/openSUSE:Factory/.python-packaging.new.2023 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-packaging"

Mon Apr 17 17:40:56 2023 rev:29 rq:1079575 version:23.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-packaging/python-packaging.changes        
2023-03-12 16:22:18.068250899 +0100
+++ 
/work/SRC/openSUSE:Factory/.python-packaging.new.2023/python-packaging.changes  
    2023-04-17 17:40:57.530102162 +0200
@@ -1,0 +2,17 @@
+Sat Apr 15 10:28:14 UTC 2023 - Dirk Müller <dmuel...@suse.com>
+
+- update to 23.1
+  * chore: add typed classifier
+  * Improve parser error messaging around mistakes in/around 
+    version specifiers
+  * Upgrade to latest mypy
+  * Delete print() from test_tags
+  * Update our linters
+  * Improve error for local version label with unsupported operators
+  * Parse raw metadata
+  * Handle prefix match with zeros at end of prefix correctly
+  * Relax typing of _key on _BaseVersion
+  * Replace spaces in platform names with underscores
+  * Import underlying parser function as an underscored variable
+  
+-------------------------------------------------------------------

Old:
----
  packaging-23.0.tar.gz

New:
----
  packaging-23.1.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-packaging.spec ++++++
--- /var/tmp/diff_new_pack.l2VM4p/_old  2023-04-17 17:40:57.994104874 +0200
+++ /var/tmp/diff_new_pack.l2VM4p/_new  2023-04-17 17:40:57.998104897 +0200
@@ -52,7 +52,7 @@
 %endif
 
 Name:           %{pprefix}-packaging%{?psuffix}
-Version:        23.0
+Version:        23.1
 Release:        0
 Summary:        Core utilities for Python packages
 License:        Apache-2.0 AND BSD-2-Clause

++++++ packaging-23.0.tar.gz -> packaging-23.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/PKG-INFO new/packaging-23.1/PKG-INFO
--- old/packaging-23.0/PKG-INFO 1970-01-01 01:00:00.000000000 +0100
+++ new/packaging-23.1/PKG-INFO 1970-01-01 01:00:00.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: packaging
-Version: 23.0
+Version: 23.1
 Summary: Core utilities for Python packages
 Author-email: Donald Stufft <don...@stufft.io>
 Requires-Python: >=3.7
@@ -19,6 +19,7 @@
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Typing :: Typed
 Project-URL: Documentation, https://packaging.pypa.io/
 Project-URL: Source, https://github.com/pypa/packaging
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/docs/index.rst 
new/packaging-23.1/docs/index.rst
--- old/packaging-23.0/docs/index.rst   2022-11-25 20:59:55.164905800 +0100
+++ new/packaging-23.1/docs/index.rst   2023-04-12 18:05:06.672022600 +0200
@@ -25,6 +25,7 @@
     specifiers
     markers
     requirements
+    metadata
     tags
     utils
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/docs/metadata.rst 
new/packaging-23.1/docs/metadata.rst
--- old/packaging-23.0/docs/metadata.rst        1970-01-01 01:00:00.000000000 
+0100
+++ new/packaging-23.1/docs/metadata.rst        2023-04-12 18:05:06.672171400 
+0200
@@ -0,0 +1,42 @@
+Metadata
+========
+
+.. currentmodule:: packaging.markers
+
+
+Both `source distributions`_ and `binary distributions`_
+(_sdists_ and _wheels_, respectively) contain files recording the
+`core metadata`_ for the distribution. This information is used for
+everything from recording the name of the distribution to the
+installation dependencies.
+
+
+Usage
+-----
+
+.. doctest::
+
+    >>> from packaging.metadata import parse_email
+    >>> metadata = "Metadata-Version: 2.3\nName: packaging\nVersion: 24.0"
+    >>> raw, unparsed = parse_email(metadata)
+    >>> raw["metadata_version"]
+    '2.3'
+    >>> raw["name"]
+    'packaging'
+    >>> raw["version"]
+    '24.0'
+
+
+Reference
+---------
+
+Low Level Interface
+'''''''''''''''''''
+
+.. automodule:: packaging.metadata
+    :members:
+
+
+.. _source distributions: 
https://packaging.python.org/en/latest/specifications/source-distribution-format/
+.. _binary distributions: 
https://packaging.python.org/en/latest/specifications/binary-distribution-format/
+.. _core metadata: 
https://packaging.python.org/en/latest/specifications/core-metadata/
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/pyproject.toml 
new/packaging-23.1/pyproject.toml
--- old/packaging-23.0/pyproject.toml   2022-12-09 00:11:51.459903200 +0100
+++ new/packaging-23.1/pyproject.toml   2023-01-30 16:29:49.449063000 +0100
@@ -25,6 +25,7 @@
   "Programming Language :: Python :: 3.11",
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy",
+  "Typing :: Typed",
 ]
 dependencies = []
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/__init__.py 
new/packaging-23.1/src/packaging/__init__.py
--- old/packaging-23.0/src/packaging/__init__.py        2023-01-08 
19:18:23.340532500 +0100
+++ new/packaging-23.1/src/packaging/__init__.py        2023-04-12 
18:10:22.111652100 +0200
@@ -6,7 +6,7 @@
 __summary__ = "Core utilities for Python packages"
 __uri__ = "https://github.com/pypa/packaging";
 
-__version__ = "23.0"
+__version__ = "23.1"
 
 __author__ = "Donald Stufft and individual contributors"
 __email__ = "don...@stufft.io"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/_manylinux.py 
new/packaging-23.1/src/packaging/_manylinux.py
--- old/packaging-23.0/src/packaging/_manylinux.py      2022-12-09 
00:11:51.460600000 +0100
+++ new/packaging-23.1/src/packaging/_manylinux.py      2023-04-12 
18:05:06.672746200 +0200
@@ -14,6 +14,8 @@
 EF_ARM_ABI_FLOAT_HARD = 0x00000400
 
 
+# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
+# as the type for `path` until then.
 @contextlib.contextmanager
 def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
     try:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/_parser.py 
new/packaging-23.1/src/packaging/_parser.py
--- old/packaging-23.0/src/packaging/_parser.py 2023-01-08 19:00:19.700918200 
+0100
+++ new/packaging-23.1/src/packaging/_parser.py 2023-04-12 18:05:06.673098800 
+0200
@@ -163,7 +163,11 @@
     if not tokenizer.check("LEFT_BRACKET", peek=True):
         return []
 
-    with tokenizer.enclosing_tokens("LEFT_BRACKET", "RIGHT_BRACKET"):
+    with tokenizer.enclosing_tokens(
+        "LEFT_BRACKET",
+        "RIGHT_BRACKET",
+        around="extras",
+    ):
         tokenizer.consume("WS")
         extras = _parse_extras_list(tokenizer)
         tokenizer.consume("WS")
@@ -203,7 +207,11 @@
     specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
               | WS? version_many WS?
     """
-    with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
+    with tokenizer.enclosing_tokens(
+        "LEFT_PARENTHESIS",
+        "RIGHT_PARENTHESIS",
+        around="version specifier",
+    ):
         tokenizer.consume("WS")
         parsed_specifiers = _parse_version_many(tokenizer)
         tokenizer.consume("WS")
@@ -217,7 +225,20 @@
     """
     parsed_specifiers = ""
     while tokenizer.check("SPECIFIER"):
+        span_start = tokenizer.position
         parsed_specifiers += tokenizer.read().text
+        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
+            tokenizer.raise_syntax_error(
+                ".* suffix can only be used with `==` or `!=` operators",
+                span_start=span_start,
+                span_end=tokenizer.position + 1,
+            )
+        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
+            tokenizer.raise_syntax_error(
+                "Local version label can only be used with `==` or `!=` 
operators",
+                span_start=span_start,
+                span_end=tokenizer.position,
+            )
         tokenizer.consume("WS")
         if not tokenizer.check("COMMA"):
             break
@@ -254,7 +275,11 @@
 
     tokenizer.consume("WS")
     if tokenizer.check("LEFT_PARENTHESIS", peek=True):
-        with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", 
"RIGHT_PARENTHESIS"):
+        with tokenizer.enclosing_tokens(
+            "LEFT_PARENTHESIS",
+            "RIGHT_PARENTHESIS",
+            around="marker expression",
+        ):
             tokenizer.consume("WS")
             marker: MarkerAtom = _parse_marker(tokenizer)
             tokenizer.consume("WS")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/_tokenizer.py 
new/packaging-23.1/src/packaging/_tokenizer.py
--- old/packaging-23.0/src/packaging/_tokenizer.py      2022-12-13 
08:03:34.352187200 +0100
+++ new/packaging-23.1/src/packaging/_tokenizer.py      2023-04-12 
18:05:06.673353400 +0200
@@ -78,6 +78,8 @@
     "AT": r"\@",
     "URL": r"[^ \t]+",
     "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
+    "VERSION_PREFIX_TRAIL": r"\.\*",
+    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
     "WS": r"[ \t]+",
     "END": r"$",
 }
@@ -167,21 +169,23 @@
         )
 
     @contextlib.contextmanager
-    def enclosing_tokens(self, open_token: str, close_token: str) -> 
Iterator[bool]:
+    def enclosing_tokens(
+        self, open_token: str, close_token: str, *, around: str
+    ) -> Iterator[None]:
         if self.check(open_token):
             open_position = self.position
             self.read()
         else:
             open_position = None
 
-        yield open_position is not None
+        yield
 
         if open_position is None:
             return
 
         if not self.check(close_token):
             self.raise_syntax_error(
-                f"Expected closing {close_token}",
+                f"Expected matching {close_token} for {open_token}, after 
{around}",
                 span_start=open_position,
             )
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/markers.py 
new/packaging-23.1/src/packaging/markers.py
--- old/packaging-23.0/src/packaging/markers.py 2022-12-27 16:22:45.000761000 
+0100
+++ new/packaging-23.1/src/packaging/markers.py 2023-04-12 18:05:29.534912600 
+0200
@@ -8,7 +8,14 @@
 import sys
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
-from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
+from ._parser import (
+    MarkerAtom,
+    MarkerList,
+    Op,
+    Value,
+    Variable,
+    parse_marker as _parse_marker,
+)
 from ._tokenizer import ParserSyntaxError
 from .specifiers import InvalidSpecifier, Specifier
 from .utils import canonicalize_name
@@ -189,7 +196,7 @@
         #       packaging.requirements.Requirement. If any additional logic is
         #       added here, make sure to mirror/adapt Requirement.
         try:
-            self._markers = _normalize_extra_values(parse_marker(marker))
+            self._markers = _normalize_extra_values(_parse_marker(marker))
             # The attribute `_markers` can be described in terms of a 
recursive type:
             # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
             #
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/metadata.py 
new/packaging-23.1/src/packaging/metadata.py
--- old/packaging-23.0/src/packaging/metadata.py        1970-01-01 
01:00:00.000000000 +0100
+++ new/packaging-23.1/src/packaging/metadata.py        2023-04-12 
18:05:06.674206300 +0200
@@ -0,0 +1,408 @@
+import email.feedparser
+import email.header
+import email.message
+import email.parser
+import email.policy
+import sys
+import typing
+from typing import Dict, List, Optional, Tuple, Union, cast
+
+if sys.version_info >= (3, 8):  # pragma: no cover
+    from typing import TypedDict
+else:  # pragma: no cover
+    if typing.TYPE_CHECKING:
+        from typing_extensions import TypedDict
+    else:
+        try:
+            from typing_extensions import TypedDict
+        except ImportError:
+
+            class TypedDict:
+                def __init_subclass__(*_args, **_kwargs):
+                    pass
+
+
+# The RawMetadata class attempts to make as few assumptions about the 
underlying
+# serialization formats as possible. The idea is that as long as a 
serialization
+# formats offer some very basic primitives in *some* way then we can support
+# serializing to and from that format.
+class RawMetadata(TypedDict, total=False):
+    """A dictionary of raw core metadata.
+
+    Each field in core metadata maps to a key of this dictionary (when data is
+    provided). The key is lower-case and underscores are used instead of dashes
+    compared to the equivalent core metadata field. Any core metadata field 
that
+    can be specified multiple times or can hold multiple values in a single
+    field have a key with a plural name.
+
+    Core metadata fields that can be specified multiple times are stored as a
+    list or dict depending on which is appropriate for the field. Any fields
+    which hold multiple values in a single field are stored as a list.
+
+    """
+
+    # Metadata 1.0 - PEP 241
+    metadata_version: str
+    name: str
+    version: str
+    platforms: List[str]
+    summary: str
+    description: str
+    keywords: List[str]
+    home_page: str
+    author: str
+    author_email: str
+    license: str
+
+    # Metadata 1.1 - PEP 314
+    supported_platforms: List[str]
+    download_url: str
+    classifiers: List[str]
+    requires: List[str]
+    provides: List[str]
+    obsoletes: List[str]
+
+    # Metadata 1.2 - PEP 345
+    maintainer: str
+    maintainer_email: str
+    requires_dist: List[str]
+    provides_dist: List[str]
+    obsoletes_dist: List[str]
+    requires_python: str
+    requires_external: List[str]
+    project_urls: Dict[str, str]
+
+    # Metadata 2.0
+    # PEP 426 attempted to completely revamp the metadata format
+    # but got stuck without ever being able to build consensus on
+    # it and ultimately ended up withdrawn.
+    #
+# However, a number of tools had started emitting METADATA with
+    # `2.0` Metadata-Version, so for historical reasons, this version
+    # was skipped.
+
+    # Metadata 2.1 - PEP 566
+    description_content_type: str
+    provides_extra: List[str]
+
+    # Metadata 2.2 - PEP 643
+    dynamic: List[str]
+
+    # Metadata 2.3 - PEP 685
+# No new fields were added in PEP 685, just some edge cases were
+# tightened up to provide better interoperability.
+
+
+_STRING_FIELDS = {
+    "author",
+    "author_email",
+    "description",
+    "description_content_type",
+    "download_url",
+    "home_page",
+    "license",
+    "maintainer",
+    "maintainer_email",
+    "metadata_version",
+    "name",
+    "requires_python",
+    "summary",
+    "version",
+}
+
+_LIST_STRING_FIELDS = {
+    "classifiers",
+    "dynamic",
+    "obsoletes",
+    "obsoletes_dist",
+    "platforms",
+    "provides",
+    "provides_dist",
+    "provides_extra",
+    "requires",
+    "requires_dist",
+    "requires_external",
+    "supported_platforms",
+}
+
+
+def _parse_keywords(data: str) -> List[str]:
+    """Split a string of comma-separated keywords into a list of keywords."""
+    return [k.strip() for k in data.split(",")]
+
+
+def _parse_project_urls(data: List[str]) -> Dict[str, str]:
+    """Parse a list of label/URL string pairings separated by a comma."""
+    urls = {}
+    for pair in data:
+        # Our logic is slightly tricky here as we want to try and do
+        # *something* reasonable with malformed data.
+        #
+        # The main thing that we have to worry about, is data that does
+        # not have a ',' at all to split the label from the Value. There
+        # isn't a singular right answer here, and we will fail validation
+        # later on (if the caller is validating) so it doesn't *really*
+        # matter, but since the missing value has to be an empty str
+        # and our return value is dict[str, str], if we let the key
+        # be the missing value, then they'd have multiple '' values that
+        # overwrite each other in an accumulating dict.
+        #
+        # The other potential issue is that it's possible to have the
+        # same label multiple times in the metadata, with no solid "right"
+        # answer with what to do in that case. As such, we'll do the only
+        # thing we can, which is treat the field as unparseable and add it
+        # to our list of unparsed fields.
+        parts = [p.strip() for p in pair.split(",", 1)]
+        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items
+
+        # TODO: The spec doesn't say anything about if the keys should be
+        #       considered case sensitive or not... logically they should
+        #       be case-preserving and case-insensitive, but doing that
+        #       would open up more cases where we might have duplicate
+        #       entries.
+        label, url = parts
+        if label in urls:
+            # The label already exists in our set of urls, so this field
+            # is unparseable, and we can just add the whole thing to our
+            # unparseable data and stop processing it.
+            raise KeyError("duplicate labels in project urls")
+        urls[label] = url
+
+    return urls
+
+
+def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
+    """Get the body of the message."""
+    # If our source is a str, then our caller has managed encodings for us,
+    # and we don't need to deal with it.
+    if isinstance(source, str):
+        payload: str = msg.get_payload()
+        return payload
+    # If our source is a bytes, then we're managing the encoding and we need
+    # to deal with it.
+    else:
+        bpayload: bytes = msg.get_payload(decode=True)
+        try:
+            return bpayload.decode("utf8", "strict")
+        except UnicodeDecodeError:
+            raise ValueError("payload in an invalid encoding")
+
+
+# The various parse_FORMAT functions here are intended to be as lenient as
+# possible in their parsing, while still returning a correctly typed
+# RawMetadata.
+#
+# To aid in this, we also generally want to do as little touching of the
+# data as possible, except where there are possibly some historic holdovers
+# that make valid data awkward to work with.
+#
+# While this is a lower level, intermediate format than our ``Metadata``
+# class, some light touch ups can make a massive difference in usability.
+
+# Map METADATA fields to RawMetadata.
+_EMAIL_TO_RAW_MAPPING = {
+    "author": "author",
+    "author-email": "author_email",
+    "classifier": "classifiers",
+    "description": "description",
+    "description-content-type": "description_content_type",
+    "download-url": "download_url",
+    "dynamic": "dynamic",
+    "home-page": "home_page",
+    "keywords": "keywords",
+    "license": "license",
+    "maintainer": "maintainer",
+    "maintainer-email": "maintainer_email",
+    "metadata-version": "metadata_version",
+    "name": "name",
+    "obsoletes": "obsoletes",
+    "obsoletes-dist": "obsoletes_dist",
+    "platform": "platforms",
+    "project-url": "project_urls",
+    "provides": "provides",
+    "provides-dist": "provides_dist",
+    "provides-extra": "provides_extra",
+    "requires": "requires",
+    "requires-dist": "requires_dist",
+    "requires-external": "requires_external",
+    "requires-python": "requires_python",
+    "summary": "summary",
+    "supported-platform": "supported_platforms",
+    "version": "version",
+}
+
+
+def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, 
List[str]]]:
+    """Parse a distribution's metadata.
+
+    This function returns a two-item tuple of dicts. The first dict is of
+    recognized fields from the core metadata specification. Fields that can be
+    parsed and translated into Python's built-in types are converted
+    appropriately. All other fields are left as-is. Fields that are allowed to
+    appear multiple times are stored as lists.
+
+    The second dict contains all other fields from the metadata. This includes
+    any unrecognized fields. It also includes any fields which are expected to
+    be parsed into a built-in type but were not formatted appropriately. 
Finally,
+    any fields that are expected to appear only once but are repeated are
+    included in this dict.
+
+    """
+    raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
+    unparsed: Dict[str, List[str]] = {}
+
+    if isinstance(data, str):
+        parsed = 
email.parser.Parser(policy=email.policy.compat32).parsestr(data)
+    else:
+        parsed = 
email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
+
+    # We have to wrap parsed.keys() in a set, because in the case of multiple
+    # values for a key (a list), the key will appear multiple times in the
+    # list of keys, but we're avoiding that by using get_all().
+    for name in frozenset(parsed.keys()):
+        # Header names in RFC are case insensitive, so we'll normalize to all
+        # lower case to make comparisons easier.
+        name = name.lower()
+
+        # We use get_all() here, even for fields that aren't multiple use,
+        # because otherwise someone could have e.g. two Name fields, and we
+        # would just silently ignore it rather than doing something about it.
+        headers = parsed.get_all(name)
+
+        # The way the email module works when parsing bytes is that it
+        # unconditionally decodes the bytes as ascii using the surrogateescape
+        # handler. When you pull that data back out (such as with get_all() ),
+        # it looks to see if the str has any surrogate escapes, and if it does
+        # it wraps it in a Header object instead of returning the string.
+        #
+        # As such, we'll look for those Header objects, and fix up the 
encoding.
+        value = []
+        # Flag if we have run into any issues processing the headers, thus
+        # signalling that the data belongs in 'unparsed'.
+        valid_encoding = True
+        for h in headers:
+            # It's unclear if this can return more types than just a Header or
+            # a str, so we'll just assert here to make sure.
+            assert isinstance(h, (email.header.Header, str))
+
+            # If it's a header object, we need to do our little dance to get
+            # the real data out of it. In cases where there is invalid data
+            # we're going to end up with mojibake, but there's no obvious, good
+            # way around that without reimplementing parts of the Header object
+            # ourselves.
+            #
+            # That should be fine since, if mojibake happens, this key is
+            # going into the unparsed dict anyways.
+            if isinstance(h, email.header.Header):
+                # The Header object stores its data as chunks, and each chunk
+                # can be independently encoded, so we'll need to check each
+                # of them.
+                chunks: List[Tuple[bytes, Optional[str]]] = []
+                for bin, encoding in email.header.decode_header(h):
+                    try:
+                        bin.decode("utf8", "strict")
+                    except UnicodeDecodeError:
+                        # Enable mojibake.
+                        encoding = "latin1"
+                        valid_encoding = False
+                    else:
+                        encoding = "utf8"
+                    chunks.append((bin, encoding))
+
+                # Turn our chunks back into a Header object, then let that
+                # Header object do the right thing to turn them into a
+                # string for us.
+                value.append(str(email.header.make_header(chunks)))
+            # This is already a string, so just add it.
+            else:
+                value.append(h)
+
+        # We've processed all of our values to get them into a list of str,
+        # but we may have mojibake data, in which case this is an unparsed
+        # field.
+        if not valid_encoding:
+            unparsed[name] = value
+            continue
+
+        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
+        if raw_name is None:
+            # This is a bit of a weird situation, we've encountered a key that
+            # we don't know what it means, so we don't know whether it's meant
+            # to be a list or not.
+            #
+            # Since we can't really tell one way or another, we'll just leave 
it
+            # as a list, even though it may be a single item list, because 
that's
+            # what makes the most sense for email headers.
+            unparsed[name] = value
+            continue
+
+        # If this is one of our string fields, then we'll check to see if our
+        # value is a list of a single item. If it is then we'll assume that
+        # it was emitted as a single string, and unwrap the str from inside
+        # the list.
+        #
+        # If it's any other kind of data, then we haven't the faintest clue
+        # what we should parse it as, and we have to just add it to our list
+        # of unparsed stuff.
+        if raw_name in _STRING_FIELDS and len(value) == 1:
+            raw[raw_name] = value[0]
+        # If this is one of our list of string fields, then we can just assign
+        # the value, since email *only* has strings, and our get_all() call
+        # above ensures that this is a list.
+        elif raw_name in _LIST_STRING_FIELDS:
+            raw[raw_name] = value
+        # Special Case: Keywords
+        # The keywords field is implemented in the metadata spec as a str,
+        # but it conceptually is a list of strings, and is serialized using
+        # ", ".join(keywords), so we'll do some light data massaging to turn
+        # this into what it logically is.
+        elif raw_name == "keywords" and len(value) == 1:
+            raw[raw_name] = _parse_keywords(value[0])
+        # Special Case: Project-URL
+        # The project urls is implemented in the metadata spec as a list of
+        # specially-formatted strings that represent a key and a value, which
+        # is fundamentally a mapping, however the email format doesn't support
+        # mappings in a sane way, so it was crammed into a list of strings
+        # instead.
+        #
+        # We will do a little light data massaging to turn this into a map as
+        # it logically should be.
+        elif raw_name == "project_urls":
+            try:
+                raw[raw_name] = _parse_project_urls(value)
+            except KeyError:
+                unparsed[name] = value
+        # Nothing that we've done has managed to parse this, so it'll just
+        # throw it in our unparseable data and move on.
+        else:
+            unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, 
there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description, if so then both
+            # it, and this body move to unparseable.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast our `raw` to a metadata, because a TypedDict only supports
+    # literal key names, but we're computing our key names on purpose, but the
+    # way this function is implemented, our `TypedDict` can only have valid key
+    # names.
+    return cast(RawMetadata, raw), unparsed
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/requirements.py 
new/packaging-23.1/src/packaging/requirements.py
--- old/packaging-23.0/src/packaging/requirements.py    2022-12-09 
00:11:51.462091000 +0100
+++ new/packaging-23.1/src/packaging/requirements.py    2023-04-12 
18:05:06.674517900 +0200
@@ -5,7 +5,7 @@
 import urllib.parse
 from typing import Any, List, Optional, Set
 
-from ._parser import parse_requirement
+from ._parser import parse_requirement as _parse_requirement
 from ._tokenizer import ParserSyntaxError
 from .markers import Marker, _normalize_extra_values
 from .specifiers import SpecifierSet
@@ -32,7 +32,7 @@
 
     def __init__(self, requirement_string: str) -> None:
         try:
-            parsed = parse_requirement(requirement_string)
+            parsed = _parse_requirement(requirement_string)
         except ParserSyntaxError as e:
             raise InvalidRequirement(str(e)) from e
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/specifiers.py 
new/packaging-23.1/src/packaging/specifiers.py
--- old/packaging-23.0/src/packaging/specifiers.py      2023-01-08 
19:00:19.701309200 +0100
+++ new/packaging-23.1/src/packaging/specifiers.py      2023-04-12 
18:05:06.674899300 +0200
@@ -252,7 +252,8 @@
         # Store whether or not this Specifier should accept prereleases
         self._prereleases = prereleases
 
-    @property
+    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
+    @property  # type: ignore[override]
     def prereleases(self) -> bool:
         # If there is an explicit prereleases set for this, then we'll just
         # blindly use that.
@@ -398,7 +399,9 @@
         # We need special logic to handle prefix matching
         if spec.endswith(".*"):
             # In the case of prefix matching we want to ignore local segment.
-            normalized_prospective = canonicalize_version(prospective.public)
+            normalized_prospective = canonicalize_version(
+                prospective.public, strip_trailing_zero=False
+            )
             # Get the normalized version string ignoring the trailing .*
             normalized_spec = canonicalize_version(spec[:-2], 
strip_trailing_zero=False)
             # Split the spec out by dots, and pretend that there is an implicit
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/tags.py 
new/packaging-23.1/src/packaging/tags.py
--- old/packaging-23.0/src/packaging/tags.py    2022-12-26 20:52:02.396540400 
+0100
+++ new/packaging-23.1/src/packaging/tags.py    2023-04-12 18:05:06.675215000 
+0200
@@ -111,7 +111,7 @@
 
 
 def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
-    value = sysconfig.get_config_var(name)
+    value: Union[int, str, None] = sysconfig.get_config_var(name)
     if value is None and warn:
         logger.debug(
             "Config variable '%s' is unset, Python ABI tag may be incorrect", 
name
@@ -120,7 +120,7 @@
 
 
 def _normalize_string(string: str) -> str:
-    return string.replace(".", "_").replace("-", "_")
+    return string.replace(".", "_").replace("-", "_").replace(" ", "_")
 
 
 def _abi3_applies(python_version: PythonVersion) -> bool:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/src/packaging/version.py 
new/packaging-23.1/src/packaging/version.py
--- old/packaging-23.0/src/packaging/version.py 2022-12-09 00:11:51.463324000 
+0100
+++ new/packaging-23.1/src/packaging/version.py 2023-04-12 18:05:06.675496300 
+0200
@@ -10,7 +10,7 @@
 import collections
 import itertools
 import re
-from typing import Callable, Optional, SupportsInt, Tuple, Union
+from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
 
 from ._structures import Infinity, InfinityType, NegativeInfinity, 
NegativeInfinityType
 
@@ -63,7 +63,7 @@
 
 
 class _BaseVersion:
-    _key: CmpKey
+    _key: Tuple[Any, ...]
 
     def __hash__(self) -> int:
         return hash(self._key)
@@ -179,6 +179,7 @@
     """
 
     _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | 
re.IGNORECASE)
+    _key: CmpKey
 
     def __init__(self, version: str) -> None:
         """Initialize a Version object.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/metadata/everything.metadata 
new/packaging-23.1/tests/metadata/everything.metadata
--- old/packaging-23.0/tests/metadata/everything.metadata       1970-01-01 
01:00:00.000000000 +0100
+++ new/packaging-23.1/tests/metadata/everything.metadata       2023-04-12 
18:05:06.675785500 +0200
@@ -0,0 +1,42 @@
+Metadata-Version: 2.3
+Name: BeagleVote
+Version: 1.0a2
+Platform: ObscureUnix
+Platform: RareDOS
+Supported-Platform: RedHat 7.2
+Supported-Platform: i386-win32-2791
+Summary: A module for collecting votes from beagles.
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
+Keywords: dog,puppy,voting,election
+Home-page: http://www.example.com/~cschultz/bvote/
+Download-URL: …/BeagleVote-0.45.tgz
+Author: C. Schultz, Universal Features Syndicate,
+        Los Angeles, CA <cschu...@peanuts.example.com>
+Author-email: "C. Schultz" <cschu...@example.com>
+Maintainer: C. Schultz, Universal Features Syndicate,
+        Los Angeles, CA <cschu...@peanuts.example.com>
+Maintainer-email: "C. Schultz" <cschu...@example.com>
+License: This software may only be obtained by sending the
+        author a postcard, and then the user promises not
+        to redistribute it.
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console (Text Based)
+Provides-Extra: pdf
+Requires-Dist: reportlab; extra == 'pdf'
+Requires-Dist: pkginfo
+Requires-Dist: PasteDeploy
+Requires-Dist: zope.interface (>3.5.0)
+Requires-Dist: pywin32 >1.0; sys_platform == 'win32'
+Requires-Python: >=3
+Requires-External: C
+Requires-External: libpng (>=1.5)
+Requires-External: make; sys_platform != "win32"
+Project-URL: Bug Tracker, http://bitbucket.org/tarek/distribute/issues/
+Project-URL: Documentation, https://example.com/BeagleVote
+Provides-Dist: OtherProject
+Provides-Dist: AnotherProject (3.4)
+Provides-Dist: virtual_package; python_version >= "3.4"
+Dynamic: Obsoletes-Dist
+ThisIsNotReal: Hello!
+
+This description intentionally left blank.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/test_manylinux.py 
new/packaging-23.1/tests/test_manylinux.py
--- old/packaging-23.0/tests/test_manylinux.py  2022-09-30 11:55:33.606207600 
+0200
+++ new/packaging-23.1/tests/test_manylinux.py  2023-04-12 18:05:06.676032800 
+0200
@@ -3,6 +3,7 @@
 except ImportError:
     ctypes = None
 import os
+import pathlib
 import platform
 import sys
 import types
@@ -169,11 +170,8 @@
 )
 def test_parse_elf_bad_executable(monkeypatch, content):
     if content:
-        path = os.path.join(
-            os.path.dirname(__file__),
-            "manylinux",
-            f"hello-world-{content}",
-        )
+        path = pathlib.Path(__file__).parent / "manylinux" / 
f"hello-world-{content}"
+        path = os.fsdecode(path)
     else:
         path = None
     with _parse_elf(path) as ef:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/test_metadata.py 
new/packaging-23.1/tests/test_metadata.py
--- old/packaging-23.0/tests/test_metadata.py   1970-01-01 01:00:00.000000000 
+0100
+++ new/packaging-23.1/tests/test_metadata.py   2023-04-12 18:05:06.676202000 
+0200
@@ -0,0 +1,249 @@
+import pathlib
+
+import pytest
+
+from packaging import metadata
+
+_RAW_TO_EMAIL_MAPPING = {
+    raw: email for email, raw in metadata._EMAIL_TO_RAW_MAPPING.items()
+}
+
+
+class TestRawMetadata:
+    @pytest.mark.parametrize("raw_field", metadata._STRING_FIELDS)
+    def test_non_repeating_fields_only_once(self, raw_field):
+        data = "VaLuE"
+        header_field = _RAW_TO_EMAIL_MAPPING[raw_field]
+        single_header = f"{header_field}: {data}"
+        raw, unparsed = metadata.parse_email(single_header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert raw_field in raw
+        assert raw[raw_field] == data
+
+    @pytest.mark.parametrize("raw_field", metadata._STRING_FIELDS)
+    def test_non_repeating_fields_repeated(self, raw_field):
+        header_field = _RAW_TO_EMAIL_MAPPING[raw_field]
+        data = "VaLuE"
+        single_header = f"{header_field}: {data}"
+        repeated_header = "\n".join([single_header] * 2)
+        raw, unparsed = metadata.parse_email(repeated_header)
+        assert not raw
+        assert len(unparsed) == 1
+        assert header_field in unparsed
+        assert unparsed[header_field] == [data] * 2
+
+    @pytest.mark.parametrize("raw_field", metadata._LIST_STRING_FIELDS)
+    def test_repeating_fields_only_once(self, raw_field):
+        data = "VaLuE"
+        header_field = _RAW_TO_EMAIL_MAPPING[raw_field]
+        single_header = f"{header_field}: {data}"
+        raw, unparsed = metadata.parse_email(single_header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert raw_field in raw
+        assert raw[raw_field] == [data]
+
+    @pytest.mark.parametrize("raw_field", metadata._LIST_STRING_FIELDS)
+    def test_repeating_fields_repeated(self, raw_field):
+        header_field = _RAW_TO_EMAIL_MAPPING[raw_field]
+        data = "VaLuE"
+        single_header = f"{header_field}: {data}"
+        repeated_header = "\n".join([single_header] * 2)
+        raw, unparsed = metadata.parse_email(repeated_header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert raw_field in raw
+        assert raw[raw_field] == [data] * 2
+
+    @pytest.mark.parametrize(
+        ["given", "expected"],
+        [
+            ("A", ["A"]),
+            ("A ", ["A"]),
+            (" A", ["A"]),
+            ("A, B", ["A", "B"]),
+            ("A,B", ["A", "B"]),
+            (" A, B", ["A", "B"]),
+            ("A,B ", ["A", "B"]),
+            ("A B", ["A B"]),
+        ],
+    )
+    def test_keywords(self, given, expected):
+        header = f"Keywords: {given}"
+        raw, unparsed = metadata.parse_email(header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert "keywords" in raw
+        assert raw["keywords"] == expected
+
+    @pytest.mark.parametrize(
+        ["given", "expected"],
+        [
+            ("", {"": ""}),
+            ("A", {"A": ""}),
+            ("A,B", {"A": "B"}),
+            ("A, B", {"A": "B"}),
+            (" A,B", {"A": "B"}),
+            ("A,B ", {"A": "B"}),
+            ("A,B,C", {"A": "B,C"}),
+        ],
+    )
+    def test_project_urls_parsing(self, given, expected):
+        header = f"project-url: {given}"
+        raw, unparsed = metadata.parse_email(header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert "project_urls" in raw
+        assert raw["project_urls"] == expected
+
+    def test_duplicate_project_urls(self):
+        header = "project-url: A, B\nproject-url: A, C"
+        raw, unparsed = metadata.parse_email(header)
+        assert not raw
+        assert len(unparsed) == 1
+        assert "project-url" in unparsed
+        assert unparsed["project-url"] == ["A, B", "A, C"]
+
+    def test_str_input(self):
+        name = "Tarek Ziadé"
+        header = f"author: {name}"
+        raw, unparsed = metadata.parse_email(header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert "author" in raw
+        assert raw["author"] == name
+
+    def test_bytes_input(self):
+        name = "Tarek Ziadé"
+        header = f"author: {name}".encode()
+        raw, unparsed = metadata.parse_email(header)
+        assert not unparsed
+        assert len(raw) == 1
+        assert "author" in raw
+        assert raw["author"] == name
+
+    def test_header_mojibake(self):
+        value = "\xc0msterdam"
+        header_name = "value"
+        header_bytes = f"{header_name}: {value}".encode("latin1")
+        raw, unparsed = metadata.parse_email(header_bytes)
+        # Sanity check
+        with pytest.raises(UnicodeDecodeError):
+            header_bytes.decode("utf-8")
+        assert not raw
+        assert len(unparsed) == 1
+        assert header_name in unparsed
+        assert unparsed[header_name] == [value]
+
+    @pytest.mark.parametrize(
+        ["given"], [("hello",), ("description: hello",), (b"hello",)]
+    )
+    def test_description(self, given):
+        raw, unparsed = metadata.parse_email(given)
+        assert not unparsed
+        assert len(raw) == 1
+        assert "description" in raw
+        assert raw["description"] == "hello"
+
+    def test_description_non_utf8(self):
+        header = "\xc0msterdam"
+        header_bytes = header.encode("latin1")
+        raw, unparsed = metadata.parse_email(header_bytes)
+        assert not raw
+        assert len(unparsed) == 1
+        assert "description" in unparsed
+        assert unparsed["description"] == [header_bytes]
+
+    @pytest.mark.parametrize(
+        ["given", "expected"],
+        [
+            ("description: 1\ndescription: 2", ["1", "2"]),
+            ("description: 1\n\n2", ["1", "2"]),
+            ("description: 1\ndescription: 2\n\n3", ["1", "2", "3"]),
+        ],
+    )
+    def test_description_multiple(self, given, expected):
+        raw, unparsed = metadata.parse_email(given)
+        assert not raw
+        assert len(unparsed) == 1
+        assert "description" in unparsed
+        assert unparsed["description"] == expected
+
+    def test_lowercase_keys(self):
+        header = "AUTHOR: Tarek Ziadé\nWhatever: Else"
+        raw, unparsed = metadata.parse_email(header)
+        assert len(raw) == 1
+        assert "author" in raw
+        assert len(unparsed) == 1
+        assert "whatever" in unparsed
+
+    def test_complete(self):
+        """Test all fields (except `Obsoletes-Dist`).
+
+        `Obsoletes-Dist` was sacrificed to provide a value for `Dynamic`.
+        """
+        path = pathlib.Path(__file__).parent / "metadata" / 
"everything.metadata"
+        with path.open("r", encoding="utf-8") as file:
+            metadata_contents = file.read()
+        raw, unparsed = metadata.parse_email(metadata_contents)
+        assert len(unparsed) == 1
+        assert unparsed["thisisnotreal"] == ["Hello!"]
+        assert len(raw) == 24
+        assert raw["metadata_version"] == "2.3"
+        assert raw["name"] == "BeagleVote"
+        assert raw["version"] == "1.0a2"
+        assert raw["platforms"] == ["ObscureUnix", "RareDOS"]
+        assert raw["supported_platforms"] == ["RedHat 7.2", "i386-win32-2791"]
+        assert raw["summary"] == "A module for collecting votes from beagles."
+        assert (
+            raw["description_content_type"]
+            == "text/markdown; charset=UTF-8; variant=GFM"
+        )
+        assert raw["keywords"] == ["dog", "puppy", "voting", "election"]
+        assert raw["home_page"] == "http://www.example.com/~cschultz/bvote/";
+        assert raw["download_url"] == "…/BeagleVote-0.45.tgz"
+        assert raw["author"] == (
+            "C. Schultz, Universal Features Syndicate,\n"
+            "        Los Angeles, CA <cschu...@peanuts.example.com>"
+        )
+        assert raw["author_email"] == '"C. Schultz" <cschu...@example.com>'
+        assert raw["maintainer"] == (
+            "C. Schultz, Universal Features Syndicate,\n"
+            "        Los Angeles, CA <cschu...@peanuts.example.com>"
+        )
+        assert raw["maintainer_email"] == '"C. Schultz" <cschu...@example.com>'
+        assert raw["license"] == (
+            "This software may only be obtained by sending the\n"
+            "        author a postcard, and then the user promises not\n"
+            "        to redistribute it."
+        )
+        assert raw["classifiers"] == [
+            "Development Status :: 4 - Beta",
+            "Environment :: Console (Text Based)",
+        ]
+        assert raw["provides_extra"] == ["pdf"]
+        assert raw["requires_dist"] == [
+            "reportlab; extra == 'pdf'",
+            "pkginfo",
+            "PasteDeploy",
+            "zope.interface (>3.5.0)",
+            "pywin32 >1.0; sys_platform == 'win32'",
+        ]
+        assert raw["requires_python"] == ">=3"
+        assert raw["requires_external"] == [
+            "C",
+            "libpng (>=1.5)",
+            'make; sys_platform != "win32"',
+        ]
+        assert raw["project_urls"] == {
+            "Bug Tracker": "http://bitbucket.org/tarek/distribute/issues/";,
+            "Documentation": "https://example.com/BeagleVote";,
+        }
+        assert raw["provides_dist"] == [
+            "OtherProject",
+            "AnotherProject (3.4)",
+            'virtual_package; python_version >= "3.4"',
+        ]
+        assert raw["dynamic"] == ["Obsoletes-Dist"]
+        assert raw["description"] == "This description intentionally left 
blank.\n"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/test_requirements.py 
new/packaging-23.1/tests/test_requirements.py
--- old/packaging-23.0/tests/test_requirements.py       2023-01-08 
19:18:04.410140300 +0100
+++ new/packaging-23.1/tests/test_requirements.py       2023-04-12 
18:05:06.676444800 +0200
@@ -279,11 +279,48 @@
         # THEN
         assert ctx.exconly() == (
             "packaging.requirements.InvalidRequirement: "
-            "Expected closing RIGHT_PARENTHESIS\n"
+            "Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, "
+            "after version specifier\n"
             "    name (>= 1.0\n"
             "         ~~~~~~~^"
         )
 
+    def test_error_when_prefix_match_is_used_incorrectly(self) -> None:
+        # GIVEN
+        to_parse = "black (>=20.*) ; extra == 'format'"
+
+        # WHEN
+        with pytest.raises(InvalidRequirement) as ctx:
+            Requirement(to_parse)
+
+        # THEN
+        assert ctx.exconly() == (
+            "packaging.requirements.InvalidRequirement: "
+            ".* suffix can only be used with `==` or `!=` operators\n"
+            "    black (>=20.*) ; extra == 'format'\n"
+            "           ~~~~~^"
+        )
+
+    @pytest.mark.parametrize("operator", [">=", "<=", ">", "<", "~="])
+    def test_error_when_local_version_label_is_used_incorrectly(
+        self, operator: str
+    ) -> None:
+        # GIVEN
+        to_parse = f"name {operator} 1.0+local.version.label"
+        op_tilde = len(operator) * "~"
+
+        # WHEN
+        with pytest.raises(InvalidRequirement) as ctx:
+            Requirement(to_parse)
+
+        # THEN
+        assert ctx.exconly() == (
+            "packaging.requirements.InvalidRequirement: "
+            "Local version label can only be used with `==` or `!=` 
operators\n"
+            f"    name {operator} 1.0+local.version.label\n"
+            f"         {op_tilde}~~~~^"
+        )
+
     def test_error_when_bracket_not_closed_correctly(self) -> None:
         # GIVEN
         to_parse = "name[bar, baz >= 1.0"
@@ -295,7 +332,8 @@
         # THEN
         assert ctx.exconly() == (
             "packaging.requirements.InvalidRequirement: "
-            "Expected closing RIGHT_BRACKET\n"
+            "Expected matching RIGHT_BRACKET for LEFT_BRACKET, "
+            "after extras\n"
             "    name[bar, baz >= 1.0\n"
             "        ~~~~~~~~~~^"
         )
@@ -311,7 +349,8 @@
         # THEN
         assert ctx.exconly() == (
             "packaging.requirements.InvalidRequirement: "
-            "Expected closing RIGHT_BRACKET\n"
+            "Expected matching RIGHT_BRACKET for LEFT_BRACKET, "
+            "after extras\n"
             "    name[bar, baz\n"
             "        ~~~~~~~~~^"
         )
@@ -332,6 +371,23 @@
             "           ~~~~~~~~~~~~~~~~~~~~~~^"
         )
 
+    def test_error_marker_bracket_unclosed(self) -> None:
+        # GIVEN
+        to_parse = "name; (extra == 'example'"
+
+        # WHEN
+        with pytest.raises(InvalidRequirement) as ctx:
+            Requirement(to_parse)
+
+        # THEN
+        assert ctx.exconly() == (
+            "packaging.requirements.InvalidRequirement: "
+            "Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, "
+            "after marker expression\n"
+            "    name; (extra == 'example'\n"
+            "          ~~~~~~~~~~~~~~~~~~~^"
+        )
+
     def test_error_no_url_after_at(self) -> None:
         # GIVEN
         to_parse = "name @ "
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/test_specifiers.py 
new/packaging-23.1/tests/test_specifiers.py
--- old/packaging-23.0/tests/test_specifiers.py 2022-09-30 11:55:33.607073800 
+0200
+++ new/packaging-23.1/tests/test_specifiers.py 2023-04-12 18:05:06.676737300 
+0200
@@ -369,6 +369,8 @@
                 ("2!1.0", "==2!1.*"),
                 ("2!1.0", "==2!1.0"),
                 ("2!1.0", "!=1.0"),
+                ("2!1.0.0", "==2!1.0.*"),
+                ("2!1.0.0", "==2!1.*"),
                 ("1.0", "!=2!1.0"),
                 ("1.0", "<=2!0.1"),
                 ("2!1.0", ">=2.0"),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/packaging-23.0/tests/test_tags.py 
new/packaging-23.1/tests/test_tags.py
--- old/packaging-23.0/tests/test_tags.py       2022-12-26 20:52:02.397271200 
+0100
+++ new/packaging-23.1/tests/test_tags.py       2023-04-12 18:05:06.677162400 
+0200
@@ -234,7 +234,6 @@
 
         platforms = list(tags.mac_platforms(arch="x86_64"))
         if (major, minor) == ("10", "16"):
-            print(platforms, "macosx_11+")
             # For 10.16, the real version is at least 11.0.
             prefix, major, minor, _ = platforms[0].split("_", maxsplit=3)
             assert prefix == "macosx"
@@ -242,7 +241,6 @@
             assert minor == "0"
         else:
             expected = f"macosx_{major}_{minor}_"
-            print(platforms, expected)
             assert platforms[0].startswith(expected)
 
     def test_version_detection_10_15(self, monkeypatch):
@@ -252,7 +250,6 @@
         expected = "macosx_10_15_"
 
         platforms = list(tags.mac_platforms(arch="x86_64"))
-        print(platforms, expected)
         assert platforms[0].startswith(expected)
 
     def test_version_detection_compatibility(self, monkeypatch):
@@ -270,7 +267,6 @@
         unexpected = "macosx_10_16_"
 
         platforms = list(tags.mac_platforms(arch="x86_64"))
-        print(platforms, unexpected)
         assert not platforms[0].startswith(unexpected)
 
     @pytest.mark.parametrize("arch", ["x86_64", "i386"])
@@ -603,6 +599,13 @@
     assert tags.platform_tags() == expected
 
 
+def test_platform_tags_space(monkeypatch):
+    """Ensure spaces in platform tags are normalized to underscores."""
+    monkeypatch.setattr(platform, "system", lambda: "Isilon OneFS")
+    monkeypatch.setattr(sysconfig, "get_platform", lambda: "isilon onefs")
+    assert list(tags.platform_tags()) == ["isilon_onefs"]
+
+
 class TestCPythonABI:
     @pytest.mark.parametrize(
         "py_debug,gettotalrefcount,result",
@@ -774,6 +777,12 @@
         result = list(tags.cpython_tags((3, 11), abis=["whatever"]))
         assert tags.Tag("cp311", "whatever", "plat1") in result
 
+    def test_platform_name_space_normalization(self, monkeypatch):
+        """Ensure that spaces are translated to underscores in platform 
names."""
+        monkeypatch.setattr(sysconfig, "get_platform", lambda: "isilon onefs")
+        for tag in tags.cpython_tags():
+            assert " " not in tag.platform
+
     def test_major_only_python_version(self):
         result = list(tags.cpython_tags((3,), ["abi"], ["plat"]))
         assert result == [
@@ -843,9 +852,9 @@
         assert tags._generic_abi() == ["cp37m"]
 
     def test__generic_abi_jp(self, monkeypatch):
-        config = {"EXT_SUFFIX": ".return exactly this.so"}
+        config = {"EXT_SUFFIX": ".return_exactly_this.so"}
         monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
-        assert tags._generic_abi() == ["return exactly this"]
+        assert tags._generic_abi() == ["return_exactly_this"]
 
     def test__generic_abi_graal(self, monkeypatch):
         config = {"EXT_SUFFIX": ".graalpy-38-native-x86_64-darwin.so"}
@@ -901,6 +910,12 @@
         platform = platform.replace(".", "_")
         assert list(tags._generic_platforms()) == [platform]
 
+    def test_generic_platforms_space(self, monkeypatch):
+        """Ensure platform tags normalize spaces to underscores."""
+        platform_ = "isilon onefs"
+        monkeypatch.setattr(sysconfig, "get_platform", lambda: platform_)
+        assert list(tags._generic_platforms()) == [platform_.replace(" ", "_")]
+
     def test_iterator_returned(self):
         result_iterator = tags.generic_tags("sillywalk33", ["abi"], ["plat1", 
"plat2"])
         assert isinstance(result_iterator, collections.abc.Iterator)

Reply via email to