Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-s3transfer for 
openSUSE:Factory checked in at 2025-04-25 22:18:06
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-s3transfer (Old)
 and      /work/SRC/openSUSE:Factory/.python-s3transfer.new.30101 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-s3transfer"

Fri Apr 25 22:18:06 2025 rev:37 rq:1272393 version:0.12.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-s3transfer/python-s3transfer.changes      2025-03-20 19:25:05.613889419 +0100
+++ /work/SRC/openSUSE:Factory/.python-s3transfer.new.30101/python-s3transfer.changes   2025-04-25 22:18:09.500634193 +0200
@@ -1,0 +2,6 @@
+Thu Apr 24 06:53:46 UTC 2025 - John Paul Adrian Glaubitz <adrian.glaub...@suse.com>
+
+- Update to version 0.12.0
+  * feature:Python: End of support for Python 3.8
+
+-------------------------------------------------------------------

Old:
----
  s3transfer-0.11.4.tar.gz

New:
----
  s3transfer-0.12.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-s3transfer.spec ++++++
--- /var/tmp/diff_new_pack.DUVyZh/_old  2025-04-25 22:18:10.588679919 +0200
+++ /var/tmp/diff_new_pack.DUVyZh/_new  2025-04-25 22:18:10.592680088 +0200
@@ -18,7 +18,7 @@
 
 %{?sle15_python_module_pythons}
 Name:           python-s3transfer
-Version:        0.11.4
+Version:        0.12.0
 Release:        0
 Summary:        Python S3 transfer manager
 License:        Apache-2.0

++++++ s3transfer-0.11.4.tar.gz -> s3transfer-0.12.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/PKG-INFO new/s3transfer-0.12.0/PKG-INFO
--- old/s3transfer-0.11.4/PKG-INFO      2025-03-04 20:15:32.381687000 +0100
+++ new/s3transfer-0.12.0/PKG-INFO      2025-04-22 21:49:29.054536000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: s3transfer
-Version: 0.11.4
+Version: 0.12.0
 Summary: An Amazon S3 Transfer Manager
 Home-page: https://github.com/boto/s3transfer
 Author: Amazon Web Services
@@ -13,13 +13,12 @@
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Requires-Python: >= 3.8
+Requires-Python: >= 3.9
 License-File: LICENSE.txt
 License-File: NOTICE.txt
 Requires-Dist: botocore<2.0a.0,>=1.37.4
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/pyproject.toml new/s3transfer-0.12.0/pyproject.toml
--- old/s3transfer-0.11.4/pyproject.toml        2025-03-04 20:12:08.000000000 +0100
+++ new/s3transfer-0.12.0/pyproject.toml        2025-04-22 21:49:28.000000000 +0200
@@ -37,7 +37,7 @@
 line-length = 79
 indent-width = 4
 
-target-version = "py38"
+target-version = "py39"
 
 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/s3transfer/__init__.py new/s3transfer-0.12.0/s3transfer/__init__.py
--- old/s3transfer-0.11.4/s3transfer/__init__.py        2025-03-04 20:15:32.000000000 +0100
+++ new/s3transfer-0.12.0/s3transfer/__init__.py        2025-04-22 21:49:28.000000000 +0200
@@ -145,7 +145,7 @@
 from s3transfer.exceptions import RetriesExceededError, S3UploadFailedError
 
 __author__ = 'Amazon Web Services'
-__version__ = '0.11.4'
+__version__ = '0.12.0'
 
 
 class NullHandler(logging.Handler):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/s3transfer/download.py new/s3transfer-0.12.0/s3transfer/download.py
--- old/s3transfer-0.11.4/s3transfer/download.py        2025-03-04 20:12:08.000000000 +0100
+++ new/s3transfer-0.12.0/s3transfer/download.py        2025-04-22 21:45:42.000000000 +0200
@@ -750,7 +750,7 @@
 
     def __init__(self):
         self._writes = []
-        self._pending_offsets = set()
+        self._pending_offsets = {}
         self._next_offset = 0
 
     def request_writes(self, offset, data):
@@ -766,23 +766,49 @@
         each method call.
 
         """
-        if offset < self._next_offset:
+        if offset + len(data) <= self._next_offset:
             # This is a request for a write that we've already
             # seen.  This can happen in the event of a retry
             # where if we retry at offset N/2, we'll requeue
             # offsets 0-N/2 again.
             return []
         writes = []
+        if offset < self._next_offset:
+            # This is a special case where the write request contains
+            # both seen AND unseen data. This can happen in the case
+            # that we queue part of a chunk due to an incomplete read,
+            then pop the incomplete data for writing, then we receive the retry
+            # for the incomplete read which contains both the previously-seen
+            # partial chunk followed by the rest of the chunk (unseen).
+            #
+            # In this case, we discard the bytes of the data we've already
+            # queued before, and only queue the unseen bytes.
+            seen_bytes = self._next_offset - offset
+            data = data[seen_bytes:]
+            offset = self._next_offset
         if offset in self._pending_offsets:
-            # We've already queued this offset so this request is
-            # a duplicate.  In this case we should ignore
-            # this request and prefer what's already queued.
-            return []
-        heapq.heappush(self._writes, (offset, data))
-        self._pending_offsets.add(offset)
-        while self._writes and self._writes[0][0] == self._next_offset:
-            next_write = heapq.heappop(self._writes)
-            writes.append({'offset': next_write[0], 'data': next_write[1]})
-            self._pending_offsets.remove(next_write[0])
-            self._next_offset += len(next_write[1])
+            queued_data = self._pending_offsets[offset]
+            if len(data) <= len(queued_data):
+                # We already have a write request queued with the same offset
+                # with at least as much data that is present in this
+                # request. In this case we should ignore this request
+                # and prefer what's already queued.
+                return []
+            else:
+                # We have a write request queued with the same offset,
+                # but this request contains more data. This can happen
+                # in the case of a retried request due to an incomplete
+                # read, followed by a retry containing the full response
+                # body. In this case, we should overwrite the queued
+                # request with this one since it contains more data.
+                self._pending_offsets[offset] = data
+        else:
+            heapq.heappush(self._writes, offset)
+            self._pending_offsets[offset] = data
+        while self._writes and self._writes[0] == self._next_offset:
+            next_write_offset = heapq.heappop(self._writes)
+            next_write = self._pending_offsets[next_write_offset]
+            writes.append({'offset': next_write_offset, 'data': next_write})
+            del self._pending_offsets[next_write_offset]
+            self._next_offset += len(next_write)
         return writes
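
The net effect of this hunk: request_writes() now tolerates retries that
overlap data it has already handed out, instead of discarding them wholesale.
A minimal walk-through of the new behavior (assuming the patched class is
DeferQueue from s3transfer.download, the class this hunk modifies; offsets
and data mirror the new unit test further below):

    from s3transfer.download import DeferQueue

    q = DeferQueue()

    # First request starts at the expected offset, so it is emitted at once.
    q.request_writes(offset=0, data='foo')
    # -> [{'offset': 0, 'data': 'foo'}]

    # A retry re-delivers the already-written 'foo' plus the unseen ' bar'.
    # Only the seen prefix is discarded; the unseen tail is emitted at the
    # adjusted offset 3 (previously the whole request was dropped).
    q.request_writes(offset=0, data='foo bar')
    # -> [{'offset': 3, 'data': ' bar'}]
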
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/s3transfer.egg-info/PKG-INFO new/s3transfer-0.12.0/s3transfer.egg-info/PKG-INFO
--- old/s3transfer-0.11.4/s3transfer.egg-info/PKG-INFO  2025-03-04 20:15:32.000000000 +0100
+++ new/s3transfer-0.12.0/s3transfer.egg-info/PKG-INFO  2025-04-22 21:49:29.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: s3transfer
-Version: 0.11.4
+Version: 0.12.0
 Summary: An Amazon S3 Transfer Manager
 Home-page: https://github.com/boto/s3transfer
 Author: Amazon Web Services
@@ -13,13 +13,12 @@
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Requires-Python: >= 3.8
+Requires-Python: >= 3.9
 License-File: LICENSE.txt
 License-File: NOTICE.txt
 Requires-Dist: botocore<2.0a.0,>=1.37.4
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/setup.py new/s3transfer-0.12.0/setup.py
--- old/s3transfer-0.11.4/setup.py      2025-03-04 20:15:32.000000000 +0100
+++ new/s3transfer-0.12.0/setup.py      2025-04-22 21:49:28.000000000 +0200
@@ -33,7 +33,7 @@
         'crt': 'botocore[crt]>=1.37.4,<2.0a.0',
     },
     license="Apache License 2.0",
-    python_requires=">= 3.8",
+    python_requires=">= 3.9",
     classifiers=[
         'Development Status :: 3 - Alpha',
         'Intended Audience :: Developers',
@@ -42,7 +42,6 @@
         'Programming Language :: Python',
         'Programming Language :: Python :: 3',
         'Programming Language :: Python :: 3 :: Only',
-        'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',
         'Programming Language :: Python :: 3.10',
         'Programming Language :: Python :: 3.11',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3transfer-0.11.4/tests/unit/test_download.py new/s3transfer-0.12.0/tests/unit/test_download.py
--- old/s3transfer-0.11.4/tests/unit/test_download.py   2025-03-04 20:12:08.000000000 +0100
+++ new/s3transfer-0.12.0/tests/unit/test_download.py   2025-04-22 21:45:42.000000000 +0200
@@ -963,7 +963,7 @@
         writes = self.q.request_writes(offset=11, data='hello again')
         self.assertEqual(writes, [{'offset': 11, 'data': 'hello again'}])
 
-    def test_writes_below_min_offset_are_ignored(self):
+    def test_writes_with_last_byte_below_min_offset_are_ignored(self):
         self.q.request_writes(offset=0, data='a')
         self.q.request_writes(offset=1, data='b')
         self.q.request_writes(offset=2, data='c')
@@ -978,13 +978,36 @@
             [{'offset': 3, 'data': 'd'}],
         )
 
-    def test_duplicate_writes_are_ignored(self):
+    def test_writes_below_min_offset_with_last_byte_above_min_offset_are_queued(
+        self,
+    ):
+        self.assertEqual(
+            self.q.request_writes(offset=0, data='foo'),
+            [{'offset': 0, 'data': 'foo'}],
+        )
+
+        # Even though a partial write of 'foo' was completed at offset 0,
+        # a subsequent request to the same offset with a longer
+        # length will write a substring of the data starting at
+        # index next_offset.
+        self.assertEqual(
+            self.q.request_writes(offset=0, data='foo bar'),
+            [
+                # Note we are writing a substring of the data starting at
+                # index 3 since the previous write to index 0 had length 3.
+                {'offset': 3, 'data': ' bar'},
+            ],
+        )
+
+    def test_duplicate_writes_same_length_are_ignored(self):
         self.q.request_writes(offset=2, data='c')
         self.q.request_writes(offset=1, data='b')
 
         # We're still waiting for offset=0, but if
-        # a duplicate write comes in for offset=2/offset=1
-        # it's ignored.  This gives "first one wins" behavior.
+        # a duplicate write with the same length comes in
+        # for offset=2/offset=1 it's ignored.
+        # This gives "largest one wins" behavior with ties
+        # broken via "first one wins".
         self.assertEqual(self.q.request_writes(offset=2, data='X'), [])
         self.assertEqual(self.q.request_writes(offset=1, data='Y'), [])
 
@@ -997,3 +1020,22 @@
                 {'offset': 2, 'data': 'c'},
             ],
         )
+
+    def test_duplicate_writes_longer_length_update_queue(self):
+        self.q.request_writes(offset=1, data='b')
+
+        # We're still waiting for offset=0, but if
+        # a write comes in for the same offset=2/offset=1
+        # it updates the queue if the request contains more data.
+        # This gives "largest one wins" behavior with ties
+        # broken via "first one wins".
+        self.assertEqual(self.q.request_writes(offset=1, data='bar'), [])
+
+        self.assertEqual(
+            self.q.request_writes(offset=0, data='a'),
+            [
+                {'offset': 0, 'data': 'a'},
+                # Note we're seeing 'bar', and not 'b', since len(bar) > len(b).
+                {'offset': 1, 'data': 'bar'},
+            ],
+        )
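
The new tests pin down the "largest one wins" semantics for duplicate
offsets. A short reproduction of the final test above, again assuming
DeferQueue from s3transfer.download (an illustrative sketch, not part of
the diff):

    from s3transfer.download import DeferQueue

    q = DeferQueue()
    q.request_writes(offset=1, data='b')    # queued; still waiting on offset 0
    q.request_writes(offset=1, data='bar')  # longer retry replaces queued 'b'

    # Once offset 0 arrives, both writes flush in order, with 'bar' having
    # won over the shorter 'b'.
    q.request_writes(offset=0, data='a')
    # -> [{'offset': 0, 'data': 'a'}, {'offset': 1, 'data': 'bar'}]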
