This is an automated email from the ASF dual-hosted git repository.

sbp pushed a commit to branch sbp
in repository https://gitbox.apache.org/repos/asf/tooling-trusted-releases.git

commit 9003b05087b8278654e5023f9fbedee092c6bc9f
Author: Sean B. Palmer <[email protected]>
AuthorDate: Thu Feb 26 15:30:52 2026 +0000

    Separate the code to finalise a revision
---
 atr/storage/writers/revision.py    | 243 ++++++++++++++++++++++---------------
 tests/unit/test_create_revision.py |  65 +++++++++-
 2 files changed, 205 insertions(+), 103 deletions(-)

diff --git a/atr/storage/writers/revision.py b/atr/storage/writers/revision.py
index 9e7bbbd0..c99443cb 100644
--- a/atr/storage/writers/revision.py
+++ b/atr/storage/writers/revision.py
@@ -45,6 +45,8 @@ import atr.util as util
 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable
 
+    import atr.models.attestable
+
 
 class SafeSession:
     def __init__(self, temp_dir: str):
@@ -64,6 +66,129 @@ class SafeSession:
         return False
 
 
+async def _finalise_revision(
+    data: db.Session,
+    *,
+    asf_uid: str,
+    base_hashes: dict[str, str],
+    base_inodes: dict[str, int],
+    description: str | None,
+    merge_enabled: bool,
+    n_inodes: dict[str, int],
+    old_revision: sql.Revision | None,
+    path_to_hash: dict[str, str],
+    path_to_size: dict[str, int],
+    previous_attestable: atr.models.attestable.AttestableV1 | None,
+    project_name: str,
+    release: sql.Release,
+    release_name: str,
+    temp_dir: str,
+    temp_dir_path: pathlib.Path,
+    version_name: str,
+) -> sql.Revision:
+    try:
+        # This is the only place where models.Revision is constructed
+        # That makes models.populate_revision_sequence_and_name safe against races
+        # Because that event is called when data.add is called below
+        # And we have a write lock at that point through the use of data.begin_immediate
+        new_revision = sql.Revision(
+            release_name=release_name,
+            release=release,
+            asfuid=asf_uid,
+            created=datetime.datetime.now(datetime.UTC),
+            phase=release.phase,
+            description=description,
+        )
+
+        # Acquire the write lock and add the row
+        # We need this write lock for moving the directory below atomically
+        # But it also helps to make models.populate_revision_sequence_and_name safe against races
+        await data.begin_immediate()
+        data.add(new_revision)
+
+        # Flush but do not commit the new revision row to get its name and number
+        # The row will still be invisible to other sessions after flushing
+        await data.flush()
+
+        # Merge with the prior revision if there was an intervening change
+        prior_name = new_revision.parent_name
+        if (
+            merge_enabled
+            and (old_revision is not None)
+            and (prior_name is not None)
+            and (prior_name != old_revision.name)
+        ):
+            prior_number = prior_name.split()[-1]
+            prior_dir = paths.release_directory_base(release) / prior_number
+            await merge.merge(
+                base_inodes,
+                base_hashes,
+                prior_dir,
+                project_name,
+                version_name,
+                prior_number,
+                temp_dir_path,
+                n_inodes,
+                path_to_hash,
+                path_to_size,
+            )
+            previous_attestable = await attestable.load(project_name, version_name, prior_number)
+
+        # Rename the directory to the new revision number
+        await data.refresh(release)
+        new_revision_dir = paths.release_directory(release)
+
+        # Ensure that the parent directory exists
+        await aiofiles.os.makedirs(new_revision_dir.parent, exist_ok=True)
+
+        # Raise an error if the destination directory already exists
+        # This can happen for example if there was a previous failed cleanup
+        if await aiofiles.os.path.exists(new_revision_dir):
+            raise types.FailedError(f"Revision directory {new_revision_dir} already exists")
+
+        # Rename the temporary interim directory to the new revision number
+        await aiofiles.os.rename(temp_dir, new_revision_dir)
+    except Exception:
+        await aioshutil.rmtree(temp_dir)
+        raise
+
+    # Change permissions of all directories in the new revision directory to 555
+    # This prevents accidental modifications to any directory in the new revision
+    # This must be done after the rename, otherwise the rename will fail
+    # The ".." entry in a directory is modified when it is moved between parents
+    # (Additionally, on macOS a 555 directory cannot be renamed within the same parent)
+    await asyncio.to_thread(util.chmod_directories, new_revision_dir, 0o555)
+
+    policy = release.release_policy or release.project.release_policy
+
+    await attestable.write_files_data(
+        project_name,
+        version_name,
+        new_revision.number,
+        policy.model_dump() if policy else None,
+        asf_uid,
+        previous_attestable,
+        path_to_hash,
+        path_to_size,
+    )
+
+    # Commit to end the transaction started by data.begin_immediate
+    # We must commit the revision before starting the checks
+    # This also releases the write lock
+    await data.commit()
+
+    async with data.begin():
+        # Run checks if in DRAFT phase
+        # We could also run this outside the data Session
+        # But then it would create its own new Session
+        # It does, however, need a transaction to be created using data.begin()
+        if release.phase == sql.ReleasePhase.RELEASE_CANDIDATE_DRAFT:
+            # Must use caller_data here because we acquired the write lock
+            await tasks.draft_checks(asf_uid, project_name, version_name, new_revision.number, caller_data=data)
+
+    return new_revision
+
+
 class GeneralPublic:
     def __init__(
         self,
@@ -201,108 +326,26 @@ class CommitteeParticipant(FoundationCommitter):
             raise
 
         async with SafeSession(temp_dir) as data:
-            try:
-                # This is the only place where models.Revision is constructed
-                # That makes models.populate_revision_sequence_and_name safe against races
-                # Because that event is called when data.add is called below
-                # And we have a write lock at that point through the use of data.begin_immediate
-                new_revision = sql.Revision(
-                    release_name=release_name,
-                    release=release,
-                    asfuid=asf_uid,
-                    created=datetime.datetime.now(datetime.UTC),
-                    phase=release.phase,
-                    description=description,
-                )
-
-                # Acquire the write lock and add the row
-                # We need this write lock for moving the directory below atomically
-                # But it also helps to make models.populate_revision_sequence_and_name safe against races
-                await data.begin_immediate()
-                data.add(new_revision)
-
-                # Flush but do not commit the new revision row to get its name and number
-                # The row will still be invisible to other sessions after flushing
-                await data.flush()
-
-                # Merge with the prior revision if there was an intervening change
-                prior_name = new_revision.parent_name
-                if (
-                    merge_enabled
-                    and (old_revision is not None)
-                    and (prior_name is not None)
-                    and (prior_name != old_revision.name)
-                ):
-                    prior_number = prior_name.split()[-1]
-                    prior_dir = paths.release_directory_base(release) / prior_number
-                    await merge.merge(
-                        base_inodes,
-                        base_hashes,
-                        prior_dir,
-                        project_name,
-                        version_name,
-                        prior_number,
-                        temp_dir_path,
-                        n_inodes,
-                        path_to_hash,
-                        path_to_size,
-                    )
-                    previous_attestable = await attestable.load(project_name, version_name, prior_number)
-
-                # Rename the directory to the new revision number
-                await data.refresh(release)
-                new_revision_dir = paths.release_directory(release)
-
-                # Ensure that the parent directory exists
-                await aiofiles.os.makedirs(new_revision_dir.parent, exist_ok=True)
-
-                # Raise an error if the destination directory already exists
-                # This can happen for example if there was a previous failed cleanup
-                if await aiofiles.os.path.exists(new_revision_dir):
-                    raise types.FailedError(f"Revision directory {new_revision_dir} already exists")
-
-                # Rename the temporary interim directory to the new revision number
-                await aiofiles.os.rename(temp_dir, new_revision_dir)
-            except Exception:
-                await aioshutil.rmtree(temp_dir)
-                raise
-
-            # Change permissions of all directories in the new revision directory to 555
-            # This prevents accidental modifications to any directory in the new revision
-            # This must be done after the rename, otherwise the rename will fail
-            # The ".." entry in a directory is modified when it is moved between parents
-            # (Additionally, on macOS a 555 directory cannot be renamed within the same parent)
-            await asyncio.to_thread(util.chmod_directories, new_revision_dir, 0o555)
-
-            policy = release.release_policy or release.project.release_policy
-
-            await attestable.write_files_data(
-                project_name,
-                version_name,
-                new_revision.number,
-                policy.model_dump() if policy else None,
-                asf_uid,
-                previous_attestable,
-                path_to_hash,
-                path_to_size,
+            return await _finalise_revision(
+                data,
+                asf_uid=asf_uid,
+                base_hashes=base_hashes,
+                base_inodes=base_inodes,
+                description=description,
+                merge_enabled=merge_enabled,
+                n_inodes=n_inodes,
+                old_revision=old_revision,
+                path_to_hash=path_to_hash,
+                path_to_size=path_to_size,
+                previous_attestable=previous_attestable,
+                project_name=project_name,
+                release=release,
+                release_name=release_name,
+                temp_dir=temp_dir,
+                temp_dir_path=temp_dir_path,
+                version_name=version_name,
             )
 
-            # Commit to end the transaction started by data.begin_immediate
-            # We must commit the revision before starting the checks
-            # This also releases the write lock
-            await data.commit()
-
-            async with data.begin():
-                # Run checks if in DRAFT phase
-                # We could also run this outside the data Session
-                # But then it would create its own new Session
-                # It does, however, need a transaction to be created using data.begin()
-                if release.phase == sql.ReleasePhase.RELEASE_CANDIDATE_DRAFT:
-                    # Must use caller_data here because we acquired the write lock
-                    await tasks.draft_checks(asf_uid, project_name, version_name, new_revision.number, caller_data=data)
-
-        return new_revision
-
 
 class CommitteeMember(CommitteeParticipant):
     def __init__(
diff --git a/tests/unit/test_create_revision.py b/tests/unit/test_create_revision.py
index 635dcdee..47d50c44 100644
--- a/tests/unit/test_create_revision.py
+++ b/tests/unit/test_create_revision.py
@@ -56,8 +56,9 @@ class FakeRevision:
 
 
 class MockSafeData:
-    def __init__(self, parent_name: str):
+    def __init__(self, parent_name: str, new_number: str = "00006"):
         self._new_revision: FakeRevision | None = None
+        self._new_number = new_number
         self._parent_name = parent_name
         self.add = mock.MagicMock(side_effect=self._add)
         self.begin = mock.MagicMock(return_value=AsyncContextManager())
@@ -72,8 +73,8 @@ class MockSafeData:
     async def _flush(self) -> None:
         if self._new_revision is None:
             raise RuntimeError("Expected data.add to set _new_revision before flush")
-        self._new_revision.name = f"{self._new_revision.release_name} 00006"
-        self._new_revision.number = "00006"
+        self._new_revision.name = f"{self._new_revision.release_name} {self._new_number}"
+        self._new_revision.number = self._new_number
         self._new_revision.parent_name = self._parent_name
 
 
@@ -171,6 +172,64 @@ async def test_clone_from_older_revision_skips_merge_without_intervening_change(
         raise AssertionError("Expected inode scan to run only for the temporary working directory")
 
 
+@pytest.mark.asyncio
+async def test_intervening_revision_triggers_merge_and_uses_latest_parent(tmp_path: pathlib.Path):
+    release = mock.MagicMock()
+    release.phase = sql.ReleasePhase.RELEASE_PREVIEW
+    release.project = mock.MagicMock()
+    release.project.release_policy = None
+    release.release_policy = None
+    release_name = sql.release_name("proj", "1.0")
+
+    old_revision = mock.MagicMock()
+    old_revision.name = f"{release_name} 00005"
+    old_revision.number = "00005"
+
+    first_attestable = mock.MagicMock(paths={"dist/a.tar.gz": "h1"})
+    second_attestable = mock.MagicMock(paths={"dist/b.tar.gz": "h2"})
+
+    mock_session = _mock_db_session(release)
+    participant = _make_participant()
+    safe_data = MockSafeData(parent_name=f"{release_name} 00006", new_number="00007")
+    merge_mock = mock.AsyncMock()
+    load_mock = mock.AsyncMock(side_effect=[first_attestable, second_attestable])
+
+    with (
+        mock.patch.object(revision.aiofiles.os, "makedirs", new_callable=mock.AsyncMock),
+        mock.patch.object(revision.aiofiles.os, "rename", new_callable=mock.AsyncMock),
+        mock.patch.object(revision.attestable, "load", new=load_mock),
+        mock.patch.object(
+            revision.attestable, "paths_to_hashes_and_sizes", new_callable=mock.AsyncMock, return_value=({}, {})
+        ),
+        mock.patch.object(revision.attestable, "write_files_data", new_callable=mock.AsyncMock),
+        mock.patch.object(revision.db, "session", return_value=mock_session),
+        mock.patch.object(revision.detection, "validate_directory", return_value=[]),
+        mock.patch.object(
+            revision.interaction, "latest_revision", new_callable=mock.AsyncMock, return_value=old_revision
+        ),
+        mock.patch.object(revision.merge, "merge", new=merge_mock),
+        mock.patch.object(revision.sql, "Revision", side_effect=_make_fake_revision),
+        mock.patch.object(revision, "SafeSession", return_value=MockSafeSession(safe_data)),
+        mock.patch.object(revision.tasks, "draft_checks", new_callable=mock.AsyncMock),
+        mock.patch.object(revision.util, "chmod_directories"),
+        mock.patch.object(revision.util, "chmod_files"),
+        mock.patch.object(revision.util, "create_hard_link_clone", new_callable=mock.AsyncMock),
+        mock.patch.object(revision.paths, "get_tmp_dir", return_value=tmp_path),
+        mock.patch.object(revision.util, "paths_to_inodes", return_value={}),
+        mock.patch.object(revision.paths, "release_directory", return_value=tmp_path / "releases" / "00007"),
+        mock.patch.object(revision.paths, "release_directory_base", return_value=tmp_path / "releases"),
+    ):
+        created_revision = await participant.create_revision("proj", "1.0", "test")
+
+    assert isinstance(created_revision, FakeRevision)
+    assert merge_mock.await_count == 1
+    assert load_mock.await_count == 2
+
+    merge_await_args = merge_mock.await_args
+    assert merge_await_args is not None
+    assert merge_await_args.args[5] == "00006"
+
+
 @pytest.mark.asyncio
async def test_modify_failed_error_propagates_and_cleans_up(tmp_path: pathlib.Path):
     received_args: dict[str, object] = {}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to