This is an automated email from the ASF dual-hosted git repository.

miroslav pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git
The following commit(s) were added to refs/heads/trunk by this push: new 406de12d53 OAK-11884: AzureArchiveManage.listArchives() should delete segments only if write acces is allowed (#2480) 406de12d53 is described below commit 406de12d53d54b537f28cea06308ff8a0bdf32b9 Author: Miroslav Smiljanic <smmiros...@gmail.com> AuthorDate: Fri Aug 29 12:11:02 2025 +0200 OAK-11884: AzureArchiveManage.listArchives() should delete segments only if write acces is allowed (#2480) --- .../oak/segment/azure/AzureArchiveManager.java | 56 +++++++++++---- .../segment/azure/AzureSegmentArchiveWriter.java | 1 + .../oak/segment/azure/AzureUtilities.java | 8 +++ .../segment/azure/v8/AzureArchiveManagerV8.java | 50 +++++++++++--- .../azure/v8/AzureSegmentArchiveWriterV8.java | 12 +++- .../oak/segment/azure/AzureArchiveManagerTest.java | 64 +++++++++++++++++- .../azure/AzureSegmentArchiveWriterTest.java | 31 ++++++++- .../oak/segment/azure/AzureUtilitiesTest.java | 79 ++++++++++++++++++++++ .../azure/v8/AzureArchiveManagerV8Test.java | 68 ++++++++++++++++++- .../azure/v8/AzureSegmentArchiveWriterV8Test.java | 20 ++++++ .../oak/segment/remote/RemoteUtilities.java | 4 ++ .../oak/segment/remote/WriteAccessController.java | 10 +++ .../oak/segment/remote/RemoteUtilitiesTest.java | 31 +++++++++ .../segment/remote/WriteAccessControllerTest.java | 9 +++ 14 files changed, 412 insertions(+), 31 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java index f4a5a7d0d4..1f130a9849 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java @@ -16,6 +16,7 @@ */ package org.apache.jackrabbit.oak.segment.azure; +import com.azure.core.util.BinaryData; import com.azure.core.util.polling.PollResponse; import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.models.BlobCopyInfo; @@ -56,6 +57,10 @@ public class AzureArchiveManager implements SegmentArchiveManager { private static final Logger log = LoggerFactory.getLogger(AzureArchiveManager.class); + private static final String DELETED_ARCHIVE_MARKER = "deleted"; + + private static final String CLOSED_ARCHIVE_MARKER = "closed"; + protected final BlobContainerClient readBlobContainerClient; protected final BlobContainerClient writeBlobContainerClient; @@ -89,8 +94,10 @@ public class AzureArchiveManager implements SegmentArchiveManager { Iterator<String> it = archiveNames.iterator(); while (it.hasNext()) { String archiveName = it.next(); - if (isArchiveEmpty(archiveName)) { - delete(archiveName); + if (deleteInProgress(archiveName)) { + if (writeAccessController.isWritingAllowed()) { + delete(archiveName); + } it.remove(); } } @@ -101,21 +108,19 @@ public class AzureArchiveManager implements SegmentArchiveManager { } /** - * Check if there's a valid 0000. segment in the archive + * Check if the archive is being deleted. 
+ * * @param archiveName - * @return true if the archive is empty (no 0000.* segment) + * @return true if the "deleted" marker exists */ - private boolean isArchiveEmpty(String archiveName) throws BlobStorageException { - String fullBlobPrefix = getDirectory(archiveName) + "0000."; - ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); - listBlobsOptions.setPrefix(fullBlobPrefix); - return !readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().hasNext(); + private boolean deleteInProgress(String archiveName) throws BlobStorageException { + return readBlobContainerClient.getBlobClient(getDirectory(archiveName) + DELETED_ARCHIVE_MARKER).exists(); } @Override public SegmentArchiveReader open(String archiveName) throws IOException { try { - String closedBlob = getDirectory(archiveName) + "closed"; + String closedBlob = getDirectory(archiveName) + CLOSED_ARCHIVE_MARKER; if (!readBlobContainerClient.getBlobClient(closedBlob).exists()) { return null; } @@ -138,22 +143,44 @@ public class AzureArchiveManager implements SegmentArchiveManager { @Override public boolean delete(String archiveName) { try { + uploadDeletedMarker(archiveName); getBlobs(archiveName) .forEach(blobItem -> { try { - writeAccessController.checkWritingAllowed(); - writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + String blobName = getName(blobItem); + if (!blobName.equals(DELETED_ARCHIVE_MARKER) && !blobName.equals(CLOSED_ARCHIVE_MARKER)) { + writeAccessController.checkWritingAllowed(); + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + } } catch (BlobStorageException e) { log.error("Can't delete segment {}", blobItem.getName(), e); } }); + deleteClosedMarker(archiveName); + deleteDeletedMarker(archiveName); return true; - } catch (IOException e) { + } catch (IOException | BlobStorageException e) { log.error("Can't delete archive {}", archiveName, e); return false; } } + private void deleteDeletedMarker(String archiveName) throws BlobStorageException { + writeAccessController.checkWritingAllowed(); + writeBlobContainerClient.getBlobClient(getDirectory(archiveName) + DELETED_ARCHIVE_MARKER).deleteIfExists(); + } + + private void deleteClosedMarker(String archiveName) throws BlobStorageException { + writeAccessController.checkWritingAllowed(); + writeBlobContainerClient.getBlobClient(getDirectory(archiveName) + CLOSED_ARCHIVE_MARKER).deleteIfExists(); + } + + private void uploadDeletedMarker(String archiveName) throws BlobStorageException { + writeAccessController.checkWritingAllowed(); + writeBlobContainerClient.getBlobClient(getDirectory(archiveName) + DELETED_ARCHIVE_MARKER).getBlockBlobClient().upload(BinaryData.fromBytes(new byte[0]), true); + } + + @Override public boolean renameTo(String from, String to) { try { @@ -240,7 +267,8 @@ public class AzureArchiveManager implements SegmentArchiveManager { private void delete(String archiveName, Set<UUID> recoveredEntries) throws IOException { getBlobs(archiveName) .forEach(blobItem -> { - if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(blobItem)))) { + String name = getName(blobItem); + if (RemoteUtilities.isSegmentName(name) && !recoveredEntries.contains(RemoteUtilities.getSegmentUUID(name))) { try { writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); } catch (BlobStorageException e) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java 
b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java index b558143b6c..d96dce3171 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java @@ -57,6 +57,7 @@ public class AzureSegmentArchiveWriter extends AbstractRemoteSegmentArchiveWrite this.archiveName = AzureUtilities.ensureNoTrailingSlash(archiveName); this.archivePathPrefix = AzureUtilities.asAzurePrefix(rootPrefix, archiveName); this.writeAccessController = writeAccessController; + this.created = AzureUtilities.archiveExists(blobContainerClient, archivePathPrefix); } @Override diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java index 25f65a288c..c256392537 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java @@ -69,6 +69,14 @@ public final class AzureUtilities { return blobContainerClient.listBlobs(listOptions, null).stream().collect(Collectors.toList()); } + public static boolean archiveExists(BlobContainerClient blobContainerClient, String archivePathPrefix) { + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix(archivePathPrefix); + listOptions.setMaxResultsPerPage(1); + return blobContainerClient.listBlobs(listOptions, null).iterator().hasNext(); + } + + public static void readBufferFully(BlockBlobClient blob, Buffer buffer) throws IOException { try { blob.downloadStream(new ByteBufferOutputStream(buffer)); diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java index ea10290000..d63f6144a3 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java @@ -55,6 +55,9 @@ public class AzureArchiveManagerV8 implements SegmentArchiveManager { private static final Logger log = LoggerFactory.getLogger(AzureSegmentArchiveReaderV8.class); + private static final String DELETED_ARCHIVE_MARKER = "deleted"; + private static final String CLOSED_ARCHIVE_MARKER = "closed"; + protected final CloudBlobDirectory cloudBlobDirectory; protected final IOMonitor ioMonitor; @@ -84,8 +87,10 @@ public class AzureArchiveManagerV8 implements SegmentArchiveManager { Iterator<String> it = archiveNames.iterator(); while (it.hasNext()) { String archiveName = it.next(); - if (isArchiveEmpty(archiveName)) { - delete(archiveName); + if (deleteInProgress(archiveName)) { + if (writeAccessController.isWritingAllowed()) { + delete(archiveName); + } it.remove(); } } @@ -96,19 +101,20 @@ public class AzureArchiveManagerV8 implements SegmentArchiveManager { } /** - * Check if there's a valid 0000. segment in the archive + * Check if the archive is being deleted. 
+ * * @param archiveName - * @return true if the archive is empty (no 0000.* segment) + * @return true if the "deleted" marker exists */ - private boolean isArchiveEmpty(String archiveName) throws IOException, URISyntaxException, StorageException { - return !getDirectory(archiveName).listBlobs("0000.").iterator().hasNext(); + private boolean deleteInProgress(String archiveName) throws IOException, URISyntaxException, StorageException { + return getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).exists(); } @Override public SegmentArchiveReader open(String archiveName) throws IOException { try { CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - if (!archiveDirectory.getBlockBlobReference("closed").exists()) { + if (!archiveDirectory.getBlockBlobReference(CLOSED_ARCHIVE_MARKER).exists()) { return null; } return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor); @@ -131,22 +137,43 @@ public class AzureArchiveManagerV8 implements SegmentArchiveManager { @Override public boolean delete(String archiveName) { try { + uploadDeletedMarker(archiveName); getBlobs(archiveName) .forEach(cloudBlob -> { try { - writeAccessController.checkWritingAllowed(); - cloudBlob.delete(); + String blobName = getName(cloudBlob); + if (!blobName.equals(DELETED_ARCHIVE_MARKER) && !blobName.equals(CLOSED_ARCHIVE_MARKER)) { + writeAccessController.checkWritingAllowed(); + cloudBlob.delete(); + } } catch (StorageException e) { log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); } }); + deleteClosedMarker(archiveName); + deleteDeletedMarker(archiveName); return true; - } catch (IOException e) { + } catch (IOException | URISyntaxException | StorageException e) { log.error("Can't delete archive {}", archiveName, e); return false; } } + private void deleteDeletedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { + writeAccessController.checkWritingAllowed(); + getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).deleteIfExists(); + } + + private void deleteClosedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { + writeAccessController.checkWritingAllowed(); + getDirectory(archiveName).getBlockBlobReference(CLOSED_ARCHIVE_MARKER).deleteIfExists(); + } + + private void uploadDeletedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { + writeAccessController.checkWritingAllowed(); + getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).openOutputStream().close(); + } + @Override public boolean renameTo(String from, String to) { try { @@ -231,7 +258,8 @@ public class AzureArchiveManagerV8 implements SegmentArchiveManager { private void delete(String archiveName, Set<UUID> recoveredEntries) throws IOException { getBlobs(archiveName) .forEach(cloudBlob -> { - if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(cloudBlob)))) { + String name = getName(cloudBlob); + if (RemoteUtilities.isSegmentName(name) && !recoveredEntries.contains(RemoteUtilities.getSegmentUUID(name))) { try { cloudBlob.delete(); } catch (StorageException e) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java index 89ae33763a..cd3e15ca8e 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java +++ 
b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java @@ -23,6 +23,7 @@ import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; import java.io.File; import java.io.IOException; import java.net.URISyntaxException; +import java.util.NoSuchElementException; import java.util.concurrent.TimeUnit; import com.microsoft.azure.storage.blob.BlobRequestOptions; @@ -52,11 +53,20 @@ public class AzureSegmentArchiveWriterV8 extends AbstractRemoteSegmentArchiveWri private final BlobRequestOptions writeOptimisedBlobRequestOptions; - public AzureSegmentArchiveWriterV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { + public AzureSegmentArchiveWriterV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) throws IOException { super(ioMonitor, monitor); this.archiveDirectory = archiveDirectory; this.writeAccessController = writeAccessController; this.writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions()); + this.created = hasBlobs(); + } + + private boolean hasBlobs() throws IOException { + try { + return this.archiveDirectory.listBlobs().iterator().hasNext(); + } catch (StorageException | URISyntaxException | NoSuchElementException e) { + throw new IOException(e); + } } @Override diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java index f6b19dffd7..d220123bc6 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java @@ -16,6 +16,7 @@ */ package org.apache.jackrabbit.oak.segment.azure; +import com.azure.core.util.BinaryData; import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobStorageException; @@ -92,6 +93,7 @@ public class AzureArchiveManagerTest { private BlobContainerClient noRetryBlobContainerClient; private AzurePersistence azurePersistence; + private WriteAccessController writeAccessController; @Before public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { @@ -99,7 +101,7 @@ public class AzureArchiveManagerTest { writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); - WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); azurePersistence.setWriteAccessController(writeAccessController); @@ -523,7 +525,6 @@ public class AzureArchiveManagerTest { .when(blobLeaseMocked).renewLease(); AzurePersistence mockedRwPersistence = Mockito.spy(rwPersistence); - WriteAccessController writeAccessController = new WriteAccessController(); AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> { }, writeAccessController); AzureArchiveManager azureArchiveManager = new 
AzureArchiveManager(oakDirectory, writeOakDirectory, "", new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); @@ -583,6 +584,65 @@ public class AzureArchiveManagerTest { rwFileStore2.close(); } + @Test + public void testListArchivesDoesNotReturnDeletedArchive() throws IOException, BlobStorageException { + // The archive manager should not return the archive which has "deleted" marker + SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + // Create an archive + createArchive(manager, "data00000a.tar"); + + // Verify the archive is listed + List<String> archives = manager.listArchives(); + assertTrue("Archive should be listed before deletion", archives.contains("data00000a.tar")); + + // Upload deleted marker for the archive + writeBlobContainerClient.getBlobClient("oak/data00000a.tar/deleted").getBlockBlobClient().upload(BinaryData.fromBytes(new byte[0])); + + // Verify the archive is no longer listed after adding deleted marker + archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + } + + @Test + public void testListArchiveWithDeleteMarkerPresentWithWriteAccess() throws Exception{ + SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + createArchive(manager, "data00000a.tar"); + + writeBlobContainerClient.getBlobClient("oak/data00000a.tar/deleted").getBlockBlobClient().upload(BinaryData.fromBytes(new byte[0])); + + List<String> archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + + assertFalse("Archive should be deleted", readBlobContainerClient.listBlobs(new ListBlobsOptions().setPrefix("oak/data00000a.tar"), null).iterator().hasNext()); + } + + @Test + public void testListArchiveWithDeleteMarkerPresentAndNoWriteAccess() throws Exception{ + SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + createArchive(manager, "data00000a.tar"); + + writeBlobContainerClient.getBlobClient("oak/data00000a.tar/deleted").getBlockBlobClient().upload(BinaryData.fromBytes(new byte[0])); + + // disable writing + writeAccessController.disableWriting(); + + List<String> archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + + assertTrue("Archive should not be deleted", readBlobContainerClient.listBlobs(new ListBlobsOptions().setPrefix("oak/data00000a.tar"), null).iterator().hasNext()); + } + + private void createArchive(SegmentArchiveManager manager, String archiveName) throws IOException { + SegmentArchiveWriter writer = manager.create(archiveName); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + } + private PersistentCache createPersistenceCache() { return new AbstractPersistentCache() { @Override diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index b2a12f039b..48a19fe458 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -236,11 +236,40 @@ public class AzureSegmentArchiveWriterTest { } private void createContainerMock() { + // Mock container creation (PUT) mockServerClient .when(request() .withMethod("PUT") - .withPath(BASE_PATH)) + .withPath(BASE_PATH) + .withQueryStringParameter("restype", "container")) .respond(response().withStatusCode(201).withBody("Container created successfully")); + + // Mock container existence check (HEAD) + mockServerClient + .when(request() + .withMethod("HEAD") + .withPath(BASE_PATH) + .withQueryStringParameter("restype", "container")) + .respond(response().withStatusCode(200)); + + // Mock listBlobs operation for archiveExists() call - return empty list + mockServerClient + .when(request() + .withMethod("GET") + .withPath(BASE_PATH) + .withQueryStringParameter("restype", "container") + .withQueryStringParameter("comp", "list") + .withQueryStringParameter("prefix", "oak/data00000a.tar/") + .withQueryStringParameter("maxresults", "1"), Times.once()) + .respond(response() + .withStatusCode(200) + .withHeader("Content-Type", "application/xml") + .withBody("<?xml version=\"1.0\" encoding=\"utf-8\"?>" + + "<EnumerationResults ServiceEndpoint=\"http://127.0.0.1:10000/devstoreaccount1\" ContainerName=\"oak-test\">" + + "<Prefix>oak/data00000a.tar/</Prefix>" + + "<MaxResults>1</MaxResults>" + + "<Blobs></Blobs>" + + "</EnumerationResults>")); } public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) { diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilitiesTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilitiesTest.java new file mode 100644 index 0000000000..5a66b0cc7e --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilitiesTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class AzureUtilitiesTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private BlobContainerClient blobContainerClient; + private String archivePrefix = "oak/data00000a.tar/"; + private String archiveName = "data00000a.tar"; + + @Before + public void setup() throws BlobStorageException { + blobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + } + + @Test + public void testArchiveExistsWhenArchiveHasBlobs() { + blobContainerClient.getBlobClient(archivePrefix + RemoteUtilities.getSegmentFileName(0, 0, 0)).getBlockBlobClient() + .upload(BinaryData.fromString("")); + + assertTrue("Archive should exist when it contains segment blob", + AzureUtilities.archiveExists(blobContainerClient, archivePrefix)); + } + + @Test + public void testArchiveExistsWhenArchiveIsEmpty() { + + assertFalse("Archive should not exist when no blobs are present", + AzureUtilities.archiveExists(blobContainerClient, archivePrefix)); + } + + @Test + public void testArchiveExistsWithArchiveMetadata() { + blobContainerClient.getBlobClient(archivePrefix + archiveName + ".brf").getBlockBlobClient() + .upload(BinaryData.fromString("")); + blobContainerClient.getBlobClient(archivePrefix + archiveName + ".gph").getBlockBlobClient() + .upload(BinaryData.fromString("")); + + assertTrue("Archive should exist when it contains metadata", + AzureUtilities.archiveExists(blobContainerClient, archivePrefix)); + } + + @Test + public void testArchiveExistsWithArchiveClosedMarker() { + blobContainerClient.getBlobClient(archivePrefix + "closed").getBlockBlobClient() + .upload(BinaryData.fromString("")); + + assertTrue("Archive should exist when it contains closed marker", + AzureUtilities.archiveExists(blobContainerClient, archivePrefix)); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java index 5305018ba3..eba893de2d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java @@ -92,12 +92,13 @@ public class AzureArchiveManagerV8Test { private CloudBlobContainer container; private AzurePersistenceV8 azurePersistenceV8; + private WriteAccessController writeAccessController; @Before public void setup() throws StorageException, InvalidKeyException, URISyntaxException { container = azurite.getContainer("oak-test"); - WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); azurePersistenceV8.setWriteAccessController(writeAccessController); @@ -490,7 +491,6 @@ public class AzureArchiveManagerV8Test { .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); AzurePersistenceV8 
mockedRwPersistence = Mockito.spy(rwPersistence); - WriteAccessController writeAccessController = new WriteAccessController(); AzureRepositoryLockV8 azureRepositoryLockV8 = new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController); AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); @@ -547,6 +547,70 @@ public class AzureArchiveManagerV8Test { rwFileStore2.close(); } + + @Test + public void testListArchivesDoesNotReturnDeletedArchive() throws IOException, URISyntaxException, StorageException { + // The archive manager should not return the archive which has "deleted" marker + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + // Create an archive + createArchive(manager, "data00000a.tar"); + + // Verify the archive is listed + List<String> archives = manager.listArchives(); + assertTrue("Archive should be listed before deletion", archives.contains("data00000a.tar")); + + // Upload deleted marker for the archive + CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); + archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); + + // Verify the archive is no longer listed after adding deleted marker + archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + } + + @Test + public void testListArchiveWithDeleteMarkerPresentWithWriteAccess() throws Exception{ + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + createArchive(manager, "data00000a.tar"); + + // Upload deleted marker for the archive + CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); + archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); + + List<String> archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + + assertFalse("Archive should be deleted", container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + } + + + @Test + public void testListArchiveWithDeleteMarkerPresentAndNoWriteAccess() throws Exception{ + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + + createArchive(manager, "data00000a.tar"); + + // Upload deleted marker for the archive + CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); + archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); + + writeAccessController.disableWriting(); + + List<String> archives = manager.listArchives(); + assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); + + assertTrue("Archive should not be deleted", container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + } + + private static void createArchive(SegmentArchiveManager manager, String archiveName) throws IOException { + SegmentArchiveWriter writer = manager.create(archiveName); + UUID u = UUID.randomUUID(); + 
writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + } private PersistentCache createPersistenceCache() { return new AbstractPersistentCache() { diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java index e659701cdb..eaf7f439b5 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java @@ -182,6 +182,9 @@ public class AzureSegmentArchiveWriterV8Test { @NotNull private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException { + // Mock the list blobs operation that's called during AzureSegmentArchiveWriterV8 initialization + expectListBlobsRequest(); + WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak"));/**/ @@ -223,6 +226,23 @@ public class AzureSegmentArchiveWriterV8Test { .withBody(new BinaryBody(new byte[10])); } + private void expectListBlobsRequest() { + mockServerClient + .when(request() + .withMethod("GET") + .withPath(BASE_PATH) + .withQueryStringParameter("comp", "list") + .withQueryStringParameter("prefix", "oak/data00000a.tar/"), Times.once()) + .respond(response() + .withStatusCode(200) + .withHeader("Content-Type", "application/xml") + .withBody("<?xml version=\"1.0\" encoding=\"utf-8\"?>" + + "<EnumerationResults ServiceEndpoint=\"http://127.0.0.1:10000/devstoreaccount1\" ContainerName=\"oak-test\">" + + "<Prefix></Prefix>" + + "<Blobs></Blobs>" + + "</EnumerationResults>")); + } + @NotNull private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException { URI uri = new URIBuilder() diff --git a/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilities.java b/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilities.java index 9de3ccb6e7..91fa3b173d 100644 --- a/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilities.java +++ b/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilities.java @@ -53,6 +53,10 @@ public final class RemoteUtilities { return UUID.fromString(m.group(2)); } + public static boolean isSegmentName(String name) { + return null != name && PATTERN.matcher(name).matches(); + } + private static class ArchiveIndexComparator implements Comparator<String> { final static Pattern indexPattern = Pattern.compile("[0-9]+"); diff --git a/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessController.java b/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessController.java index 929e5375f0..3fa162cbc0 100644 --- a/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessController.java +++ b/oak-segment-remote/src/main/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessController.java @@ -33,6 +33,9 @@ public class WriteAccessController { } } + /** + * Blocks the current thread until writing is allowed. 
+ */ public void checkWritingAllowed() { while (!isWritingAllowed) { synchronized (lock) { @@ -48,4 +51,11 @@ public class WriteAccessController { } } } + + /** + * @return true if writing is allowed, false otherwise + */ + public boolean isWritingAllowed() { + return isWritingAllowed; + } } diff --git a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilitiesTest.java b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilitiesTest.java index be445659d6..a7b47e13e2 100644 --- a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilitiesTest.java +++ b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/RemoteUtilitiesTest.java @@ -26,6 +26,8 @@ import java.util.UUID; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; public class RemoteUtilitiesTest { @Test @@ -66,4 +68,33 @@ public class RemoteUtilitiesTest { public void testSortArchivesLargeIndices() { expectArchiveSortOrder(Arrays.asList("data00003a.tar", "data20000a.tar", "data100000a.tar")); } + + @Test + public void testIsSegmentName_ValidName() { + UUID uuid = UUID.randomUUID(); + String validName = RemoteUtilities.getSegmentFileName(0, uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); + assertTrue(RemoteUtilities.isSegmentName(validName)); + + String validMaxName = RemoteUtilities.getSegmentFileName( + RemoteUtilities.MAX_ENTRY_COUNT - 1, + uuid.getMostSignificantBits(), + uuid.getLeastSignificantBits() + ); + assertTrue(RemoteUtilities.isSegmentName(validMaxName)); + } + + @Test + public void testIsSegmentName_InvalidNames() { + // closed marker + assertFalse(RemoteUtilities.isSegmentName("closed")); + + // metadata files + assertFalse(RemoteUtilities.isSegmentName("data00000a.tar.brf")); + assertFalse(RemoteUtilities.isSegmentName("data00000a.tar.gph")); + assertFalse(RemoteUtilities.isSegmentName("data00000a.tar.idx")); + + // empty value + assertFalse(RemoteUtilities.isSegmentName("")); + assertFalse(RemoteUtilities.isSegmentName(null)); + } } diff --git a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessControllerTest.java b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessControllerTest.java index 4302d8158e..751a12b67d 100644 --- a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessControllerTest.java +++ b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/WriteAccessControllerTest.java @@ -19,6 +19,7 @@ package org.apache.jackrabbit.oak.segment.remote; import org.junit.Test; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class WriteAccessControllerTest { @@ -51,6 +52,14 @@ public class WriteAccessControllerTest { assertFalse(t2.isAlive()); } + @Test + public void testWritingAllowed() { + WriteAccessController controller = new WriteAccessController(); + assertFalse(controller.isWritingAllowed()); + controller.enableWriting(); + assertTrue(controller.isWritingAllowed()); + } + private void assertThreadWaiting(Thread.State state) { assert state == Thread.State.WAITING || state == Thread.State.TIMED_WAITING; }
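For readers skimming the diff, the gist of the new deletion protocol can be restated as a small sketch. The "BlobStore" interface, the ArchiveDeletionSketch class and the putEmpty/filterArchives names below are hypothetical stand-ins for illustration only and do not appear in the patch; the real classes use the Azure BlobContainerClient / CloudBlobDirectory calls shown above. Only the "deleted"/"closed" marker names, the ordering of the steps, and the two WriteAccessController calls (checkWritingAllowed, isWritingAllowed) are taken from the diff; error handling is deliberately omitted.

// Minimal sketch of the marker-based deletion protocol introduced by this change.
// "BlobStore" is a hypothetical abstraction used for illustration only; the real
// code uses the Azure SDK types shown in the diff above.
import java.util.Iterator;
import java.util.List;

import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;

interface BlobStore {
    boolean exists(String blobName);
    void putEmpty(String blobName);     // create an empty marker blob
    void delete(String blobName);
    List<String> list(String prefix);   // full blob names under the given prefix
}

class ArchiveDeletionSketch {
    private static final String DELETED_MARKER = "deleted";
    private static final String CLOSED_MARKER = "closed";

    private final BlobStore store;
    private final WriteAccessController writeAccessController;

    ArchiveDeletionSketch(BlobStore store, WriteAccessController writeAccessController) {
        this.store = store;
        this.writeAccessController = writeAccessController;
    }

    // delete(): persist the "deleted" marker first, then remove segment and metadata
    // blobs, then drop the "closed" marker and finally the "deleted" marker.
    boolean delete(String archiveDir) {
        writeAccessController.checkWritingAllowed();             // blocks until writes are allowed
        store.putEmpty(archiveDir + DELETED_MARKER);
        for (String blob : store.list(archiveDir)) {
            String name = blob.substring(archiveDir.length());
            if (!name.equals(DELETED_MARKER) && !name.equals(CLOSED_MARKER)) {
                store.delete(blob);
            }
        }
        store.delete(archiveDir + CLOSED_MARKER);
        store.delete(archiveDir + DELETED_MARKER);               // marker is removed last
        return true;
    }

    // listArchives(): hide half-deleted archives from every reader, but finish the
    // physical deletion only on a node that currently holds write access.
    void filterArchives(List<String> archiveDirs) {
        Iterator<String> it = archiveDirs.iterator();
        while (it.hasNext()) {
            String dir = it.next();
            if (store.exists(dir + DELETED_MARKER)) {
                if (writeAccessController.isWritingAllowed()) {  // non-blocking check
                    delete(dir);
                }
                it.remove();                                     // read-only nodes just skip it
            }
        }
    }
}

The point of the extra marker is that a crash, or a node losing write access mid-delete, leaves the archive in a state every instance can recognise: the "deleted" marker survives, listArchives() keeps hiding the archive, and the next instance that does hold write access completes the cleanup, instead of a read-only standby deleting segments it is not allowed to touch.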