This is an automated email from the ASF dual-hosted git repository.

jsedding pushed a commit to branch jsedding/OAK-12085-oom-in-segment-azure-tests
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git

commit 6bd9d2b36cd44f5b5b83ef3815fc9717c77637da
Author: Julian Sedding <[email protected]>
AuthorDate: Wed Feb 11 10:41:27 2026 +0100

    OAK-12085 - OutOfMemory in oak-segment-azure tests
    
    - consistently close FileStores and other closeable objects
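
    Tests now open FileStores in try-with-resources, roughly following the
    pattern below (illustrative sketch based on the changed tests; imports
    and JUnit scaffolding are elided):

        AzurePersistence p = new AzurePersistence(readBlobContainerClient,
                writeBlobContainerClient, noRetryBlobContainerClient, "oak");
        // closing the FileStore even when the test body throws releases its
        // caches and background threads instead of letting them accumulate
        try (FileStore fs = FileStoreTestUtil.createFileStore(new File("target"), p)) {
            SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fs).build();
            // ... exercise the store ...
        }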
---
 .../oak/segment/azure/AzureArchiveManagerTest.java | 339 +++++++++++----------
 .../oak/segment/azure/AzureJournalFileTest.java    |  14 +-
 .../oak/segment/azure/AzureReadSegmentTest.java    |  20 +-
 .../oak/segment/azure/AzureRepositoryLockTest.java | 129 ++++----
 .../azure/AzureSegmentArchiveWriterTest.java       |  44 ++-
 .../oak/segment/azure/AzuriteDockerRule.java       |  13 +-
 .../oak/segment/azure/FileStoreTestUtil.java       |  48 +++
 .../segment/azure/tool/SegmentCopyTestBase.java    |  63 ++--
 .../azure/v8/AzureArchiveManagerV8Test.java        | 233 +++++++-------
 .../segment/azure/v8/AzureReadSegmentV8Test.java   |  20 +-
 .../azure/v8/AzureRepositoryLockV8Test.java        |  57 ++--
 .../azure/v8/AzureSegmentArchiveWriterV8Test.java  |  12 +
 .../split/SplitPersistenceBlobTest.java            |  37 ++-
 .../persistence/split/SplitPersistenceTest.java    |  12 +-
 .../split/v8/SplitPersistenceBlobV8Test.java       |  37 ++-
 .../split/v8/SplitPersistenceV8Test.java           |  12 +-
 .../oak/segment/file/TarRevisionsTest.java         |   6 +-
 .../testutils/NodeStoreTestHarness.java            |   3 +
 18 files changed, 607 insertions(+), 492 deletions(-)

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java
index c29e49922e..8a15229a2d 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java
@@ -33,7 +33,6 @@ import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.SegmentNotFoundException;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
@@ -168,42 +167,42 @@ public class AzureArchiveManagerTest {
     @Test
     public void testUncleanStop() throws IOException, 
InvalidFileStoreVersionException, CommitFailedException, BlobStorageException {
         AzurePersistence p = new AzurePersistence(readBlobContainerClient, 
writeBlobContainerClient, noRetryBlobContainerClient, "oak");
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
 
         
readBlobContainerClient.getBlobClient("oak/data00000a.tar/closed").delete();
         
readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").delete();
         
readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.gph").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+        }
     }
 
     @Test
     // see OAK-8566
     public void testUncleanStopWithEmptyArchive() throws IOException, 
InvalidFileStoreVersionException, CommitFailedException, BlobStorageException {
         AzurePersistence p = new AzurePersistence(readBlobContainerClient, 
writeBlobContainerClient, noRetryBlobContainerClient, "oak");
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // make sure there are 2 archives
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo2", "bar2");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo2", "bar2");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // remove the segment 0000 from the second archive
         ListBlobsOptions listBlobsOptions = new ListBlobsOptions();
@@ -213,42 +212,42 @@ public class AzureArchiveManagerTest {
         readBlobContainerClient.getBlobClient(blobItem.getName()).delete();
         
readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+        }
     }
 
     @Test
     public void testUncleanStopSegmentMissing() throws IOException, 
InvalidFileStoreVersionException, CommitFailedException, BlobStorageException {
         AzurePersistence p = new AzurePersistence(readBlobContainerClient, 
writeBlobContainerClient, noRetryBlobContainerClient, "oak");
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // make sure there are 2 archives
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo0", "bar0");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0001
-        builder.setProperty("foo1", "bar1");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0002
-        builder.setProperty("foo2", "bar2");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0003
-        builder.setProperty("foo3", "bar3");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo0", "bar0");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0001
+            builder.setProperty("foo1", "bar1");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0002
+            builder.setProperty("foo2", "bar2");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0003
+            builder.setProperty("foo3", "bar3");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+        }
 
         // remove the segment 0002 from the second archive
         ListBlobsOptions listOptions = new ListBlobsOptions();
@@ -257,32 +256,32 @@ public class AzureArchiveManagerTest {
         
readBlobContainerClient.getBlobClient(blobItem.getName()).getBlockBlobClient().delete();
         
readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-
-        //recovered archive data00001a.tar should not contain segments 0002 
and 0003
-        listOptions.setPrefix("oak/data00001a.tar/0002.");
-        assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-        listOptions.setPrefix("oak/data00001a.tar/0003.");
-        assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-
-        listOptions.setPrefix("oak/data00001a.tar.bak");
-        assertTrue("Backup directory should have been created", 
readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext());
-        //backup has all segments but 0002 since it was deleted before recovery
-        listOptions.setPrefix("oak/data00001a.tar.bak/0001.");
-        assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-        listOptions.setPrefix("oak/data00001a.tar.bak/0002.");
-        assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-        listOptions.setPrefix("oak/data00001a.tar.bak/0003.");
-        assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-
-        //verify content from recovered segments preserved
-        assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1"));
-        //content from deleted segments not preserved
-        assertNull(segmentNodeStore.getRoot().getString("foo2"));
-        assertNull(segmentNodeStore.getRoot().getString("foo3"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+
+            //recovered archive data00001a.tar should not contain segments 
0002 and 0003
+            listOptions.setPrefix("oak/data00001a.tar/0002.");
+            assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+            listOptions.setPrefix("oak/data00001a.tar/0003.");
+            assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+
+            listOptions.setPrefix("oak/data00001a.tar.bak");
+            assertTrue("Backup directory should have been created", 
readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext());
+            //backup has all segments but 0002 since it was deleted before 
recovery
+            listOptions.setPrefix("oak/data00001a.tar.bak/0001.");
+            assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+            listOptions.setPrefix("oak/data00001a.tar.bak/0002.");
+            assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+            listOptions.setPrefix("oak/data00001a.tar.bak/0003.");
+            assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+
+            //verify content from recovered segments preserved
+            assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1"));
+            //content from deleted segments not preserved
+            assertNull(segmentNodeStore.getRoot().getString("foo2"));
+            assertNull(segmentNodeStore.getRoot().getString("foo3"));
+        }
     }
 
     @Test
@@ -309,11 +308,15 @@ public class AzureArchiveManagerTest {
         SegmentArchiveManager manager = 
azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new 
FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter());
         SegmentArchiveWriter writer = manager.create("data00000a.tar");
 
-        Assert.assertFalse(manager.exists("data00000a.tar"));
-        UUID u = UUID.randomUUID();
-        writer.writeSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
-        writer.flush();
-        Assert.assertTrue(manager.exists("data00000a.tar"));
+        try {
+            Assert.assertFalse(manager.exists("data00000a.tar"));
+            UUID u = UUID.randomUUID();
+            writer.writeSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
+            writer.flush();
+            Assert.assertTrue(manager.exists("data00000a.tar"));
+        } finally {
+            writer.close();
+        }
     }
 
     @Test(expected = FileNotFoundException.class)
@@ -349,59 +352,58 @@ public class AzureArchiveManagerTest {
     public void testMissingSegmentDetectedInFileStore() throws IOException, 
BlobStorageException, InvalidFileStoreVersionException {
 
         AzurePersistence azurePersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(azurePersistence).build();
+        try (FileStore fileStore = FileStoreTestUtil.createFileStore(new 
File("target"), azurePersistence)) {
+            SegmentArchiveManager manager = 
azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new 
FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter());
+            SegmentArchiveWriter writer = manager.create("data00000a.tar");
 
-        SegmentArchiveManager manager = 
azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new 
FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter());
-        SegmentArchiveWriter writer = manager.create("data00000a.tar");
-
-        //Assert.assertFalse(manager.exists("data00000a.tar"));
-        UUID u = UUID.randomUUID();
-        writer.writeSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
-        writer.flush();
-        writer.close();
+            //Assert.assertFalse(manager.exists("data00000a.tar"));
+            UUID u = UUID.randomUUID();
+            writer.writeSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
+            writer.flush();
+            writer.close();
 
-        SegmentArchiveReader reader = manager.open("data00000a.tar");
-        Buffer segment = reader.readSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits());
-        assertNotNull(segment);
+            SegmentArchiveReader reader = manager.open("data00000a.tar");
+            Buffer segment = reader.readSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits());
+            assertNotNull(segment);
 
-        ListBlobsOptions listOptions = new ListBlobsOptions();
-        listOptions.setPrefix("oak/data00000a.tar/0000.");
-        BlobItem segment0000 = readBlobContainerClient.listBlobs(listOptions, 
null).iterator().next();
-        readBlobContainerClient.getBlobClient(segment0000.getName()).delete();
+            ListBlobsOptions listOptions = new ListBlobsOptions();
+            listOptions.setPrefix("oak/data00000a.tar/0000.");
+            BlobItem segment0000 = 
readBlobContainerClient.listBlobs(listOptions, null).iterator().next();
+            
readBlobContainerClient.getBlobClient(segment0000.getName()).delete();
 
-        // SegmentNotFoundException should be thrown here
-        fileStore.readSegment(new SegmentId(fileStore, 
u.getMostSignificantBits(), u.getLeastSignificantBits()));
+            // SegmentNotFoundException should be thrown here
+            fileStore.readSegment(new SegmentId(fileStore, 
u.getMostSignificantBits(), u.getLeastSignificantBits()));
+        }
     }
 
     @Test
     public void testReadOnlyRecovery() throws 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
BlobStorageException {
         AzurePersistence rwPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        rwFileStore.flush();
-
-        ListBlobsOptions listOptions = new ListBlobsOptions();
-        listOptions.setPrefix("oak/data00000a.tar");
-        assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-        listOptions.setPrefix("oak/data00000a.tar.ro.bak");
-        assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
-
-        // create read-only FS
-        AzurePersistence roPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly();
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            rwFileStore.flush();
 
-        PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
-                .getRoot()
-                .getProperty("foo");
-        assertThat(fooProperty, not(nullValue()));
-        assertThat(fooProperty.getValue(Type.STRING), equalTo("bar"));
+            ListBlobsOptions listOptions = new ListBlobsOptions();
+            listOptions.setPrefix("oak/data00000a.tar");
+            assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
+            listOptions.setPrefix("oak/data00000a.tar.ro.bak");
+            assertFalse(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
 
-        roFileStore.close();
-        rwFileStore.close();
+            // create read-only FS
+            AzurePersistence roPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
+                PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
+                        .getRoot()
+                        .getProperty("foo");
+                assertThat(fooProperty, not(nullValue()));
+                assertThat(fooProperty.getValue(Type.STRING), equalTo("bar"));
+            }
+        }
 
+        ListBlobsOptions listOptions = new ListBlobsOptions();
         listOptions.setPrefix("oak/data00000a.tar");
         assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
         // after creating a read-only FS, the recovery procedure should not be 
started since there is another running Oak process
@@ -412,12 +414,13 @@ public class AzureArchiveManagerTest {
     @Test
     public void testCachingPersistenceTarRecovery() throws 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
BlobStorageException {
         AzurePersistence rwPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        FileStore rwFileStore = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        rwFileStore.flush();
+        try (FileStore rwFileStore = 
FileStoreTestUtil.createFileStore(folder.newFolder(), rwPersistence)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            rwFileStore.flush();
+        }
 
         ListBlobsOptions listOptions = new ListBlobsOptions();
         listOptions.setPrefix("oak/data00000a.tar");
@@ -434,7 +437,9 @@ public class AzureArchiveManagerTest {
         SegmentNodeStorePersistence splitPersistence = new 
SplitPersistence(cachingPersistence, localPersistence);
 
         // exception should not be thrown here
-        FileStore splitPersistenceFileStore = 
FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build();
+        try (FileStore splitPersistenceFileStore = 
FileStoreTestUtil.createFileStore(localFolder, splitPersistence)) {
+            // nothing to do, just checking if the store can be opened
+        }
 
         listOptions.setPrefix("oak/data00000a.tar");
         assertTrue(readBlobContainerClient.listBlobs(listOptions, 
null).iterator().hasNext());
@@ -446,7 +451,7 @@ public class AzureArchiveManagerTest {
     @Test
     public void testCollectBlobReferencesForReadOnlyFileStore() throws 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
BlobStorageException {
         AzurePersistence rwPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build()) {
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
             SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
             NodeBuilder builder = segmentNodeStore.getRoot().builder();
             builder.setProperty("foo", "bar");
@@ -458,7 +463,7 @@ public class AzureArchiveManagerTest {
 
             // create read-only FS, while the rw FS is still open
             AzurePersistence roPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-            try (ReadOnlyFileStore roFileStore = 
FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly()) {
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
 
                 PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
                         .getRoot()
@@ -476,7 +481,7 @@ public class AzureArchiveManagerTest {
     @Test
     public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
BlobStorageException {
         AzurePersistence rwPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build()) {
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
             SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
             NodeBuilder builder = segmentNodeStore.getRoot().builder();
             builder.setProperty("foo", "bar");
@@ -488,7 +493,7 @@ public class AzureArchiveManagerTest {
 
             // create read-only FS, while the rw FS is still open
             AzurePersistence roPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-            try (ReadOnlyFileStore roFileStore = 
FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly()) {
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
 
                 PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
                         .getRoot()
@@ -525,8 +530,7 @@ public class AzureArchiveManagerTest {
                 .when(blobLeaseMocked).renewLease();
 
         AzurePersistence mockedRwPersistence = Mockito.spy(rwPersistence);
-        AzureRepositoryLock azureRepositoryLock = new 
AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {
-        }, writeAccessController);
+        AzureRepositoryLock azureRepositoryLock = new 
AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, 
writeAccessController, 1);
         AzureArchiveManager azureArchiveManager = new 
AzureArchiveManager(oakDirectory, writeOakDirectory, "", new 
IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController);
 
 
@@ -541,47 +545,46 @@ public class AzureArchiveManagerTest {
                 .doReturn(new AzureJournalFile(oakDirectory, 
writeOakDirectory, "journal.log", writeAccessController))
                 .when(mockedRwPersistence).getJournalFile();
 
-        FileStore rwFileStore = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-
+        try (FileStore rwFileStore = 
FileStoreTestUtil.createFileStore(folder.newFolder(), mockedRwPersistence)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
 
-        // simulate operation timeout when trying to renew lease
-        Mockito.reset(blobMocked);
 
-        BlobStorageException storageException =
-                //new BlobStorageException("operation timeout", 
BlobErrorCode.OPERATION_TIMED_OUT, new TimeoutException());
-                new BlobStorageException("operation timeout", null, new 
TimeoutException());
+            // simulate operation timeout when trying to renew lease
+            Mockito.reset(blobMocked);
 
-        Mockito.doThrow(storageException).when(blobLeaseMocked).renewLease();
+            BlobStorageException storageException =
+                    //new BlobStorageException("operation timeout", 
BlobErrorCode.OPERATION_TIMED_OUT, new TimeoutException());
+                    new BlobStorageException("operation timeout", null, new 
TimeoutException());
 
+            
Mockito.doThrow(storageException).when(blobLeaseMocked).renewLease();
 
-        // wait till lease expires
-        Thread.sleep(17000);
+            // wait till lease expires
+            Thread.sleep(3000);
 
-        // try updating repository
-        Thread thread = new Thread(() -> {
-            try {
-                builder.setProperty("foo", "bar");
-                segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
-                rwFileStore.flush();
-            } catch (Exception e) {
-                fail("No Exception expected, but got: " + e.getMessage());
-            }
-        });
-        thread.start();
+            // try updating repository
+            Thread thread = new Thread(() -> {
+                try {
+                    builder.setProperty("foo", "bar");
+                    segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+                    rwFileStore.flush();
+                } catch (Exception e) {
+                    fail("No Exception expected, but got: " + e.getMessage());
+                }
+            });
+            thread.start();
 
-        Thread.sleep(2000);
+            Thread.sleep(2000);
+        }
 
         // It should be possible to start another RW file store.
-        FileStore rwFileStore2 = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new 
AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, 
"")).build();
-        SegmentNodeStore segmentNodeStore2 = 
SegmentNodeStoreBuilders.builder(rwFileStore2).build();
-        NodeBuilder builder2 = segmentNodeStore2.getRoot().builder();
+        try (FileStore rwFileStore2 = 
FileStoreTestUtil.createFileStore(folder.newFolder(), new 
AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, ""))) {
+            SegmentNodeStore segmentNodeStore2 = 
SegmentNodeStoreBuilders.builder(rwFileStore2).build();
+            NodeBuilder builder2 = segmentNodeStore2.getRoot().builder();
 
-        //repository hasn't been updated
-        assertNull(builder2.getProperty("foo"));
-
-        rwFileStore2.close();
+            //repository hasn't been updated
+            assertNull(builder2.getProperty("foo"));
+        }
     }
 
     @Test
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
index e08f303d26..834ab6d261 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
@@ -29,7 +29,9 @@ import 
org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
 import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile;
 import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader;
 import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter;
+import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock;
 import org.jetbrains.annotations.NotNull;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -54,6 +56,8 @@ public class AzureJournalFileTest {
 
     private JournalFile journal;
 
+    private RepositoryLock repositoryLock;
+
     private final String rootPrefix = "oak";
 
     @Before
@@ -65,10 +69,18 @@ public class AzureJournalFileTest {
         WriteAccessController writeAccessController = new 
WriteAccessController();
         writeAccessController.enableWriting();
         AzurePersistence azurePersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, rootPrefix, null, 50);
-        azurePersistence.lockRepository();
+        repositoryLock = azurePersistence.lockRepository();
         journal = azurePersistence.getJournalFile();
     }
 
+    @After
+    public void teardown() throws IOException {
+        if (repositoryLock != null) {
+            repositoryLock.unlock();
+            repositoryLock = null;
+        }
+    }
+
     @Test
     public void testSplitJournalFiles() throws IOException {
         assertFalse(journal.exists());
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
index cbc3d2cf7c..20c8bfa59f 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
@@ -24,7 +24,6 @@ import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.segment.SegmentId;
 import org.apache.jackrabbit.oak.segment.SegmentNotFoundException;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException;
 import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
@@ -61,29 +60,20 @@ public class AzureReadSegmentTest {
     @Test(expected = SegmentNotFoundException.class)
     public void testReadNonExistentSegmentRepositoryReachable() throws 
IOException, InvalidFileStoreVersionException, BlobStorageException {
         AzurePersistence p = new AzurePersistence(readBlobContainerClient, 
writeBlobContainerClient, noRetryBlobContainerClient, "oak");
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentId id = new SegmentId(fs, 0, 0);
-
-        try {
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentId id = new SegmentId(fs, 0, 0);
             fs.readSegment(id);
-        } finally {
-            fs.close();
         }
     }
 
     @Test(expected = RepositoryNotReachableException.class)
     public void testReadExistentSegmentRepositoryNotReachable() throws 
IOException, InvalidFileStoreVersionException, BlobStorageException {
         AzurePersistence p = new 
ReadFailingAzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak");
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-
-        SegmentId id = new SegmentId(fs, 0, 0);
-        byte[] buffer = new byte[2];
-
-        try {
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentId id = new SegmentId(fs, 0, 0);
+            byte[] buffer = new byte[2];
             fs.writeSegment(id, buffer, 0, 2);
             fs.readSegment(id);
-        } finally {
-            fs.close();
         }
     }
 
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java
index 752933ee21..ae7504ea76 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java
@@ -87,12 +87,14 @@ public class AzureRepositoryLockTest {
         BlockBlobClient blockBlobClient = 
readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient();
         BlockBlobClient noRetrtBlockBlobClient = 
noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient();
         BlobLeaseClient blobLeaseClient = new 
BlobLeaseClientBuilder().blobClient(noRetrtBlockBlobClient).buildClient();
-        new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, 
new WriteAccessController()).lock();
+        AzureRepositoryLock lock = new AzureRepositoryLock(blockBlobClient, 
blobLeaseClient, () -> {}, new WriteAccessController()).lock();
         try {
             new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> 
{}, new WriteAccessController()).lock();
             fail("The second lock should fail.");
         } catch (IOException e) {
             // it's fine
+        } finally {
+            lock.unlock();
         }
     }
 
@@ -107,7 +109,12 @@ public class AzureRepositoryLockTest {
         BlobLeaseClient blobLeaseClient = new 
BlobLeaseClientBuilder().blobClient(noRetrtBlockBlobClient).buildClient();
 
         // no exception should be present when calling lock
-        new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, 
new WriteAccessController()).lock();
+        AzureRepositoryLock lock = new AzureRepositoryLock(blockBlobClient, 
blobLeaseClient, () -> {}, new WriteAccessController());
+        try {
+            lock.lock();
+        } finally {
+            lock.unlock();
+        }
     }
 
     @Test
@@ -128,7 +135,12 @@ public class AzureRepositoryLockTest {
         }).start();
 
         s.acquire();
-        new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, 
new WriteAccessController(), 10).lock();
+        AzureRepositoryLock lock = new AzureRepositoryLock(blockBlobClient, 
blobLeaseClient, () -> {}, new WriteAccessController(), 10);
+        try {
+            lock.lock();
+        } finally {
+            lock.unlock();
+        }
     }
 
     @Test
@@ -148,19 +160,25 @@ public class AzureRepositoryLockTest {
                 .doCallRealMethod()
                 
.when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), 
any());
 
-        new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new 
WriteAccessController()).lock();
+        AzureRepositoryLock lock = new AzureRepositoryLock(blobMocked, 
blobLeaseMocked, () -> {}, new WriteAccessController(), 1);
+        try {
+            lock.lock();
 
-        // wait till lease expires
-        Thread.sleep(16000);
+            // wait till lease expires
+            Thread.sleep(3000);
 
-        // reset the mock to default behaviour
-        
Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions)
 any(), any(), any());
+            // reset the mock to default behaviour
+            
Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions)
 any(), any(), any());
 
-        try {
-            new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new 
WriteAccessController()).lock();
-            fail("The second lock should fail.");
-        } catch (IOException e) {
-            // it's fine
+            try {
+                new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {
+                }, new WriteAccessController()).lock();
+                fail("The second lock should fail.");
+            } catch (IOException e) {
+                // it's fine
+            }
+        } finally {
+            lock.unlock();
         }
     }
 
@@ -191,29 +209,34 @@ public class AzureRepositoryLockTest {
 
         WriteAccessController writeAccessController = new 
WriteAccessController();
 
-        new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, 
writeAccessController).lock();
-
-
-        Thread thread = new Thread(() -> {
-
-            while (true) {
-                writeAccessController.checkWritingAllowed();
-
-            }
-        });
+        AzureRepositoryLock lock = new AzureRepositoryLock(blobMocked, 
blobLeaseMocked, () -> {}, writeAccessController);
+        Thread thread = null;
+        try {
+            lock.lock();
 
-        thread.start();
+            thread = new Thread(() -> {
+                while (true) {
+                    writeAccessController.checkWritingAllowed();
+                }
+            });
+            thread.start();
 
-        Thread.sleep(3000);
-        assertFalse("after 3 seconds thread should not be in a waiting state", 
thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(3000);
+            assertFalse("after 3 seconds thread should not be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
 
-        Thread.sleep(3000);
-        assertFalse("after 6 seconds thread should not be in a waiting state", 
thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(3000);
+            assertFalse("after 6 seconds thread should not be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
 
-        Thread.sleep(5000);
-        assertTrue("after more than 9 seconds thread should be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(5000);
+            assertTrue("after more than 9 seconds thread should be in a 
waiting state", thread.getState().equals(Thread.State.WAITING));
 
-        
Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions)
 any(), any(), any());
+            
Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions)
 any(), any(), any());
+        } finally {
+            lock.unlock();
+            if (thread != null) {
+                thread.interrupt();
+            }
+        }
     }
 
     @Test
@@ -246,21 +269,23 @@ public class AzureRepositoryLockTest {
         WriteAccessController writeAccessController = new 
WriteAccessController();
 
         AzureRepositoryLock lock = new AzureRepositoryLock(blockBlobClient, 
blobLeaseClient, shutdownHook, writeAccessController);
-        lock.lock();
-
-        // Enable delay injection after initial lease acquisition
-        delayPolicy.setEnabled(true);
+        try {
+            lock.lock();
 
-        // Wait for at least 3 renewal calls (2 timeouts + 1 success) with a 
timeout
-        Mockito.verify(blobLeaseClient, Mockito.timeout(15000).atLeast(3))
-                .renewLeaseWithResponse((RequestConditions) any(), any(), 
any());
+            // Enable delay injection after initial lease acquisition
+            delayPolicy.setEnabled(true);
 
-        assertTrue("Should have delayed at least 2 requests, but delayed: " + 
delayPolicy.getDelayedRequestCount(),
-                delayPolicy.getDelayedRequestCount() >= 2);
+            // Wait for at least 3 renewal calls (2 timeouts + 1 success) with 
a timeout
+            Mockito.verify(blobLeaseClient, Mockito.timeout(15000).atLeast(3))
+                    .renewLeaseWithResponse((RequestConditions) any(), any(), 
any());
 
-        assertFalse("Shutdown hook should not be called for client-side 
timeout exceptions", shutdownCalled.get());
+            assertTrue("Should have delayed at least 2 requests, but delayed: 
" + delayPolicy.getDelayedRequestCount(),
+                    delayPolicy.getDelayedRequestCount() >= 2);
 
-        lock.unlock();
+            assertFalse("Shutdown hook should not be called for client-side 
timeout exceptions", shutdownCalled.get());
+        } finally {
+            lock.unlock();
+        }
     }
 
     @Test
@@ -290,17 +315,19 @@ public class AzureRepositoryLockTest {
         WriteAccessController writeAccessController = new 
WriteAccessController();
 
         AzureRepositoryLock lock = new AzureRepositoryLock(blobMocked, 
blobLeaseMocked, shutdownHook, writeAccessController);
-        lock.lock();
-
-        // Wait for at least 3 calls (2 failures + 1 success) with a timeout
-        Mockito.verify(blobLeaseMocked, Mockito.timeout(10000).atLeast(3))
-                .renewLeaseWithResponse((RequestConditions) any(), any(), 
any());
+        try {
+            lock.lock();
 
-        // Verify that shutdown hook was NOT called - the IO exception should 
be treated as recoverable
-        assertFalse("Shutdown hook should not be called for IO exceptions", 
shutdownCalled.get());
+            // Wait for at least 3 calls (2 failures + 1 success) with a 
timeout
+            Mockito.verify(blobLeaseMocked, Mockito.timeout(10000).atLeast(3))
+                    .renewLeaseWithResponse((RequestConditions) any(), any(), 
any());
 
-        // Clean up: stop the renewal thread and release the lease
-        lock.unlock();
+            // Verify that shutdown hook was NOT called - the IO exception 
should be treated as recoverable
+            assertFalse("Shutdown hook should not be called for IO 
exceptions", shutdownCalled.get());
+        } finally {
+            // Clean up: stop the renewal thread and release the lease
+            lock.unlock();
+        }
     }
 
     /**
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
index 48a19fe458..50c9d4a50b 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
@@ -97,13 +97,21 @@ public class AzureSegmentArchiveWriterTest {
         mockServerClient
                 .when(writeBinaryReferencesRequest, Times.once())
                 .respond(response().withStatusCode(201));
+        // then allow closing the writer
+        mockServerClient
+                .when(getCloseArchiveRequest(), Times.once())
+                .respond(response().withStatusCode(201));
 
         SegmentArchiveWriter writer = createSegmentArchiveWriter();
-        writeAndFlushSegment(writer);
+        try {
+            writeAndFlushSegment(writer);
 
-        writer.writeBinaryReferences(new byte[10]);
+            writer.writeBinaryReferences(new byte[10]);
 
-        mockServerClient.verify(writeBinaryReferencesRequest, 
exactly(MAX_ATTEMPTS));
+            mockServerClient.verify(writeBinaryReferencesRequest, 
exactly(MAX_ATTEMPTS));
+        } finally {
+            writer.close();
+        }
     }
 
     @Test
@@ -119,13 +127,21 @@ public class AzureSegmentArchiveWriterTest {
         mockServerClient
                 .when(writeGraphRequest, Times.once())
                 .respond(response().withStatusCode(201));
+        // then allow closing the writer
+        mockServerClient
+                .when(getCloseArchiveRequest(), Times.once())
+                .respond(response().withStatusCode(201));
 
         SegmentArchiveWriter writer = createSegmentArchiveWriter();
-        writeAndFlushSegment(writer);
+        try {
+            writeAndFlushSegment(writer);
 
-        writer.writeGraph(new byte[10]);
+            writer.writeGraph(new byte[10]);
 
-        mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS));
+            mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS));
+        } finally {
+            writer.close();
+        }
     }
 
     @Test
@@ -143,9 +159,11 @@ public class AzureSegmentArchiveWriterTest {
                 .respond(response().withStatusCode(201));
 
         SegmentArchiveWriter writer = createSegmentArchiveWriter();
-        writeAndFlushSegment(writer);
-
-        writer.close();
+        try {
+            writeAndFlushSegment(writer);
+        } finally {
+            writer.close();
+        }
 
         mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS));
     }
@@ -161,9 +179,11 @@ public class AzureSegmentArchiveWriterTest {
                 .respond(response().withStatusCode(500));
 
         SegmentArchiveWriter writer = createSegmentArchiveWriter();
-        writeAndFlushSegment(writer);
-
-        assertThrows(IOException.class, writer::close);
+        try {
+            writeAndFlushSegment(writer);
+        } finally {
+            assertThrows(IOException.class, writer::close);
+        }
 
         mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS));
     }
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java
index 409442c82b..711f91b696 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java
@@ -73,15 +73,14 @@ public class AzuriteDockerRule extends ExternalResource {
         return new Statement() {
             @Override
             public void evaluate() throws Throwable {
-                try {
-                    before();
-                } catch (IllegalStateException e) {
-                    Assume.assumeNoException(STARTUP_EXCEPTION.get());
-                    throw e;
-                }
-
                 List<Throwable> errors = new ArrayList<Throwable>();
                 try {
+                    try {
+                        before();
+                    } catch (IllegalStateException e) {
+                        Assume.assumeNoException(STARTUP_EXCEPTION.get());
+                        throw e;
+                    }
                     base.evaluate();
                 } catch (Throwable t) {
                     errors.add(t);
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/FileStoreTestUtil.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/FileStoreTestUtil.java
new file mode 100644
index 0000000000..90a90f1bfe
--- /dev/null
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/FileStoreTestUtil.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.segment.azure;
+
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
+import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
+import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;
+import 
org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
+
+import java.io.File;
+import java.io.IOException;
+
+public class FileStoreTestUtil {
+
+    public static FileStore createFileStore(File folder, 
SegmentNodeStorePersistence persistence) throws 
InvalidFileStoreVersionException, IOException {
+        return createFileStoreBuilder(folder, persistence).build();
+    }
+
+    public static ReadOnlyFileStore createReadOnlyFileStore(File folder, 
SegmentNodeStorePersistence persistence) throws 
InvalidFileStoreVersionException, IOException {
+        return createFileStoreBuilder(folder, persistence)
+                .buildReadOnly();
+    }
+
+    public static FileStoreBuilder createFileStoreBuilder(File folder, 
SegmentNodeStorePersistence persistence) {
+        return FileStoreBuilder.fileStoreBuilder(folder)
+                .withStringCacheSize(0)
+                .withTemplateCacheSize(0)
+                .withSegmentCacheSize(8)
+                .withCustomPersistence(persistence);
+    }
+}
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java
index c7f7f07606..e0397602ce 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java
@@ -50,7 +50,6 @@ import org.apache.jackrabbit.oak.segment.SegmentCache;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8;
-import org.apache.jackrabbit.oak.segment.azure.tool.SegmentCopy;
 import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType;
 import 
org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
@@ -171,47 +170,49 @@ public abstract class SegmentCopyTestBase {
         for (String archive : srcArchives) {
             assertEquals(srcArchiveManager.exists(archive), 
destArchiveManager.exists(archive));
 
-            SegmentArchiveReader srcArchiveReader = 
srcArchiveManager.open(archive);
-            SegmentArchiveReader destArchiveReader = 
destArchiveManager.open(archive);
+            try (SegmentArchiveReader srcArchiveReader = 
srcArchiveManager.open(archive);
+                 SegmentArchiveReader destArchiveReader = 
destArchiveManager.open(archive)) {
 
-            List<SegmentArchiveEntry> srcSegments = 
srcArchiveReader.listSegments();
-            List<SegmentArchiveEntry> destSegments = 
destArchiveReader.listSegments();
+                List<SegmentArchiveEntry> srcSegments = 
srcArchiveReader.listSegments();
+                List<SegmentArchiveEntry> destSegments = 
destArchiveReader.listSegments();
 
-            for (int i = 0; i < srcSegments.size(); i++) {
-                SegmentArchiveEntry srcSegment = srcSegments.get(i);
-                SegmentArchiveEntry destSegment = destSegments.get(i);
+                for (int i = 0; i < srcSegments.size(); i++) {
+                    SegmentArchiveEntry srcSegment = srcSegments.get(i);
+                    SegmentArchiveEntry destSegment = destSegments.get(i);
 
-                assertEquals(srcSegment.getMsb(), destSegment.getMsb());
-                assertEquals(srcSegment.getLsb(), destSegment.getLsb());
-                assertEquals(srcSegment.getLength(), destSegment.getLength());
-                assertEquals(srcSegment.getFullGeneration(), 
destSegment.getFullGeneration());
-                assertEquals(srcSegment.getGeneration(), 
destSegment.getFullGeneration());
+                    assertEquals(srcSegment.getMsb(), destSegment.getMsb());
+                    assertEquals(srcSegment.getLsb(), destSegment.getLsb());
+                    assertEquals(srcSegment.getLength(), 
destSegment.getLength());
+                    assertEquals(srcSegment.getFullGeneration(), 
destSegment.getFullGeneration());
+                    assertEquals(srcSegment.getGeneration(), destSegment.getGeneration());
 
-                Buffer srcDataBuffer = 
srcArchiveReader.readSegment(srcSegment.getMsb(), srcSegment.getLsb());
-                Buffer destDataBuffer = 
destArchiveReader.readSegment(destSegment.getMsb(), destSegment.getLsb());
+                    Buffer srcDataBuffer = 
srcArchiveReader.readSegment(srcSegment.getMsb(), srcSegment.getLsb());
+                    Buffer destDataBuffer = 
destArchiveReader.readSegment(destSegment.getMsb(), destSegment.getLsb());
 
-                assertEquals(srcDataBuffer, destDataBuffer);
-            }
+                    assertEquals(srcDataBuffer, destDataBuffer);
+                }
+
+                Buffer srcBinRefBuffer = 
srcArchiveReader.getBinaryReferences();
+                Buffer destBinRefBuffer = 
destArchiveReader.getBinaryReferences();
+                assertEquals(srcBinRefBuffer, destBinRefBuffer);
 
-            Buffer srcBinRefBuffer = srcArchiveReader.getBinaryReferences();
-            Buffer destBinRefBuffer = destArchiveReader.getBinaryReferences();
-            assertEquals(srcBinRefBuffer, destBinRefBuffer);
-            
-            SegmentGraph srcGraph = srcArchiveReader.getGraph();
-            SegmentGraph destGraph = destArchiveReader.getGraph();
-            assertEquals(srcGraph, destGraph);
+                SegmentGraph srcGraph = srcArchiveReader.getGraph();
+                SegmentGraph destGraph = destArchiveReader.getGraph();
+                assertEquals(srcGraph, destGraph);
+            }
         }
     }
 
     private void checkJournal(SegmentNodeStorePersistence srcPersistence, 
SegmentNodeStorePersistence destPersistence)
             throws IOException {
-        JournalFileReader srcJournalFileReader = 
srcPersistence.getJournalFile().openJournalReader();
-        JournalFileReader destJournalFileReader = 
destPersistence.getJournalFile().openJournalReader();
+        try (JournalFileReader srcJournalFileReader = 
srcPersistence.getJournalFile().openJournalReader();
+             JournalFileReader destJournalFileReader = 
destPersistence.getJournalFile().openJournalReader()) {
 
-        String srcJournalLine = null;
-        while ((srcJournalLine = srcJournalFileReader.readLine()) != null) {
-            String destJournalLine = destJournalFileReader.readLine();
-            assertEquals(srcJournalLine, destJournalLine);
+            String srcJournalLine = null;
+            while ((srcJournalLine = srcJournalFileReader.readLine()) != null) 
{
+                String destJournalLine = destJournalFileReader.readLine();
+                assertEquals(srcJournalLine, destJournalLine);
+            }
         }
     }
 
@@ -278,4 +279,4 @@ public abstract class SegmentCopyTestBase {
         
sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(Instant.now().plus(Duration.ofDays(7))));
         return sharedAccessBlobPolicy;
     }
-}
\ No newline at end of file
+}
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java
index eba893de2d..9065977487 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java
@@ -32,8 +32,8 @@ import org.apache.jackrabbit.oak.segment.SegmentId;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.SegmentNotFoundException;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
@@ -165,109 +165,109 @@ public class AzureArchiveManagerV8Test {
     @Test
     public void testUncleanStop() throws URISyntaxException, IOException, 
InvalidFileStoreVersionException, CommitFailedException, StorageException {
         AzurePersistenceV8 p = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         container.getBlockBlobReference("oak/data00000a.tar/closed").delete();
         
container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete();
         
container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+        }
     }
 
     @Test
     // see OAK-8566
     public void testUncleanStopWithEmptyArchive() throws URISyntaxException, 
IOException, InvalidFileStoreVersionException, CommitFailedException, 
StorageException {
         AzurePersistenceV8 p = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // make sure there are 2 archives
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo2", "bar2");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo2", "bar2");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // remove the segment 0000 from the second archive
         ListBlobItem segment0000 = 
container.listBlobs("oak/data00001a.tar/0000.").iterator().next();
         ((CloudBlob) segment0000).delete();
         container.getBlockBlobReference("oak/data00001a.tar/closed").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+        }
     }
 
     @Test
     public void testUncleanStopSegmentMissing() throws URISyntaxException, 
IOException, InvalidFileStoreVersionException, CommitFailedException, 
StorageException {
         AzurePersistenceV8 p = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+        }
 
         // make sure there are 2 archives
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo0", "bar0");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0001
-        builder.setProperty("foo1", "bar1");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0002
-        builder.setProperty("foo2", "bar2");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        //create segment 0003
-        builder.setProperty("foo3", "bar3");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        fs.flush();
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo0", "bar0");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0001
+            builder.setProperty("foo1", "bar1");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0002
+            builder.setProperty("foo2", "bar2");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+            //create segment 0003
+            builder.setProperty("foo3", "bar3");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            fs.flush();
+        }
 
         // remove the segment 0002 from the second archive
         ListBlobItem segment0002 = 
container.listBlobs("oak/data00001a.tar/0002.").iterator().next();
         ((CloudBlob) segment0002).delete();
         container.getBlockBlobReference("oak/data00001a.tar/closed").delete();
 
-        fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build();
-        assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
-
-        //recovered archive data00001a.tar should not contain segments 0002 
and 0003
-        
assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext());
-        
assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext());
-
-        assertTrue("Backup directory should have been created", 
container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext());
-        //backup has all segments but 0002 since it was deleted before recovery
-        
assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext());
-        
assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext());
-        
assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext());
-
-        //verify content from recovered segments preserved
-        assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1"));
-        //content from deleted segments not preserved
-        assertNull(segmentNodeStore.getRoot().getString("foo2"));
-        assertNull(segmentNodeStore.getRoot().getString("foo3"));
-        fs.close();
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(fs).build();
+            assertEquals("bar", segmentNodeStore.getRoot().getString("foo"));
+
+            //recovered archive data00001a.tar should not contain segments 
0002 and 0003
+            
assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext());
+            
assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext());
+
+            assertTrue("Backup directory should have been created", 
container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext());
+            //backup has all segments but 0002 since it was deleted before 
recovery
+            
assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext());
+            
assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext());
+            
assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext());
+
+            //verify content from recovered segments preserved
+            assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1"));
+            //content from deleted segments not preserved
+            assertNull(segmentNodeStore.getRoot().getString("foo2"));
+            assertNull(segmentNodeStore.getRoot().getString("foo3"));
+        }
     }
 
     @Test
@@ -298,6 +298,7 @@ public class AzureArchiveManagerV8Test {
         UUID u = UUID.randomUUID();
         writer.writeSegment(u.getMostSignificantBits(), 
u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false);
         writer.flush();
+        writer.close();
         Assert.assertTrue(manager.exists("data00000a.tar"));
     }
 
@@ -332,7 +333,7 @@ public class AzureArchiveManagerV8Test {
     public void testMissngSegmentDetectedInFileStore() throws IOException, 
StorageException, URISyntaxException, InvalidFileStoreVersionException {
 
         AzurePersistenceV8 azurePersistenceV8 = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(azurePersistenceV8).build();
+        FileStore fileStore = FileStoreTestUtil.createFileStore(new 
File("target"), azurePersistenceV8);
 
         SegmentArchiveManager manager = 
azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), 
new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter());
         SegmentArchiveWriter writer = manager.create("data00000a.tar");
@@ -357,28 +358,27 @@ public class AzureArchiveManagerV8Test {
     @Test
     public void testReadOnlyRecovery() throws URISyntaxException, 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
StorageException {
         AzurePersistenceV8 rwPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        rwFileStore.flush();
-
-        
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext());
-        
assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext());
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            rwFileStore.flush();
 
-        // create read-only FS
-        AzurePersistenceV8 roPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly();
+            
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext());
+            
assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext());
 
-        PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
-                .getRoot()
-                .getProperty("foo");
-        assertThat(fooProperty, not(nullValue()));
-        assertThat(fooProperty.getValue(Type.STRING), equalTo("bar"));
+            // create read-only FS
+            AzurePersistenceV8 roPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
 
-        roFileStore.close();
-        rwFileStore.close();
+                PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
+                        .getRoot()
+                        .getProperty("foo");
+                assertThat(fooProperty, not(nullValue()));
+                assertThat(fooProperty.getValue(Type.STRING), equalTo("bar"));
+            }
+        }
 
         
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext());
         // after creating a read-only FS, the recovery procedure should not be 
started since there is another running Oak process
@@ -388,12 +388,13 @@ public class AzureArchiveManagerV8Test {
     @Test
     public void testCachingPersistenceTarRecovery() throws URISyntaxException, 
InvalidFileStoreVersionException, IOException, CommitFailedException, 
StorageException {
         AzurePersistenceV8 rwPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore rwFileStore = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build();
-        SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
-        NodeBuilder builder = segmentNodeStore.getRoot().builder();
-        builder.setProperty("foo", "bar");
-        segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
-        rwFileStore.flush();
+        try (FileStore rwFileStore = 
FileStoreTestUtil.createFileStore(folder.newFolder(), rwPersistence)) {
+            SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
+            NodeBuilder builder = segmentNodeStore.getRoot().builder();
+            builder.setProperty("foo", "bar");
+            segmentNodeStore.merge(builder, EmptyHook.INSTANCE, 
CommitInfo.EMPTY);
+            rwFileStore.flush();
+        }
 
         
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext());
         
assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext());
@@ -407,7 +408,9 @@ public class AzureArchiveManagerV8Test {
         SegmentNodeStorePersistence splitPersistence = new 
SplitPersistence(cachingPersistence, localPersistence);
 
         // exception should not be thrown here
-        FileStore splitPersistenceFileStore = 
FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build();
+        try (FileStore splitPersistenceFileStore = 
FileStoreTestUtil.createFileStore(localFolder, splitPersistence)) {
+            // nothing to do, just checking if the store can be opened
+        }
 
         
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext());
         // after creating a read-only FS, the recovery procedure should not be 
started since there is another running Oak process
@@ -417,7 +420,7 @@ public class AzureArchiveManagerV8Test {
     @Test
     public void testCollectBlobReferencesForReadOnlyFileStore() throws 
URISyntaxException, InvalidFileStoreVersionException, IOException, 
CommitFailedException, StorageException {
         AzurePersistenceV8 rwPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build()) {
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
             SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
             NodeBuilder builder = segmentNodeStore.getRoot().builder();
             builder.setProperty("foo", "bar");
@@ -429,7 +432,7 @@ public class AzureArchiveManagerV8Test {
 
             // create read-only FS, while the rw FS is still open
             AzurePersistenceV8 roPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-            try (ReadOnlyFileStore roFileStore = 
FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly()) {
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
 
                 PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
                         .getRoot()
@@ -447,7 +450,7 @@ public class AzureArchiveManagerV8Test {
     @Test
     public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws 
URISyntaxException, InvalidFileStoreVersionException, IOException, 
CommitFailedException, StorageException {
         AzurePersistenceV8 rwPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(rwPersistence).build()) {
+        try (FileStore rwFileStore = FileStoreTestUtil.createFileStore(new 
File("target"), rwPersistence)) {
             SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
             NodeBuilder builder = segmentNodeStore.getRoot().builder();
             builder.setProperty("foo", "bar");
@@ -459,8 +462,7 @@ public class AzureArchiveManagerV8Test {
 
             // create read-only FS, while the rw FS is still open
             AzurePersistenceV8 roPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-            try (ReadOnlyFileStore roFileStore = 
FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(roPersistence).buildReadOnly()) {
-
+            try (ReadOnlyFileStore roFileStore = 
FileStoreTestUtil.createReadOnlyFileStore(new File("target"), roPersistence)) {
                 PropertyState fooProperty = 
SegmentNodeStoreBuilders.builder(roFileStore).build()
                         .getRoot()
                         .getProperty("foo");
@@ -506,7 +508,7 @@ public class AzureArchiveManagerV8Test {
                 .doReturn(new AzureJournalFileV8(oakDirectory, "journal.log", 
writeAccessController))
                 .when(mockedRwPersistence).getJournalFile();
 
-        FileStore rwFileStore = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build();
+        FileStore rwFileStore = 
FileStoreTestUtil.createFileStore(folder.newFolder(), mockedRwPersistence);
         SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build();
         NodeBuilder builder = segmentNodeStore.getRoot().builder();
 
@@ -537,17 +539,22 @@ public class AzureArchiveManagerV8Test {
 
         Thread.sleep(2000);
 
-        // It should be possible to start another RW file store.
-        FileStore rwFileStore2 = 
FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new 
AzurePersistenceV8(oakDirectory)).build();
-        SegmentNodeStore segmentNodeStore2 = 
SegmentNodeStoreBuilders.builder(rwFileStore2).build();
-        NodeBuilder builder2 = segmentNodeStore2.getRoot().builder();
+        thread.interrupt();
+        thread.join();
 
-        //repository hasn't been updated
-        assertNull(builder2.getProperty("foo"));
+        // TODO: close the FileStore; doing so currently seems to expose a deadlock that needs further investigation
+        // rwFileStore.close();
 
-        rwFileStore2.close();
+        // It should be possible to start another RW file store.
+        try (FileStore rwFileStore2 = 
FileStoreTestUtil.createFileStore(folder.newFolder(), new 
AzurePersistenceV8(oakDirectory))) {
+            SegmentNodeStore segmentNodeStore2 = 
SegmentNodeStoreBuilders.builder(rwFileStore2).build();
+            NodeBuilder builder2 = segmentNodeStore2.getRoot().builder();
+
+            //repository hasn't been updated
+            assertNull(builder2.getProperty("foo"));
+        }
     }
-    
+
     @Test
     public void testListArchivesDoesNotReturnDeletedArchive() throws 
IOException, URISyntaxException, StorageException {
         // The archive manager should not return the archive which has 
"deleted" marker
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java
index ff3a2d422f..6e9583a7c4 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java
@@ -26,8 +26,8 @@ import 
org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.segment.SegmentId;
 import org.apache.jackrabbit.oak.segment.SegmentNotFoundException;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException;
 import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
@@ -60,29 +60,21 @@ public class AzureReadSegmentV8Test {
     @Test(expected = SegmentNotFoundException.class)
     public void testReadNonExistentSegmentRepositoryReachable() throws 
URISyntaxException, IOException, InvalidFileStoreVersionException, 
StorageException {
         AzurePersistenceV8 p = new 
AzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-        SegmentId id = new SegmentId(fs, 0, 0);
-
-        try {
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentId id = new SegmentId(fs, 0, 0);
             fs.readSegment(id);
-        } finally {
-            fs.close();
         }
     }
 
     @Test(expected = RepositoryNotReachableException.class)
     public void testReadExistentSegmentRepositoryNotReachable() throws 
URISyntaxException, IOException, InvalidFileStoreVersionException, 
StorageException {
         AzurePersistenceV8 p = new 
ReadFailingAzurePersistenceV8(container.getDirectoryReference("oak"));
-        FileStore fs = FileStoreBuilder.fileStoreBuilder(new 
File("target")).withCustomPersistence(p).build();
-
-        SegmentId id = new SegmentId(fs, 0, 0);
-        byte[] buffer = new byte[2];
+        try (FileStore fs = FileStoreTestUtil.createFileStore(new 
File("target"), p)) {
+            SegmentId id = new SegmentId(fs, 0, 0);
+            byte[] buffer = new byte[2];
 
-        try {
             fs.writeSegment(id, buffer, 0, 2);
             fs.readSegment(id);
-        } finally {
-            fs.close();
         }
     }
 
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java
index 59cc74dc43..2695921745 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java
@@ -43,6 +43,7 @@ import java.io.IOException;
 import java.net.URISyntaxException;
 import java.security.InvalidKeyException;
 import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -77,12 +78,14 @@ public class AzureRepositoryLockV8Test {
     @Test
     public void testFailingLock() throws URISyntaxException, IOException, 
StorageException {
         CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock");
-        new AzureRepositoryLockV8(blob, () -> {}, new 
WriteAccessController()).lock();
+        AzureRepositoryLockV8 lock = new AzureRepositoryLockV8(blob, () -> {}, 
new WriteAccessController()).lock();
         try {
             new AzureRepositoryLockV8(blob, () -> {}, new 
WriteAccessController()).lock();
             fail("The second lock should fail.");
         } catch (IOException e) {
             // it's fine
+        } finally {
+            lock.unlock();
         }
     }
 
@@ -102,7 +105,8 @@ public class AzureRepositoryLockV8Test {
         }).start();
 
         s.acquire();
-        new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController(), 
10).lock();
+        AzureRepositoryLockV8 lock = new AzureRepositoryLockV8(blob, () -> {}, 
new WriteAccessController(), 10).lock();
+        lock.unlock();
     }
 
     @Test
@@ -119,10 +123,10 @@ public class AzureRepositoryLockV8Test {
                 .doCallRealMethod()
                 .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), 
Mockito.any());
 
-        new AzureRepositoryLockV8(blobMocked, () -> {}, new 
WriteAccessController()).lock();
+        AzureRepositoryLockV8 lock = new AzureRepositoryLockV8(blobMocked, () 
-> {}, new WriteAccessController(), 1).lock();
 
         // wait till lease expires
-        Thread.sleep(16000);
+        Thread.sleep(1500);
 
         // reset the mock to default behaviour
         Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), 
Mockito.any(), Mockito.any());
@@ -133,6 +137,7 @@ public class AzureRepositoryLockV8Test {
         } catch (IOException e) {
             // it's fine
         }
+        lock.unlock();
     }
 
     @Test
@@ -153,29 +158,35 @@ public class AzureRepositoryLockV8Test {
 
         WriteAccessController writeAccessController = new 
WriteAccessController();
 
-        new AzureRepositoryLockV8(blobMocked, () -> {}, 
writeAccessController).lock();
-
-
-        Thread thread = new Thread(() -> {
-
-            while (true) {
-                writeAccessController.checkWritingAllowed();
-
-            }
-        });
+        AzureRepositoryLockV8 lock = new AzureRepositoryLockV8(blobMocked, () 
-> {}, writeAccessController);
+        Thread thread = null;
+        try {
+            lock.lock();
 
-        thread.start();
+            thread = new Thread(() -> {
+                while (true) {
+                    writeAccessController.checkWritingAllowed();
+                }
+            });
+            thread.start();
 
-        Thread.sleep(3000);
-        assertFalse("after 3 seconds thread should not be in a waiting state", 
thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(3000);
+            assertFalse("after 3 seconds thread should not be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
 
-        Thread.sleep(3000);
-        assertFalse("after 6 seconds thread should not be in a waiting state", 
thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(3000);
+            assertFalse("after 6 seconds thread should not be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
 
-        Thread.sleep(5000);
-        assertTrue("after more than 9 seconds thread should be in a waiting 
state", thread.getState().equals(Thread.State.WAITING));
+            Thread.sleep(5000);
+            assertTrue("after more than 9 seconds thread should be in a 
waiting state", thread.getState().equals(Thread.State.WAITING));
 
-        Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), 
Mockito.any(), Mockito.any());
+            
Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), 
Mockito.any(), Mockito.any());
+        } finally {
+            lock.unlock();
+            if (thread != null) {
+                thread.interrupt();
+                thread.join();
+            }
+        }
     }
 
     @Test
@@ -242,7 +253,7 @@ public class AzureRepositoryLockV8Test {
             lock.lock();
 
             // Wait for at least 3 lease renewal requests (2 timeouts + 1 
success)
-            await().atMost(10, java.util.concurrent.TimeUnit.SECONDS)
+            await().atMost(10, TimeUnit.SECONDS)
                     .untilAsserted(() -> wireMockServer.verify(
                             moreThanOrExactly(3),
                             
putRequestedFor(urlPathMatching(".*/oak/repo\\.lock"))
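
A side note on the AzureRepositoryLockV8 changes above: the tests now keep the instance returned by lock() so the lease can be released in a finally block. A minimal sketch of that lifecycle, assuming the same CloudBlockBlob and WriteAccessController used by these tests (illustrative only, not part of the patch):

    // Hypothetical helper showing the lock lifecycle the updated tests follow.
    private void withRepositoryLock(CloudBlockBlob blob, Runnable task) throws IOException {
        AzureRepositoryLockV8 lock = new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock();
        try {
            task.run();
        } finally {
            // always release the lease so it cannot block later tests
            lock.unlock();
        }
    }
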
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
index eaf7f439b5..cfc543d1eb 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
@@ -99,10 +99,16 @@ public class AzureSegmentArchiveWriterV8Test {
         mockServerClient
                 .when(writeBinaryReferencesRequest, Times.once())
                 .respond(response().withStatusCode(201));
+        // then allow closing the writer
+        mockServerClient
+                .when(getCloseArchiveRequest(), Times.once())
+                .respond(response().withStatusCode(201));
 
         writer.writeBinaryReferences(new byte[10]);
 
         mockServerClient.verify(writeBinaryReferencesRequest, 
exactly(MAX_ATTEMPTS));
+
+        writer.close();
     }
 
     @Test
@@ -119,10 +125,16 @@ public class AzureSegmentArchiveWriterV8Test {
         mockServerClient
                 .when(writeGraphRequest, Times.once())
                 .respond(response().withStatusCode(201));
+        // then allow closing the writer
+        mockServerClient
+                .when(getCloseArchiveRequest(), Times.once())
+                .respond(response().withStatusCode(201));
 
         writer.writeGraph(new byte[10]);
 
         mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS));
+
+        writer.close();
     }
 
     @Test
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java
index 6e330b047a..093584fefd 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java
@@ -27,8 +27,8 @@ import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
 import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
 import 
org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
@@ -86,24 +86,20 @@ public class SplitPersistenceBlobTest {
         File dataStoreDir = new File(folder.getRoot(), "blobstore");
         BlobStore blobStore = newBlobStore(dataStoreDir);
 
-        baseFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(sharedPersistence)
+        try (FileStore tempFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), sharedPersistence)
                 .withBlobStore(blobStore)
-                .build();
-        base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
+                .build()) {
+            base = SegmentNodeStoreBuilders.builder(tempFileStore).build();
 
-        NodeBuilder builder = base.getRoot().builder();
-        builder.child("foo").child("bar").setProperty("version", "v1");
-        base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+            NodeBuilder builder = base.getRoot().builder();
+            builder.child("foo").child("bar").setProperty("version", "v1");
+            base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
 
-        baseBlobId = createLoad(base, baseFileStore).getContentIdentity();
-        baseFileStore.flush();
-        baseFileStore.close();
+            baseBlobId = createLoad(base, tempFileStore).getContentIdentity();
+            tempFileStore.flush();
+        }
 
-        baseFileStore = FileStoreBuilder
-            .fileStoreBuilder(folder.newFolder())
-            .withCustomPersistence(sharedPersistence)
+        baseFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), sharedPersistence)
             .withBlobStore(blobStore)
             .build();
         base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
@@ -114,9 +110,7 @@ public class SplitPersistenceBlobTest {
         SegmentNodeStorePersistence localPersistence = new 
TarPersistence(folder.newFolder());
         splitPersistence = new SplitPersistence(sharedPersistence, 
localPersistence);
 
-        splitFileStore = FileStoreBuilder
-            .fileStoreBuilder(folder.newFolder())
-            .withCustomPersistence(splitPersistence)
+        splitFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), splitPersistence)
             .withBlobStore(blobStore)
             .build();
         split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
@@ -124,7 +118,12 @@ public class SplitPersistenceBlobTest {
 
     @After
     public void tearDown() {
-        baseFileStore.close();
+        if (splitFileStore != null) {
+            splitFileStore.close();
+        }
+        if (baseFileStore != null) {
+            baseFileStore.close();
+        }
     }
 
     @Test
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java
index 49c0ac6ae8..f37f67fc84 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java
@@ -23,8 +23,8 @@ import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
 import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
 import 
org.apache.jackrabbit.oak.segment.file.tar.binaries.BinaryReferencesIndexLoader;
@@ -78,10 +78,7 @@ public class SplitPersistenceTest {
 
         SegmentNodeStorePersistence sharedPersistence = new 
AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient,"oak");
 
-        baseFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(sharedPersistence)
-                .build();
+        baseFileStore = FileStoreTestUtil.createFileStore(folder.newFolder(), 
sharedPersistence);
         base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
 
         NodeBuilder builder = base.getRoot().builder();
@@ -92,10 +89,7 @@ public class SplitPersistenceTest {
         SegmentNodeStorePersistence localPersistence = new 
TarPersistence(folder.newFolder());
         splitPersistence = new SplitPersistence(sharedPersistence, 
localPersistence);
 
-        splitFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(splitPersistence)
-                .build();
+        splitFileStore = FileStoreTestUtil.createFileStore(folder.newFolder(), 
splitPersistence);
         split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
     }
 
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java
index 072890bb39..b0e9d1e4b7 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java
@@ -34,9 +34,9 @@ import 
org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
 import 
org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
@@ -82,24 +82,20 @@ public class SplitPersistenceBlobV8Test {
         File dataStoreDir = new File(folder.getRoot(), "blobstore");
         BlobStore blobStore = newBlobStore(dataStoreDir);
 
-        baseFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(sharedPersistence)
+        try (FileStore tempFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), sharedPersistence)
                 .withBlobStore(blobStore)
-                .build();
-        base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
+                .build()) {
+            base = SegmentNodeStoreBuilders.builder(tempFileStore).build();
 
-        NodeBuilder builder = base.getRoot().builder();
-        builder.child("foo").child("bar").setProperty("version", "v1");
-        base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+            NodeBuilder builder = base.getRoot().builder();
+            builder.child("foo").child("bar").setProperty("version", "v1");
+            base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
 
-        baseBlobId = createLoad(base, baseFileStore).getContentIdentity();
-        baseFileStore.flush();
-        baseFileStore.close();
+            baseBlobId = createLoad(base, tempFileStore).getContentIdentity();
+            tempFileStore.flush();
+        }
 
-        baseFileStore = FileStoreBuilder
-            .fileStoreBuilder(folder.newFolder())
-            .withCustomPersistence(sharedPersistence)
+        baseFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), sharedPersistence)
             .withBlobStore(blobStore)
             .build();
         base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
@@ -110,9 +106,7 @@ public class SplitPersistenceBlobV8Test {
         SegmentNodeStorePersistence localPersistence = new 
TarPersistence(folder.newFolder());
         splitPersistence = new SplitPersistence(sharedPersistence, 
localPersistence);
 
-        splitFileStore = FileStoreBuilder
-            .fileStoreBuilder(folder.newFolder())
-            .withCustomPersistence(splitPersistence)
+        splitFileStore = 
FileStoreTestUtil.createFileStoreBuilder(folder.newFolder(), splitPersistence)
             .withBlobStore(blobStore)
             .build();
         split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
@@ -120,7 +114,12 @@ public class SplitPersistenceBlobV8Test {
 
     @After
     public void tearDown() {
-        baseFileStore.close();
+        if (splitFileStore != null) {
+            splitFileStore.close();
+        }
+        if (baseFileStore != null) {
+            baseFileStore.close();
+        }
     }
 
     @Test
diff --git 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java
 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java
index a5af0abb47..aca9ea88f8 100644
--- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java
+++ 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java
@@ -21,9 +21,9 @@ import org.apache.jackrabbit.oak.api.CommitFailedException;
 import 
org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.FileStoreTestUtil;
 import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
 import 
org.apache.jackrabbit.oak.segment.file.tar.binaries.BinaryReferencesIndexLoader;
@@ -74,10 +74,7 @@ public class SplitPersistenceV8Test {
     public void setup() throws IOException, InvalidFileStoreVersionException, 
CommitFailedException, URISyntaxException, InvalidKeyException, 
StorageException {
         SegmentNodeStorePersistence sharedPersistence = new 
AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak"));
 
-        baseFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(sharedPersistence)
-                .build();
+        baseFileStore = FileStoreTestUtil.createFileStore(folder.newFolder(), 
sharedPersistence);
         base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
 
         NodeBuilder builder = base.getRoot().builder();
@@ -88,10 +85,7 @@ public class SplitPersistenceV8Test {
         SegmentNodeStorePersistence localPersistence = new 
TarPersistence(folder.newFolder());
         splitPersistence = new SplitPersistence(sharedPersistence, 
localPersistence);
 
-        splitFileStore = FileStoreBuilder
-                .fileStoreBuilder(folder.newFolder())
-                .withCustomPersistence(splitPersistence)
-                .build();
+        splitFileStore = FileStoreTestUtil.createFileStore(folder.newFolder(), 
splitPersistence);
         split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
     }
 
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/TarRevisionsTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/TarRevisionsTest.java
index edbd5e694e..a24b8f6ab1 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/TarRevisionsTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/TarRevisionsTest.java
@@ -76,11 +76,15 @@ public class TarRevisionsTest {
     }
 
     @After
-    public void tearDown() {
+    public void tearDown() throws IOException {
         if (store != null) {
             store.close();
             store = null;
         }
+        if (revisions != null) {
+            revisions.close();
+            revisions = null;
+        }
     }
 
     @Test(expected = IllegalStateException.class)
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/testutils/NodeStoreTestHarness.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/testutils/NodeStoreTestHarness.java
index 1f1e0ceb69..ea56ac59a0 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/testutils/NodeStoreTestHarness.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/testutils/NodeStoreTestHarness.java
@@ -263,6 +263,9 @@ public class NodeStoreTestHarness implements Closeable {
                     }
                 })
                 .withCustomPersistence(persistence)
+                .withSegmentCacheSize(8)
+                .withStringCacheSize(0)
+                .withTemplateCacheSize(0)
                 .withMaxFileSize(1)
                 .withGCOptions(new 
SegmentGCOptions().setEstimationDisabled(true))
                 .build();
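
Taken together, the test changes follow a single recipe. The sketch below is an illustrative recap only, mirroring FileStoreTestUtil above rather than adding anything new; folder and persistence stand in for the test's own folder and persistence instance.

    // Recap sketch, not part of the patch: open every test FileStore with
    // minimal caches and let try-with-resources close it.
    try (FileStore fs = FileStoreBuilder.fileStoreBuilder(folder)
            .withStringCacheSize(0)
            .withTemplateCacheSize(0)
            .withSegmentCacheSize(8)
            .withCustomPersistence(persistence)
            .build()) {
        // ... test body; the store is closed even if assertions fail ...
    }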
