amit-jain commented on code in PR #2409:
URL: https://github.com/apache/jackrabbit-oak/pull/2409#discussion_r2415645340
##########
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java:
##########
@@ -32,7 +33,7 @@ public class AzureDataStoreService extends AbstractAzureDataStoreService {
public static final String NAME =
"org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore";
- protected StatisticsProvider getStatisticsProvider(){
+ protected @NotNull StatisticsProvider getStatisticsProvider(){
Review Comment:
```suggestion
protected @NotNull StatisticsProvider getStatisticsProvider() {
```
##########
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java:
##########
@@ -874,24 +814,9 @@ URI createHttpDownloadURI(@NotNull DataIdentifier identifier,
}
String key = getKeyName(identifier);
-        SharedAccessBlobHeaders headers = new SharedAccessBlobHeaders();
-        headers.setCacheControl(String.format("private, max-age=%d, immutable", httpDownloadURIExpirySeconds));
-
-        String contentType = downloadOptions.getContentTypeHeader();
-        if (! StringUtils.isEmpty(contentType)) {
-            headers.setContentType(contentType);
-        }
-
-        String contentDisposition =
-                downloadOptions.getContentDispositionHeader();
-        if (! StringUtils.isEmpty(contentDisposition)) {
-            headers.setContentDisposition(contentDisposition);
-        }
-
         uri = createPresignedURI(key,
-                EnumSet.of(SharedAccessBlobPermissions.READ),
+                new BlobSasPermission().setReadPermission(true),
Review Comment:
The headers are ignored here, which is a critical miss. Make sure the cache-control, content-type and content-disposition response headers are still applied when building the SAS, as documented in
https://learn.microsoft.com/en-us/java/api/com.azure.storage.blob.sas.blobservicesassignaturevalues?view=azure-java-stable#com-azure-storage-blob-sas-blobservicesassignaturevalues-blobservicesassignaturevalues(java-time-offsetdatetime-com-azure-storage-blob-sas-blobcontainersaspermission)
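A minimal sketch (not the PR's code) of how those headers could be carried over with the SDK 12 `BlobServiceSasSignatureValues` API, assuming the existing `downloadOptions` and `httpDownloadURIExpirySeconds` fields:
```java
// Sketch only: replicate what SharedAccessBlobHeaders used to set.
OffsetDateTime expiry = OffsetDateTime.now().plusSeconds(httpDownloadURIExpirySeconds);
BlobServiceSasSignatureValues sasValues =
        new BlobServiceSasSignatureValues(expiry, new BlobSasPermission().setReadPermission(true))
                .setCacheControl(String.format("private, max-age=%d, immutable", httpDownloadURIExpirySeconds));

String contentType = downloadOptions.getContentTypeHeader();
if (!StringUtils.isEmpty(contentType)) {
    sasValues.setContentType(contentType);
}

String contentDisposition = downloadOptions.getContentDispositionHeader();
if (!StringUtils.isEmpty(contentDisposition)) {
    sasValues.setContentDisposition(contentDisposition);
}
```
The generated SAS should then carry the headers as the rscc/rsct/rscd query parameters, preserving the behaviour of the old `SharedAccessBlobHeaders` path.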
##########
oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java:
##########
@@ -94,12 +97,22 @@ public DataStore createDataStore() {
         String connectionString = Utils.getConnectionStringFromProperties(azProps);
         try {
-            CloudBlobContainer container = Utils.getBlobContainer(connectionString, containerName);
-            container.createIfNotExists();
+            boolean useSDK12 = Boolean.parseBoolean(azProps.getProperty(AZURE_SDK_12_ENABLED, "false"));
Review Comment:
where is SystemPropertySupplier being used?
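For reference, a small sketch of how the flag could be read via Oak's `SystemPropertySupplier` (from `oak-commons`) instead of plain `azProps.getProperty(...)`; whether `AZURE_SDK_12_ENABLED` is also the intended system property name, and the `log` field, are assumptions:
```java
// Illustration only: resolve the SDK 12 toggle from a system property,
// defaulting to false when it is not set.
boolean useSDK12 = SystemPropertySupplier
        .create(AZURE_SDK_12_ENABLED, false)
        .loggingTo(log)
        .get();
```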
##########
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java:
##########
@@ -874,24 +814,9 @@ URI createHttpDownloadURI(@NotNull DataIdentifier identifier,
}
String key = getKeyName(identifier);
-        SharedAccessBlobHeaders headers = new SharedAccessBlobHeaders();
-        headers.setCacheControl(String.format("private, max-age=%d, immutable", httpDownloadURIExpirySeconds));
-
-        String contentType = downloadOptions.getContentTypeHeader();
-        if (! StringUtils.isEmpty(contentType)) {
-            headers.setContentType(contentType);
-        }
-
-        String contentDisposition =
-                downloadOptions.getContentDispositionHeader();
-        if (! StringUtils.isEmpty(contentDisposition)) {
-            headers.setContentDisposition(contentDisposition);
-        }
-
         uri = createPresignedURI(key,
-                EnumSet.of(SharedAccessBlobPermissions.READ),
+                new BlobSasPermission().setReadPermission(true),
Review Comment:
Also, it looks like a test for the presigned download URI is missing; it would be good to add one.
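A rough sketch of such a test, reusing the fixtures from `AzureBlobStoreBackendTest` below (`backend`, `createTempFile`); the `setHttpDownloadURIExpirySeconds` wiring and the SAS parameter check are assumptions to verify:
```java
@Test
public void testHttpDownloadURICarriesHeaders() throws Exception {
    backend.init();
    // Assumed setter: download URIs are only generated when an expiry is configured.
    backend.setHttpDownloadURIExpirySeconds(3600);

    DataIdentifier id = new DataIdentifier("downloadheaderstest");
    File file = createTempFile("header-test");
    try {
        backend.write(id, file);

        URI uri = backend.createHttpDownloadURI(id, DataRecordDownloadOptions.DEFAULT);
        assertNotNull("A presigned download URI should be created", uri);
        // Azure encodes SAS response headers as rscc / rsct / rscd query parameters.
        assertTrue("cache-control should be present in the SAS query",
                uri.getQuery().contains("rscc="));
    } finally {
        file.delete();
    }
}
```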
##########
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java:
##########
@@ -719,34 +665,30 @@ public boolean deleteMetadataRecord(String name) {
@Override
public void deleteAllMetadataRecords(String prefix) {
- if (null == prefix) {
- throw new NullPointerException("prefix");
- }
- long start = System.currentTimeMillis();
+ Objects.requireNonNull(prefix, "prefix must not be null");
+
+ Stopwatch stopwatch = Stopwatch.createStarted();
         ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
         try {
             Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
-            CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
             int total = 0;
-            for (ListBlobItem item : metaDir.listBlobs(prefix)) {
-                if (item instanceof CloudBlob) {
-                    if (((CloudBlob)item).deleteIfExists()) {
-                        total++;
-                    }
+
+            ListBlobsOptions listBlobsOptions = new ListBlobsOptions();
+            listBlobsOptions.setPrefix(AzureConstants.AZURE_BlOB_META_DIR_NAME);
Review Comment:
There is already a test covering this:
oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java#testDeleteAllMetadataRecords
##########
oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java:
##########
@@ -7,78 +7,2047 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
*/
package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.azure.core.util.BinaryData;
+import com.azure.storage.blob.BlobClient;
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.models.BlobItem;
+import com.azure.storage.blob.models.ListBlobsOptions;
+import com.azure.storage.blob.sas.BlobSasPermission;
+import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
+import com.azure.storage.blob.specialized.BlockBlobClient;
+import com.google.common.cache.Cache;
import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.data.DataIdentifier;
import org.apache.jackrabbit.core.data.DataRecord;
import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions;
import org.jetbrains.annotations.NotNull;
import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.slf4j.LoggerFactory;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.read.ListAppender;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
import java.io.IOException;
-import java.net.URISyntaxException;
+import java.io.InputStream;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.net.URI;
import java.time.Duration;
import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
import java.util.Date;
import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.List;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE;
 import static java.util.stream.Collectors.toSet;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BlOB_META_DIR_NAME;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_REF_KEY;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_CONNECTION_STRING;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_CONTAINER_NAME;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_STORAGE_ACCOUNT_NAME;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_ENDPOINT;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_CREATE_CONTAINER;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_REF_ON_INIT;
+import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.PRESIGNED_HTTP_DOWNLOAD_URI_EXPIRY_SECONDS;
+import static org.junit.Assert.*;
 import static org.junit.Assume.assumeNotNull;
+import static org.mockito.ArgumentMatchers.any;
+/**
+ * Comprehensive test class for AzureBlobStoreBackend covering all methods and functionality.
+ * Combines unit tests and integration tests.
+ */
public class AzureBlobStoreBackendTest {
+
+ @ClassRule
+ public static AzuriteDockerRule azurite = new AzuriteDockerRule();
+
+ private static final String CONTAINER_NAME = "test-container";
+ private static final String TEST_BLOB_CONTENT = "test blob content";
+    private static final String TEST_METADATA_CONTENT = "test metadata content";
private static final String AZURE_ACCOUNT_NAME = "AZURE_ACCOUNT_NAME";
private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
- @ClassRule
- public static AzuriteDockerRule azurite = new AzuriteDockerRule();
+ private static final Set<String> BLOBS = Set.of("blob1", "blob2");
+
+ private BlobContainerClient container;
+ private AzureBlobStoreBackend backend;
+ private Properties testProperties;
+
+ @Mock
+ private AzureBlobContainerProvider mockProvider;
+
+ @Mock
+ private BlobContainerClient mockContainer;
+
+ @Mock
+ private BlobClient mockBlobClient;
+
+ @Mock
+ private BlockBlobClient mockBlockBlobClient;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.openMocks(this);
+
+ // Create real container for integration tests
+        container = azurite.getContainer(CONTAINER_NAME, getConnectionString());
+
+ // Setup test properties
+ testProperties = createTestProperties();
+
+ // Create backend instance
+ backend = new AzureBlobStoreBackend();
+ backend.setProperties(testProperties);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (backend != null) {
+ try {
+ backend.close();
+ } catch (Exception e) {
+ // Ignore cleanup errors
+ }
+ }
+ if (container != null) {
+ try {
+ container.deleteIfExists();
+ } catch (Exception e) {
+ // Ignore cleanup errors
+ }
+ }
+ }
+
+ private Properties createTestProperties() {
+ Properties properties = new Properties();
+ properties.setProperty(AZURE_BLOB_CONTAINER_NAME, CONTAINER_NAME);
+        properties.setProperty(AZURE_STORAGE_ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_NAME);
+ properties.setProperty(AZURE_BLOB_ENDPOINT, azurite.getBlobEndpoint());
+ properties.setProperty(AZURE_CONNECTION_STRING, getConnectionString());
+ properties.setProperty(AZURE_CREATE_CONTAINER, "true");
+        properties.setProperty(AZURE_REF_ON_INIT, "false"); // Disable for most tests
+ return properties;
+ }
+
+ private static String getConnectionString() {
+ return Utils.getConnectionString(
+ AzuriteDockerRule.ACCOUNT_NAME,
+ AzuriteDockerRule.ACCOUNT_KEY,
+ azurite.getBlobEndpoint()
+ );
+ }
+
+ // ========== INITIALIZATION AND CONFIGURATION TESTS ==========
+
+ @Test
+ public void testInitWithValidProperties() throws Exception {
+ backend.init();
+ assertNotNull("Backend should be initialized", backend);
+
+ // Verify container was created
+ BlobContainerClient azureContainer = backend.getAzureContainer();
+ assertNotNull("Azure container should not be null", azureContainer);
+ assertTrue("Container should exist", azureContainer.exists());
+ }
+
+ @Test
+ public void testInitWithNullProperties() throws Exception {
+ AzureBlobStoreBackend nullPropsBackend = new AzureBlobStoreBackend();
+ // Should not set properties, will try to read from default config file
+
+ try {
+ nullPropsBackend.init();
+            fail("Expected DataStoreException when no properties and no default config file");
+        } catch (DataStoreException e) {
+            assertTrue("Should contain config file error",
+                    e.getMessage().contains("Unable to initialize Azure Data Store"));
+ }
+ }
+
+ @Test
+    public void testInitWithNullPropertiesAndValidConfigFile() throws Exception {
+ // Create a temporary azure.properties file in the working directory
+ File configFile = new File("azure.properties");
+ Properties configProps = createTestProperties();
+
+ try (FileOutputStream fos = new FileOutputStream(configFile)) {
+            configProps.store(fos, "Test configuration for null properties test");
+ }
+
+ AzureBlobStoreBackend nullPropsBackend = new AzureBlobStoreBackend();
+ // Don't set properties - should read from azure.properties file
+
+ try {
+ nullPropsBackend.init();
+            assertNotNull("Backend should be initialized from config file", nullPropsBackend);
+
+            // Verify container was created
+            BlobContainerClient azureContainer = nullPropsBackend.getAzureContainer();
+            assertNotNull("Azure container should not be null", azureContainer);
+ assertTrue("Container should exist", azureContainer.exists());
+ } finally {
+ // Clean up the config file
+ if (configFile.exists()) {
+ configFile.delete();
+ }
+ // Clean up the backend
+ if (nullPropsBackend != null) {
+ try {
+ nullPropsBackend.close();
+ } catch (Exception e) {
+ // Ignore cleanup errors
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testSetProperties() {
+ Properties newProps = new Properties();
+ newProps.setProperty("test.key", "test.value");
+
+ backend.setProperties(newProps);
+
+        // Verify properties were set (using reflection to access private field)
+        try {
+            Field propertiesField = AzureBlobStoreBackend.class.getDeclaredField("properties");
+            propertiesField.setAccessible(true);
+            Properties actualProps = (Properties) propertiesField.get(backend);
+            assertEquals("Properties should be set", "test.value", actualProps.getProperty("test.key"));
+ } catch (Exception e) {
+ fail("Failed to verify properties were set: " + e.getMessage());
+ }
+ }
+
+ @Test
+ public void testConcurrentRequestCountValidation() throws Exception {
+ // Test with too low concurrent request count
+ Properties lowProps = createTestProperties();
+ lowProps.setProperty(AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION,
"1");
+
+ AzureBlobStoreBackend lowBackend = new AzureBlobStoreBackend();
+ lowBackend.setProperties(lowProps);
+ lowBackend.init();
+
+ // Should reset to default minimum (verified through successful
initialization)
+ assertNotNull("Backend should initialize with low concurrent request
count", lowBackend);
+ lowBackend.close();
+
+ // Test with too high concurrent request count
+ Properties highProps = createTestProperties();
+ highProps.setProperty(AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION,
"100");
+
+ AzureBlobStoreBackend highBackend = new AzureBlobStoreBackend();
+ highBackend.setProperties(highProps);
+ highBackend.init();
+
+ // Should reset to default maximum (verified through successful
initialization)
+ assertNotNull("Backend should initialize with high concurrent request
count", highBackend);
+ highBackend.close();
+ }
+
+ @Test
+ public void testGetAzureContainerThreadSafety() throws Exception {
+ backend.init();
+
+ int threadCount = 10;
+ ExecutorService executor = Executors.newFixedThreadPool(threadCount);
+ CountDownLatch latch = new CountDownLatch(threadCount);
+ List<Future<BlobContainerClient>> futures = new ArrayList<>();
+
+ // Submit multiple threads to get container simultaneously
+ for (int i = 0; i < threadCount; i++) {
+ futures.add(executor.submit(() -> {
+ try {
+ latch.countDown();
+ latch.await(); // Wait for all threads to be ready
+ return backend.getAzureContainer();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }));
+ }
+
+ // Verify all threads get the same container instance
+ BlobContainerClient firstContainer = futures.get(0).get(5,
TimeUnit.SECONDS);
+ for (Future<BlobContainerClient> future : futures) {
+ BlobContainerClient container = future.get(5, TimeUnit.SECONDS);
+ assertSame("All threads should get the same container instance",
firstContainer, container);
+ }
+
+ executor.shutdown();
+ }
+
+ @Test
+ public void testGetAzureContainerWhenNull() throws Exception {
+ // Create a backend with valid properties but don't initialize it
+ // This ensures azureContainer field remains null initially
+ AzureBlobStoreBackend testBackend = new AzureBlobStoreBackend();
+ testBackend.setProperties(testProperties);
+
+ // Initialize the backend to set up azureBlobContainerProvider
+ testBackend.init();
+
+ try {
+ // Reset azureContainer to null using reflection to test the null
case
+            Field azureContainerReferenceField = AzureBlobStoreBackend.class.getDeclaredField("azureContainerReference");
+            azureContainerReferenceField.setAccessible(true);
+            @SuppressWarnings("unchecked")
+            AtomicReference<BlobContainerClient> azureContainerReference =
+                    (AtomicReference<BlobContainerClient>) azureContainerReferenceField.get(testBackend);
+ azureContainerReference.set(null);
+
+ // Verify azureContainer is null
+ BlobContainerClient containerBeforeCall =
azureContainerReference.get();
+ assertNull("azureContainer should be null before getAzureContainer
call", containerBeforeCall);
+
+ // Call getAzureContainer - this should initialize the container
+ BlobContainerClient container = testBackend.getAzureContainer();
+
+ // Verify container is not null and properly initialized
+ assertNotNull("getAzureContainer should return non-null container
when azureContainer was null", container);
+ assertTrue("Container should exist", container.exists());
+
+ // Verify azureContainer field is now set
+ BlobContainerClient containerAfterCall =
azureContainerReference.get();
+ assertNotNull("azureContainer field should be set after
getAzureContainer call", containerAfterCall);
+ assertSame("Returned container should be same as stored in field",
container, containerAfterCall);
+
+ // Call getAzureContainer again - should return same instance
+ BlobContainerClient container2 = testBackend.getAzureContainer();
+ assertSame("Subsequent calls should return same container
instance", container, container2);
+
+ } finally {
+ testBackend.close();
+ }
+ }
+
+ @Test
+ public void testGetAzureContainerWithProviderException() throws Exception {
+ // Create a backend with a mock provider that throws exception
+ AzureBlobStoreBackend testBackend = new AzureBlobStoreBackend();
+ testBackend.setProperties(testProperties);
+
+ // Set up mock provider using reflection
+ Field providerField =
AzureBlobStoreBackend.class.getDeclaredField("azureBlobContainerProvider");
+ providerField.setAccessible(true);
+
+ // Create mock provider that throws DataStoreException
+ AzureBlobContainerProvider mockProvider =
org.mockito.Mockito.mock(AzureBlobContainerProvider.class);
+ org.mockito.Mockito.when(mockProvider.getBlobContainer(any(), any()))
+ .thenThrow(new DataStoreException("Mock connection failure"));
+
+ providerField.set(testBackend, mockProvider);
+
+ try {
+ // Call getAzureContainer - should propagate the DataStoreException
+ testBackend.getAzureContainer();
+ fail("Expected DataStoreException when
azureBlobContainerProvider.getBlobContainer() fails");
+ } catch (DataStoreException e) {
+ assertEquals("Exception message should match", "Mock connection
failure", e.getMessage());
+
+ // Verify azureContainer field remains null after exception
+ Field azureContainerField =
AzureBlobStoreBackend.class.getDeclaredField("azureContainerReference");
+ azureContainerField.setAccessible(true);
+ @SuppressWarnings("unchecked")
+ BlobContainerClient containerAfterException =
((AtomicReference<BlobContainerClient>)
azureContainerField.get(testBackend)).get();
+ assertNull("azureContainer should remain null after exception",
containerAfterException);
+ } finally {
+ testBackend.close();
+ }
+ }
+
+ // ========== CORE CRUD OPERATIONS TESTS ==========
+
+ @Test
+ public void testWriteAndRead() throws Exception {
+ backend.init();
+
+ // Create test file
+ File testFile = createTempFile("test-content");
+ DataIdentifier identifier = new DataIdentifier("testidentifier123");
+
+ try {
+ // Write file
+ backend.write(identifier, testFile);
+
+ // Read file
+ try (InputStream inputStream = backend.read(identifier)) {
+ String content = IOUtils.toString(inputStream, "UTF-8");
+ assertEquals("Content should match", "test-content", content);
+ }
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testWriteWithNullIdentifier() throws Exception {
+ backend.init();
+ File testFile = createTempFile("test");
+
+ try {
+ backend.write(null, testFile);
+ fail("Expected NullPointerException for null identifier");
+ } catch (NullPointerException e) {
+ assertEquals("identifier must not be null", e.getMessage());
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testWriteWithNullFile() throws Exception {
+ backend.init();
+ DataIdentifier identifier = new DataIdentifier("test");
+
+ try {
+ backend.write(identifier, null);
+ fail("Expected NullPointerException for null file");
+ } catch (NullPointerException e) {
+ assertEquals("file must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testWriteExistingBlobWithSameLength() throws Exception {
+ backend.init();
+
+ File testFile = createTempFile("same-content");
+ DataIdentifier identifier = new DataIdentifier("existingblob123");
+
+ try {
+ // Write file first time
+ backend.write(identifier, testFile);
+
+ // Write same file again (should update metadata)
+ backend.write(identifier, testFile);
+
+ // Verify content is still accessible
+ try (InputStream inputStream = backend.read(identifier)) {
+ String content = IOUtils.toString(inputStream, "UTF-8");
+ assertEquals("Content should match", "same-content", content);
+ }
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testWriteExistingBlobWithDifferentLength() throws Exception {
+ backend.init();
+
+ File testFile1 = createTempFile("content1");
+ File testFile2 = createTempFile("different-length-content");
+ DataIdentifier identifier = new DataIdentifier("lengthcollision");
+
+ try {
+ // Write first file
+ backend.write(identifier, testFile1);
+
+ // Try to write file with different length
+ try {
+ backend.write(identifier, testFile2);
+ fail("Expected DataStoreException for length collision");
+ } catch (DataStoreException e) {
+ assertTrue("Should contain length collision error",
+ e.getMessage().contains("Length Collision"));
+ }
+ } finally {
+ testFile1.delete();
+ testFile2.delete();
+ }
+ }
+
+ @Test
+ public void testReadNonExistentBlob() throws Exception {
+ backend.init();
+ DataIdentifier identifier = new DataIdentifier("nonexistent123");
+
+ try {
+ backend.read(identifier);
+ fail("Expected DataStoreException for non-existent blob");
+ } catch (DataStoreException e) {
+ assertTrue("Should contain missing blob error",
+ e.getMessage().contains("Trying to read missing blob"));
+ }
+ }
+
+ @Test
+ public void testReadWithNullIdentifier() throws Exception {
+ backend.init();
+
+ try {
+ backend.read(null);
+ fail("Expected NullPointerException for null identifier");
+ } catch (NullPointerException e) {
+ assertEquals("identifier must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testGetRecord() throws Exception {
+ backend.init();
+
+ File testFile = createTempFile("record-content");
+ DataIdentifier identifier = new DataIdentifier("testrecord123");
+
+ try {
+ // Write file first
+ backend.write(identifier, testFile);
+
+ // Get record
+ DataRecord record = backend.getRecord(identifier);
+ assertNotNull("Record should not be null", record);
+ assertEquals("Record identifier should match", identifier,
record.getIdentifier());
+ assertEquals("Record length should match", testFile.length(),
record.getLength());
+ assertTrue("Record should have valid last modified time",
record.getLastModified() > 0);
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testGetRecordNonExistent() throws Exception {
+ backend.init();
+ DataIdentifier identifier = new DataIdentifier("nonexistentrecord");
+
+ try {
+ backend.getRecord(identifier);
+ fail("Expected DataStoreException for non-existent record");
+ } catch (DataStoreException e) {
+ assertTrue("Should contain retrieve blob error",
+ e.getMessage().contains("Cannot retrieve blob"));
+ }
+ }
+
+ @Test
+ public void testGetRecordWithNullIdentifier() throws Exception {
+ backend.init();
+
+ try {
+ backend.getRecord(null);
+ fail("Expected NullPointerException for null identifier");
+ } catch (NullPointerException e) {
+ assertEquals("identifier must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testExists() throws Exception {
+ backend.init();
+
+ File testFile = createTempFile("exists-content");
+ DataIdentifier identifier = new DataIdentifier("existstest123");
+
+ try {
+ // Initially should not exist
+ assertFalse("Blob should not exist initially",
backend.exists(identifier));
+
+ // Write file
+ backend.write(identifier, testFile);
+
+ // Now should exist
+ assertTrue("Blob should exist after write",
backend.exists(identifier));
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testDeleteRecord() throws Exception {
+ backend.init();
+
+ File testFile = createTempFile("delete-content");
+ DataIdentifier identifier = new DataIdentifier("deletetest123");
+
+ try {
+ // Write file
+ backend.write(identifier, testFile);
+ assertTrue("Blob should exist before delete",
backend.exists(identifier));
+
+ // Delete record
+ backend.deleteRecord(identifier);
+ assertFalse("Blob should not exist after delete",
backend.exists(identifier));
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ @Test
+ public void testDeleteNonExistentRecord() throws Exception {
+ backend.init();
+ DataIdentifier identifier = new DataIdentifier("nonexistentdelete");
+
+ // Should not throw exception when deleting non-existent record
+ backend.deleteRecord(identifier);
+ // No exception expected
+ }
+
+ @Test
+ public void testDeleteRecordWithNullIdentifier() throws Exception {
+ backend.init();
+
+ try {
+ backend.deleteRecord(null);
+ fail("Expected NullPointerException for null identifier");
+ } catch (NullPointerException e) {
+ assertEquals("identifier must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testGetAllIdentifiers() throws Exception {
+ backend.init();
+
+ // Create multiple test files
+ File testFile1 = createTempFile("content1");
+ File testFile2 = createTempFile("content2");
+ DataIdentifier id1 = new DataIdentifier("identifier1");
+ DataIdentifier id2 = new DataIdentifier("identifier2");
+
+ try {
+ // Write files
+ backend.write(id1, testFile1);
+ backend.write(id2, testFile2);
+
+ // Get all identifiers
+ Iterator<DataIdentifier> identifiers = backend.getAllIdentifiers();
+ assertNotNull("Identifiers iterator should not be null",
identifiers);
+
+ // Collect identifiers
+ List<String> identifierStrings = new ArrayList<>();
+ while (identifiers.hasNext()) {
+ identifierStrings.add(identifiers.next().toString());
+ }
+
+ // Should contain both identifiers
+ assertTrue("Should contain identifier1",
identifierStrings.contains("identifier1"));
+ assertTrue("Should contain identifier2",
identifierStrings.contains("identifier2"));
+ } finally {
+ testFile1.delete();
+ testFile2.delete();
+ }
+ }
+
+ @Test
+ public void testGetAllRecords() throws Exception {
+ backend.init();
+
+ // Create test file
+ File testFile = createTempFile("record-content");
+ DataIdentifier identifier = new DataIdentifier("recordtest123");
+
+ try {
+ // Write file
+ backend.write(identifier, testFile);
+
+ // Get all records
+ Iterator<DataRecord> records = backend.getAllRecords();
+ assertNotNull("Records iterator should not be null", records);
+
+ // Find our record
+ boolean foundRecord = false;
+ while (records.hasNext()) {
+ DataRecord record = records.next();
+                if (record.getIdentifier().toString().equals("recordtest123")) {
+ foundRecord = true;
+ assertEquals("Record length should match",
testFile.length(), record.getLength());
+ assertTrue("Record should have valid last modified time",
record.getLastModified() > 0);
+ break;
+ }
+ }
+ assertTrue("Should find our test record", foundRecord);
+ } finally {
+ testFile.delete();
+ }
+ }
+
+ // ========== METADATA OPERATIONS TESTS ==========
+
+ @Test
+ public void testAddMetadataRecordWithInputStream() throws Exception {
+ backend.init();
+
+ String metadataName = "test-metadata-stream";
+ String content = TEST_METADATA_CONTENT;
+
+ // Add metadata record
+ backend.addMetadataRecord(new
ByteArrayInputStream(content.getBytes()), metadataName);
+
+ // Verify record exists
+ assertTrue("Metadata record should exist",
backend.metadataRecordExists(metadataName));
+
+ // Verify content
+ DataRecord record = backend.getMetadataRecord(metadataName);
+ assertNotNull("Record should not be null", record);
+ assertEquals("Record should have correct length", content.length(),
record.getLength());
+
+ // Verify content can be read
+ try (InputStream stream = record.getStream()) {
+ String readContent = IOUtils.toString(stream, "UTF-8");
+ assertEquals("Content should match", content, readContent);
+ }
+
+ // Clean up
+ backend.deleteMetadataRecord(metadataName);
+ }
+
+ @Test
+ public void testAddMetadataRecordWithFile() throws Exception {
+ backend.init();
+
+ String metadataName = "test-metadata-file";
+ File metadataFile = createTempFile(TEST_METADATA_CONTENT);
+
+ try {
+ // Add metadata record from file
+ backend.addMetadataRecord(metadataFile, metadataName);
+
+ // Verify record exists
+ assertTrue("Metadata record should exist",
backend.metadataRecordExists(metadataName));
+
+ // Verify content
+ DataRecord record = backend.getMetadataRecord(metadataName);
+ assertNotNull("Record should not be null", record);
+ assertEquals("Record should have correct length",
metadataFile.length(), record.getLength());
+
+ // Clean up
+ backend.deleteMetadataRecord(metadataName);
+ } finally {
+ metadataFile.delete();
+ }
+ }
+
+ @Test
+ public void testAddMetadataRecordWithNullInputStream() throws Exception {
+ backend.init();
+
+ try {
+ backend.addMetadataRecord((InputStream) null, "test");
+ fail("Expected NullPointerException for null input stream");
+ } catch (NullPointerException e) {
+ assertEquals("input must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testAddMetadataRecordWithNullFile() throws Exception {
+ backend.init();
+
+ try {
+ backend.addMetadataRecord((File) null, "test");
+ fail("Expected NullPointerException for null file");
+ } catch (NullPointerException e) {
+ assertEquals("input must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testAddMetadataRecordWithNullName() throws Exception {
+ backend.init();
+
+ try {
+ backend.addMetadataRecord(new
ByteArrayInputStream("test".getBytes()), null);
+ fail("Expected IllegalArgumentException for null name");
+ } catch (IllegalArgumentException e) {
+ assertEquals("name should not be empty", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testAddMetadataRecordWithEmptyName() throws Exception {
+ backend.init();
+
+ try {
+ backend.addMetadataRecord(new
ByteArrayInputStream("test".getBytes()), "");
+ fail("Expected IllegalArgumentException for empty name");
+ } catch (IllegalArgumentException e) {
+ assertEquals("name should not be empty", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testGetMetadataRecordNonExistent() throws Exception {
+ backend.init();
+
+ DataRecord record = backend.getMetadataRecord("non-existent-metadata");
+ assertNull("Non-existent metadata record should return null", record);
+ }
+
+ @Test
+ public void testGetAllMetadataRecords() throws Exception {
+ backend.init();
+
+ String prefix = "test-prefix-";
+ String content = "metadata content";
+
+ // Add multiple metadata records
+ for (int i = 0; i < 3; i++) {
+ backend.addMetadataRecord(
+ new ByteArrayInputStream((content + i).getBytes()),
+ prefix + i
+ );
+ }
+
+ try {
+ // Get all metadata records
+ List<DataRecord> records = backend.getAllMetadataRecords("");
+ assertNotNull("Records list should not be null", records);
+
+ // Find our records
+ int foundCount = 0;
+ for (DataRecord record : records) {
+ if (record.getIdentifier().toString().startsWith(prefix)) {
+ foundCount++;
+ }
+ }
+ assertEquals("Should find all 3 metadata records", 3, foundCount);
+ } finally {
+ // Clean up
+ for (int i = 0; i < 3; i++) {
+ backend.deleteMetadataRecord(prefix + i);
+ }
+ }
+ }
+
+ @Test
+ public void testGetAllMetadataRecordsWithNullPrefix() throws Exception {
+ backend.init();
+
+ try {
+ backend.getAllMetadataRecords(null);
+ fail("Expected NullPointerException for null prefix");
+ } catch (NullPointerException e) {
+ assertEquals("prefix must not be null", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testDeleteMetadataRecord() throws Exception {
+ backend.init();
+
+ String metadataName = "delete-metadata-test";
+ String content = "content to delete";
+
+ // Add metadata record
+        backend.addMetadataRecord(new ByteArrayInputStream(content.getBytes()), metadataName);
+        assertTrue("Metadata record should exist", backend.metadataRecordExists(metadataName));
+
+ // Delete metadata record
+ boolean deleted = backend.deleteMetadataRecord(metadataName);
+ assertTrue("Delete should return true", deleted);
+ assertFalse("Metadata record should not exist after delete",
backend.metadataRecordExists(metadataName));
+ }
+
+ @Test
+ public void testDeleteNonExistentMetadataRecord() throws Exception {
+ backend.init();
+
+        boolean deleted = backend.deleteMetadataRecord("non-existent-metadata");
+        assertFalse("Delete should return false for non-existent record", deleted);
+ }
+
+ @Test
+ public void testDeleteAllMetadataRecords() throws Exception {
+ backend.init();
+
+ String prefix = "delete-all-";
+
+ // Add multiple metadata records
+ for (int i = 0; i < 3; i++) {
+ backend.addMetadataRecord(
Review Comment:
Need to add records with different prefixes to verify that only the records matching the given prefix are deleted.
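A minimal sketch of the suggested check, reusing the helpers already used in this class:
```java
@Test
public void testDeleteAllMetadataRecordsOnlyRemovesMatchingPrefix() throws Exception {
    backend.init();

    // Records under two different prefixes.
    backend.addMetadataRecord(new ByteArrayInputStream("a".getBytes()), "delete-all-a-0");
    backend.addMetadataRecord(new ByteArrayInputStream("b".getBytes()), "other-prefix-0");

    backend.deleteAllMetadataRecords("delete-all-");

    // Only records matching the prefix should be gone.
    assertFalse("Prefixed record should be deleted", backend.metadataRecordExists("delete-all-a-0"));
    assertTrue("Record with a different prefix should remain", backend.metadataRecordExists("other-prefix-0"));

    backend.deleteMetadataRecord("other-prefix-0");
}
```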
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]