Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java Fri Mar 24 09:31:18 2017
@@ -0,0 +1,691 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import static org.apache.commons.codec.binary.Hex.encodeHexString;
+import static org.apache.commons.io.FileUtils.copyInputStreamToFile;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.NullOutputStream;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.spi.blob.SharedBackend;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.net.URISyntaxException;
+import java.security.DigestOutputStream;
+import java.security.InvalidKeyException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * Test {@link AzureDataStore} with the local cache disabled.
+ * It requires the Azure config file to be passed via a system property, or the
+ * individual settings to be passed as system properties prefixed with 'ds.'.
+ * See {@link AzureDataStoreUtils} for details.
+ * For example: -Dconfig=/opt/cq/azure.properties. A sample azure.properties is
+ * located at src/test/resources/azure.properties.
+ */
+public class AzureDataStoreTest {
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
+ private Properties props;
+ private static byte[] testBuffer = "test".getBytes();
+ private AzureDataStore ds;
+ private AzureBlobStoreBackend backend;
+ private String container;
+ Random randomGen = new Random();
+
+ @BeforeClass
+ public static void assumptions() {
+ assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+ }
+
+ @Before
+ public void setup() throws IOException, RepositoryException, URISyntaxException, InvalidKeyException, StorageException {
+
+ props = AzureDataStoreUtils.getAzureConfig();
+ container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999)) + "-test";
+ props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container);
+
+ ds = new AzureDataStore();
+ ds.setProperties(props);
+ ds.setCacheSize(0); // Turn caching off so we don't get weird test results due to caching
+ ds.init(folder.newFolder().getAbsolutePath());
+ backend = (AzureBlobStoreBackend) ds.getBackend();
+ }
+
+ @After
+ public void teardown() throws InvalidKeyException, URISyntaxException, StorageException {
+ ds = null;
+ try {
+ AzureDataStoreUtils.deleteContainer(container);
+ } catch (Exception ignore) {}
+ }
+
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataRecord rhs)
+ throws DataStoreException, IOException {
+ validateRecord(record, contents, rhs.getIdentifier(), rhs.getLength(), rhs.getLastModified());
+ }
+
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataIdentifier identifier,
+ final long length,
+ final long lastModified)
+ throws DataStoreException, IOException {
+ validateRecord(record, contents, identifier, length, lastModified, true);
+ }
+
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataIdentifier identifier,
+ final long length,
+ final long lastModified,
+ final boolean lastModifiedEquals)
+ throws DataStoreException, IOException {
+ assertEquals(record.getLength(), length);
+ if (lastModifiedEquals) {
+ assertEquals(record.getLastModified(), lastModified);
+ } else {
+ assertTrue(record.getLastModified() > lastModified);
+ }
+
+ assertTrue(record.getIdentifier().toString().equals(identifier.toString()));
+ StringWriter writer = new StringWriter();
+ org.apache.commons.io.IOUtils.copy(record.getStream(), writer, "utf-8");
+ assertTrue(writer.toString().equals(contents));
+ }
+
+ private static InputStream randomStream(int seed, int size) {
+ Random r = new Random(seed);
+ byte[] data = new byte[size];
+ r.nextBytes(data);
+ return new ByteArrayInputStream(data);
+ }
+
+ private static String getIdForInputStream(final InputStream in)
+ throws NoSuchAlgorithmException, IOException {
+ // Record identifiers are the SHA-1 digest of the stream contents
+ MessageDigest digest = MessageDigest.getInstance("SHA-1");
+ OutputStream output = new DigestOutputStream(new NullOutputStream(), digest);
+ try {
+ IOUtils.copyLarge(in, output);
+ } finally {
+ IOUtils.closeQuietly(output);
+ IOUtils.closeQuietly(in);
+ }
+ return encodeHexString(digest.digest());
+ }
+
+
+ @Test
+ public void testCreateAndDeleteBlobHappyPath() throws DataStoreException, IOException {
+ final DataRecord uploadedRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier = uploadedRecord.getIdentifier();
+ assertTrue(backend.exists(identifier));
+ assertTrue(0 != uploadedRecord.getLastModified());
+ assertEquals(testBuffer.length, uploadedRecord.getLength());
+
+ final DataRecord retrievedRecord = ds.getRecord(identifier);
+ validateRecord(retrievedRecord, new String(testBuffer), uploadedRecord);
+
+ ds.deleteRecord(identifier);
+ assertFalse(backend.exists(uploadedRecord.getIdentifier()));
+ }
+
+
+ @Test
+ public void testCreateAndReUploadBlob() throws DataStoreException, IOException {
+ final DataRecord createdRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier1 = createdRecord.getIdentifier();
+ assertTrue(backend.exists(identifier1));
+
+ final DataRecord record1 = ds.getRecord(identifier1);
+ validateRecord(record1, new String(testBuffer), createdRecord);
+
+ try { Thread.sleep(1001); } catch (InterruptedException e) { }
+
+ final DataRecord updatedRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier2 = updatedRecord.getIdentifier();
+ assertTrue(backend.exists(identifier2));
+
+ assertTrue(identifier1.toString().equals(identifier2.toString()));
+ validateRecord(record1, new String(testBuffer), createdRecord);
+
+ ds.deleteRecord(identifier1);
+ assertFalse(backend.exists(createdRecord.getIdentifier()));
+ }
+
+ @Test
+ public void testListBlobs() throws DataStoreException, IOException {
+ final Set<DataIdentifier> identifiers = Sets.newHashSet();
+ final Set<String> testStrings = Sets.newHashSet("test1", "test2", "test3");
+
+ for (String s : testStrings) {
+ identifiers.add(ds.addRecord(new ByteArrayInputStream(s.getBytes())).getIdentifier());
+ }
+
+ Iterator<DataIdentifier> iter = ds.getAllIdentifiers();
+ while (iter.hasNext()) {
+ DataIdentifier identifier = iter.next();
+ assertTrue(identifiers.contains(identifier));
+ ds.deleteRecord(identifier);
+ }
+ }
+
+ ////
+ // Backend Tests
+ ////
+
+ private void validateRecordData(final SharedBackend backend,
+ final DataIdentifier identifier,
+ int expectedSize,
+ final InputStream expected) throws IOException, DataStoreException {
+ byte[] blobData = new byte[expectedSize];
+ backend.read(identifier).read(blobData);
+ byte[] expectedData = new byte[expectedSize];
+ expected.read(expectedData);
+ for (int i=0; i<expectedSize; i++) {
+ assertEquals(expectedData[i], blobData[i]);
+ }
+ }
+
+ // Write (Backend)
+
+ @Test
+ public void testBackendWriteDifferentSizedRecords() throws IOException, NoSuchAlgorithmException, DataStoreException {
+ // Sizes are chosen as follows:
+ // 0 - explicitly test zero-size file
+ // 10 - very small file
+ // 1000 - under 4K (a reasonably expected stream buffer size)
+ // 4100 - over 4K but under 8K and 16K (other reasonably expected stream buffer sizes)
+ // 16500 - over 8K and 16K but under 64K (another reasonably expected stream buffer size)
+ // 66000 - over 64K but under 128K (probably the largest reasonably expected stream buffer size)
+ // 132000 - over 128K
+ for (int size : Lists.newArrayList(0, 10, 1000, 4100, 16500, 66000, 132000)) {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(randomStream(size, size), testFile);
+ DataIdentifier identifier = new DataIdentifier(getIdForInputStream(new FileInputStream(testFile)));
+ backend.write(identifier, testFile);
+ assertTrue(backend.exists(identifier));
+
+ validateRecordData(backend, identifier, size, new FileInputStream(testFile));
+
+ backend.deleteRecord(identifier);
+ assertFalse(backend.exists(identifier));
+ }
+ }
+
+ @Test
+ public void testBackendWriteRecordNullIdentifierThrowsNullPointerException() throws IOException, DataStoreException {
+ DataIdentifier identifier = null;
+ File testFile = folder.newFile();
+ copyInputStreamToFile(randomStream(0, 10), testFile);
+ try {
+ backend.write(identifier, testFile);
+ fail();
+ } catch (NullPointerException e) {
+ assertEquals("identifier", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testBackendWriteRecordNullFileThrowsNullPointerException() throws DataStoreException {
+ File testFile = null;
+ DataIdentifier identifier = new DataIdentifier("fake");
+ try {
+ backend.write(identifier, testFile);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("file".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendWriteRecordFileNotFoundThrowsException() throws IOException, NoSuchAlgorithmException {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(randomStream(0, 10), testFile);
+ DataIdentifier identifier = new DataIdentifier(getIdForInputStream(new FileInputStream(testFile)));
+ assertTrue(testFile.delete());
+ try {
+ backend.write(identifier, testFile);
+ fail();
+ } catch (DataStoreException e) {
+ assertTrue(e.getCause() instanceof FileNotFoundException);
+ }
+ }
+
+ // Read (Backend)
+
+ @Test
+ public void testBackendReadRecordNullIdentifier() throws DataStoreException {
+ DataIdentifier identifier = null;
+ try {
+ backend.read(identifier);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("identifier".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendReadRecordInvalidIdentifier() {
+ DataIdentifier identifier = new DataIdentifier("fake");
+ try {
+ backend.read(identifier);
+ fail();
+ }
+ catch (DataStoreException e) { }
+ }
+
+ // Delete (Backend)
+
+ @Test
+ public void testBackendDeleteRecordNullIdentifier() throws DataStoreException {
+ DataIdentifier identifier = null;
+ try {
+ backend.deleteRecord(identifier);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("identifier".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendDeleteRecordInvalidIdentifier() throws DataStoreException {
+ DataIdentifier identifier = new DataIdentifier("fake");
+ backend.deleteRecord(identifier); // We don't care if the identifier is invalid; this is a noop
+ }
+
+ // Exists (Backend)
+
+ @Test
+ public void testBackendNotCreatedRecordDoesNotExist() throws DataStoreException {
+ assertFalse(backend.exists(new DataIdentifier("fake")));
+ }
+
+ @Test
+ public void testBackendRecordExistsNullIdentifierThrowsNullPointerException() throws DataStoreException {
+ try {
+ DataIdentifier nullIdentifier = null;
+ backend.exists(nullIdentifier);
+ fail();
+ }
+ catch (NullPointerException e) { }
+ }
+
+ // GetAllIdentifiers (Backend)
+
+ @Test
+ public void testBackendGetAllIdentifiersNoRecordsReturnsNone() throws DataStoreException {
+ Iterator<DataIdentifier> allIdentifiers = backend.getAllIdentifiers();
+ assertFalse(allIdentifiers.hasNext());
+ }
+
+ @Test
+ public void testBackendGetAllIdentifiers() throws DataStoreException, IOException, NoSuchAlgorithmException {
+ for (int expectedRecCount : Lists.newArrayList(1, 2, 5)) {
+ final List<DataIdentifier> ids = Lists.newArrayList();
+ for (int i=0; i<expectedRecCount; i++) {
+ File testfile = folder.newFile();
+ copyInputStreamToFile(randomStream(i, 10), testfile);
+ DataIdentifier identifier = new DataIdentifier(getIdForInputStream(new FileInputStream(testfile)));
+ backend.write(identifier, testfile);
+ ids.add(identifier);
+ }
+
+ int actualRecCount = Iterators.size(backend.getAllIdentifiers());
+
+ for (DataIdentifier identifier : ids) {
+ backend.deleteRecord(identifier);
+ }
+
+ assertEquals(expectedRecCount, actualRecCount);
+ }
+ }
+
+ // GetRecord (Backend)
+
+ @Test
+ public void testBackendGetRecord() throws IOException, DataStoreException {
+ String recordData = "testData";
+ DataRecord record = ds.addRecord(new ByteArrayInputStream(recordData.getBytes()));
+ DataRecord retrievedRecord = backend.getRecord(record.getIdentifier());
+ validateRecord(record, recordData, retrievedRecord);
+ }
+
+ @Test
+ public void testBackendGetRecordNullIdentifierThrowsNullPointerException() throws DataStoreException {
+ try {
+ DataIdentifier identifier = null;
+ backend.getRecord(identifier);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("identifier".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendGetRecordInvalidIdentifierThrowsDataStoreException() {
+ try {
+ backend.getRecord(new DataIdentifier("invalid"));
+ fail();
+ }
+ catch (DataStoreException e) { }
+ }
+
+ // GetAllRecords (Backend)
+
+ @Test
+ public void testBackendGetAllRecordsReturnsAll() throws DataStoreException, IOException {
+ for (int recCount : Lists.newArrayList(0, 1, 2, 5)) {
+ Map<DataIdentifier, String> addedRecords = Maps.newHashMap();
+ if (0 < recCount) {
+ for (int i = 0; i < recCount; i++) {
+ String data = String.format("testData%d", i);
+ DataRecord record = ds.addRecord(new ByteArrayInputStream(data.getBytes()));
+ addedRecords.put(record.getIdentifier(), data);
+ }
+ }
+
+ Iterator<DataRecord> iter = backend.getAllRecords();
+ List<DataIdentifier> identifiers = Lists.newArrayList();
+ int actualCount = 0;
+ while (iter.hasNext()) {
+ DataRecord record = iter.next();
+ identifiers.add(record.getIdentifier());
+ assertTrue(addedRecords.containsKey(record.getIdentifier()));
+ StringWriter writer = new StringWriter();
+ IOUtils.copy(record.getStream(), writer);
+ assertTrue(writer.toString().equals(addedRecords.get(record.getIdentifier())));
+ actualCount++;
+ }
+
+ for (DataIdentifier identifier : identifiers) {
+ ds.deleteRecord(identifier);
+ }
+
+ assertEquals(recCount, actualCount);
+ }
+ }
+
+ // AddMetadataRecord (Backend)
+
+ @Test
+ public void testBackendAddMetadataRecordsFromInputStream() throws DataStoreException, IOException, NoSuchAlgorithmException {
+ for (boolean fromInputStream : Lists.newArrayList(false, true)) {
+ String prefix = String.format("%s.META.", getClass().getSimpleName());
+ for (int count : Lists.newArrayList(1, 3)) {
+ Map<String, String> records = Maps.newHashMap();
+ for (int i = 0; i < count; i++) {
+ String recordName = String.format("%sname.%d", prefix, i);
+ String data = String.format("testData%d", i);
+ records.put(recordName, data);
+
+ if (fromInputStream) {
+ backend.addMetadataRecord(new ByteArrayInputStream(data.getBytes()), recordName);
+ }
+ else {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(new ByteArrayInputStream(data.getBytes()), testFile);
+ backend.addMetadataRecord(testFile, recordName);
+ }
+ }
+
+ assertEquals(count, backend.getAllMetadataRecords(prefix).size());
+
+ for (Map.Entry<String, String> entry : records.entrySet()) {
+ DataRecord record = backend.getMetadataRecord(entry.getKey());
+ StringWriter writer = new StringWriter();
+ IOUtils.copy(record.getStream(), writer);
+ backend.deleteMetadataRecord(entry.getKey());
+ assertTrue(writer.toString().equals(entry.getValue()));
+ }
+
+ assertEquals(0, backend.getAllMetadataRecords(prefix).size());
+ }
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordFileNotFoundThrowsDataStoreException() throws IOException {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(randomStream(0, 10), testFile);
+ testFile.delete();
+ try {
+ backend.addMetadataRecord(testFile, "name");
+ fail();
+ }
+ catch (DataStoreException e) {
+ assertTrue(e.getCause() instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullInputStreamThrowsNullPointerException() throws DataStoreException {
+ try {
+ backend.addMetadataRecord((InputStream)null, "name");
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("input".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullFileThrowsNullPointerException() throws DataStoreException {
+ try {
+ backend.addMetadataRecord((File)null, "name");
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("input".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullEmptyNameThrowsIllegalArgumentException() throws DataStoreException, IOException {
+ final String data = "testData";
+ for (boolean fromInputStream : Lists.newArrayList(false, true)) {
+ for (String name : Lists.newArrayList(null, "")) {
+ try {
+ if (fromInputStream) {
+ backend.addMetadataRecord(new ByteArrayInputStream(data.getBytes()), name);
+ } else {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(new ByteArrayInputStream(data.getBytes()), testFile);
+ backend.addMetadataRecord(testFile, name);
+ }
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue("name".equals(e.getMessage()));
+ }
+ }
+ }
+ }
+
+ // GetMetadataRecord (Backend)
+
+ @Test
+ public void testBackendGetMetadataRecordInvalidName() throws DataStoreException {
+ backend.addMetadataRecord(randomStream(0, 10), "testRecord");
+ assertNull(backend.getMetadataRecord("invalid"));
+ for (String name : Lists.newArrayList("", null)) {
+ try {
+ backend.getMetadataRecord(name);
+ fail("Expect to throw");
+ } catch (Exception e) { }
+ }
+
+ backend.deleteMetadataRecord("testRecord");
+ }
+
+ // GetAllMetadataRecords (Backend)
+
+ @Test
+ public void testBackendGetAllMetadataRecordsPrefixMatchesAll() throws DataStoreException {
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+
+ String prefixAll = "prefix1";
+ String prefixSome = "prefix1.prefix2";
+ String prefixOne = "prefix1.prefix3";
+ String prefixNone = "prefix4";
+
+ backend.addMetadataRecord(randomStream(1, 10), String.format("%s.testRecord1", prefixAll));
+ backend.addMetadataRecord(randomStream(2, 10), String.format("%s.testRecord2", prefixSome));
+ backend.addMetadataRecord(randomStream(3, 10), String.format("%s.testRecord3", prefixSome));
+ backend.addMetadataRecord(randomStream(4, 10), String.format("%s.testRecord4", prefixOne));
+ backend.addMetadataRecord(randomStream(5, 10), "prefix5.testRecord5");
+
+ assertEquals(5, backend.getAllMetadataRecords("").size());
+ assertEquals(4, backend.getAllMetadataRecords(prefixAll).size());
+ assertEquals(2, backend.getAllMetadataRecords(prefixSome).size());
+ assertEquals(1, backend.getAllMetadataRecords(prefixOne).size());
+ assertEquals(0, backend.getAllMetadataRecords(prefixNone).size());
+
+ backend.deleteAllMetadataRecords("");
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+ }
+
+ @Test
+ public void testBackendGetAllMetadataRecordsNullPrefixThrowsNullPointerException() {
+ try {
+ backend.getAllMetadataRecords(null);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("prefix".equals(e.getMessage()));
+ }
+ }
+
+ // DeleteMetadataRecord (Backend)
+
+ @Test
+ public void testBackendDeleteMetadataRecord() throws DataStoreException {
+ backend.addMetadataRecord(randomStream(0, 10), "name");
+ for (String name : Lists.newArrayList("invalid", "", null)) {
+ if (Strings.isNullOrEmpty(name)) {
+ try {
+ backend.deleteMetadataRecord(name);
+ }
+ catch (IllegalArgumentException e) { }
+ }
+ else {
+ assertFalse(backend.deleteMetadataRecord(name));
+ }
+ }
+ assertTrue(backend.deleteMetadataRecord("name"));
+ }
+
+ // DeleteAllMetadataRecords (Backend)
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsPrefixMatchesAll() throws DataStoreException {
+ String prefixAll = "prefix1";
+ String prefixSome = "prefix1.prefix2";
+ String prefixOne = "prefix1.prefix3";
+ String prefixNone = "prefix4";
+
+ Map<String, Integer> prefixCounts = Maps.newHashMap();
+ prefixCounts.put(prefixAll, 4);
+ prefixCounts.put(prefixSome, 2);
+ prefixCounts.put(prefixOne, 1);
+ prefixCounts.put(prefixNone, 0);
+
+ for (Map.Entry<String, Integer> entry : prefixCounts.entrySet()) {
+ backend.addMetadataRecord(randomStream(1, 10), String.format("%s.testRecord1", prefixAll));
+ backend.addMetadataRecord(randomStream(2, 10), String.format("%s.testRecord2", prefixSome));
+ backend.addMetadataRecord(randomStream(3, 10), String.format("%s.testRecord3", prefixSome));
+ backend.addMetadataRecord(randomStream(4, 10), String.format("%s.testRecord4", prefixOne));
+
+ int preCount = backend.getAllMetadataRecords("").size();
+
+ backend.deleteAllMetadataRecords(entry.getKey());
+
+ int deletedCount = preCount - backend.getAllMetadataRecords("").size();
+ assertEquals(entry.getValue().intValue(), deletedCount);
+
+ backend.deleteAllMetadataRecords("");
+ }
+ }
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsNoRecordsNoChange() {
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+
+ backend.deleteAllMetadataRecords("");
+
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+ }
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsNullPrefixThrowsNullPointerException() {
+ try {
+ backend.deleteAllMetadataRecords(null);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("prefix".equals(e.getMessage()));
+ }
+ }
+}
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java
------------------------------------------------------------------------------
svn:eol-style = native
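
For reference, the test lifecycle above reduces to the following initialization pattern (a minimal sketch based on setup(); the container name and home directory are hypothetical placeholders):

    // Resolve config from the config file or ds.-prefixed system properties
    Properties props = AzureDataStoreUtils.getAzureConfig();
    props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, "my-test-container");

    AzureDataStore ds = new AzureDataStore();
    ds.setProperties(props);
    ds.setCacheSize(0);                  // caching disabled, as in the tests above
    ds.init("/path/to/datastore/home");  // hypothetical home directory
    AzureBlobStoreBackend backend = (AzureBlobStoreBackend) ds.getBackend();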
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java Fri Mar 24 09:31:18 2017
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.oak.commons.PropertiesUtil;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Extension to {@link DataStoreUtils} to enable Azure extensions for cleaning and initialization.
+ */
+public class AzureDataStoreUtils extends DataStoreUtils {
+ private static final Logger log = LoggerFactory.getLogger(AzureDataStoreUtils.class);
+
+ private static final String DEFAULT_CONFIG_PATH = "./src/test/resources/azure.properties";
+
+
+ /**
+ * Check for the presence of the mandatory properties.
+ *
+ * @return true if the mandatory properties are configured.
+ */
+ public static boolean isAzureConfigured() {
+ Properties props = getAzureConfig();
+ if (!props.containsKey(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY) || !props.containsKey(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME)
+ || !props.containsKey(AzureConstants.AZURE_BLOB_CONTAINER_NAME)) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Read the properties from the configured config file, plus any properties
+ * available as system properties. System properties take precedence.
+ *
+ * @return Properties instance
+ */
+ public static Properties getAzureConfig() {
+ String config = System.getProperty("azure.config");
+ if (Strings.isNullOrEmpty(config)) {
+ config = DEFAULT_CONFIG_PATH;
+ }
+ Properties props = new Properties();
+ if (new File(config).exists()) {
+ InputStream is = null;
+ try {
+ is = new FileInputStream(config);
+ props.load(is);
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ IOUtils.closeQuietly(is);
+ }
+ props.putAll(getConfig());
+ // Drop entries whose values are null or empty
+ Map filtered = Maps.filterEntries(Maps.fromProperties(props), new Predicate<Map.Entry<? extends Object, ? extends Object>>() {
+ @Override public boolean apply(Map.Entry<? extends Object, ? extends Object> input) {
+ return !Strings.isNullOrEmpty((String) input.getValue());
+ }
+ });
+ props = new Properties();
+ props.putAll(filtered);
+ }
+ return props;
+ }
+
+ public static DataStore getAzureDataStore(Properties props, String homeDir) throws Exception {
+ AzureDataStore ds = new AzureDataStore();
+ PropertiesUtil.populate(ds, Maps.fromProperties(props), false);
+ ds.setProperties(props);
+ ds.init(homeDir);
+
+ return ds;
+ }
+
+ public static void deleteContainer(String containerName) throws Exception {
+ if (Strings.isNullOrEmpty(containerName)) {
+ log.warn("Cannot delete container with null or empty name. containerName={}", containerName);
+ return;
+ }
+ log.info("Starting to delete container. containerName={}", containerName);
+ Properties props = getAzureConfig();
+ CloudBlobContainer container = Utils.getBlobContainer(Utils.getConnectionStringFromProperties(props), containerName);
+ boolean result = container.deleteIfExists();
+ log.info("Container deleted. containerName={} existed={}", containerName, result);
+ }
+}
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
------------------------------------------------------------------------------
svn:eol-style = native
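
With this helper, the tests can be pointed at a storage account in one of two ways (a sketch; all values and paths here are hypothetical):

    # via a properties file, read through the azure.config system property above
    mvn test -Dazure.config=/path/to/azure.properties

    # or via individual system properties prefixed with 'ds.', which take precedence
    mvn test -Dds.accessKey=mystorageaccount -Dds.secretKey=... -Dds.container=oak-test

Note that getAzureConfig() filters out entries with null or empty values, so optional keys left unset in the sample azure.properties are simply ignored.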
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java Fri Mar 24 09:31:18 2017
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Properties;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * Test {@link AzureDataStore} with the local cache enabled.
+ * It requires the Azure config file to be passed via a system property, or the
+ * individual settings to be passed as system properties prefixed with 'ds.'.
+ * See {@link AzureDataStoreUtils} for details.
+ * For example: -Dconfig=/opt/cq/azure.properties. A sample azure.properties is
+ * located at src/test/resources/azure.properties.
+ */
+public class TestAzureDS extends AbstractDataStoreTest {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDS.class);
+ protected Properties props;
+ protected String container;
+
+ @BeforeClass
+ public static void assumptions() {
+ assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+ }
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ props = AzureDataStoreUtils.getAzureConfig();
+ container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999)) + "-test";
+ props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container);
+ props.setProperty("secret", "123456");
+ super.setUp();
+ }
+
+ @Override
+ @After
+ public void tearDown() {
+ try {
+ super.tearDown();
+ AzureDataStoreUtils.deleteContainer(container);
+ } catch (Exception ignore) {
+
+ }
+ }
+
+ @Override
+ protected DataStore createDataStore() throws RepositoryException {
+ DataStore azureds = null;
+ try {
+ azureds = AzureDataStoreUtils.getAzureDataStore(props, dataStoreDir);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ sleep(1000);
+ return azureds;
+ }
+
+ /**---------- Skipped -----------**/
+ @Override
+ public void testUpdateLastModifiedOnAccess() {
+ }
+
+ @Override
+ public void testDeleteAllOlderThan() {
+ }
+}
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
------------------------------------------------------------------------------
svn:eol-style = native
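
A minimal sketch of how the createDataStore() helper above can be exercised directly (the container name, home directory and payload are hypothetical placeholders):

    Properties props = AzureDataStoreUtils.getAzureConfig();
    props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, "mycontainer-test");
    DataStore ds = AzureDataStoreUtils.getAzureDataStore(props, "/tmp/ds-home");
    DataRecord record = ds.addRecord(new ByteArrayInputStream("hello".getBytes()));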
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java Fri Mar 24 09:31:18 2017
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.jackrabbit.core.data.CachingDataStore;
+import org.apache.jackrabbit.core.data.LocalCache;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link CachingDataStore} with AzureBlobStoreBackend and a very small
+ * {@link LocalCache}.
+ * It requires the Azure config file to be passed via a system property, or the
+ * individual settings to be passed as system properties prefixed with 'ds.'.
+ * See {@link AzureDataStoreUtils} for details.
+ * For example: -Dconfig=/opt/cq/azure.properties. A sample azure.properties is
+ * located at src/test/resources/azure.properties.
+ */
+public class TestAzureDSWithSmallCache extends TestAzureDS {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDSWithSmallCache.class);
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ props.setProperty("cacheSize", String.valueOf(dataLength * 10));
+ }
+}
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
------------------------------------------------------------------------------
svn:eol-style = native
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java Fri Mar 24 09:31:18 2017
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link org.apache.jackrabbit.core.data.CachingDataStore} with
+ * AzureBlobStoreBackend and the local cache off.
+ * It requires the Azure config file to be passed via a system property, or the
+ * individual settings to be passed as system properties prefixed with 'ds.'.
+ * See {@link AzureDataStoreUtils} for details.
+ * For example: -Dconfig=/opt/cq/azure.properties. A sample azure.properties is
+ * located at src/test/resources/azure.properties.
+ */
+public class TestAzureDsCacheOff extends TestAzureDS {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDsCacheOff.class);
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ props.setProperty("cacheSize", "0");
+ }
+}
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
------------------------------------------------------------------------------
svn:eol-style = native
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/azure.properties
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/azure.properties?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/azure.properties (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/azure.properties Fri Mar 24 09:31:18 2017
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Microsoft Azure authentication credentials.
+# https://azure.microsoft.com/en-us/documentation/articles/storage-create-storage-account
+# account name
+accessKey=
+# account key
+secretKey=
+
+# Microsoft Azure blob storage container name. Container is a grouping of a set
+# of blobs. https://msdn.microsoft.com/en-us/library/dd135715.aspx
+container=
+
+# The timeout interval, in milliseconds, to use for requests (default 5 minutes)
+socketTimeout=120000
+
+# Maximum number of concurrent requests per operation (default 1)
+maxConnections=1
+
+# Number of retries per request (default 3)
+maxErrorRetry=3
+
+# proxy configurations (optional)
+proxyHost=
+proxyPort=
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/azure.properties
------------------------------------------------------------------------------
svn:eol-style = native
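
A filled-in example (all values below are hypothetical placeholders; as the comments above note, accessKey/secretKey map to the Azure storage account name and key):

    accessKey=mystorageaccount
    secretKey=BASE64ENCODEDACCOUNTKEY==
    container=oak-blobs
    socketTimeout=120000
    maxConnections=4
    maxErrorRetry=3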
Added: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/logback-test.xml
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/logback-test.xml?rev=1788387&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/logback-test.xml (added)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/logback-test.xml Fri Mar 24 09:31:18 2017
@@ -0,0 +1,40 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+<configuration>
+
+ <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L)
%msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="file" class="ch.qos.logback.core.FileAppender">
+ <file>target/unit-tests.log</file>
+ <encoder>
+ <pattern>%date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L)
%msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <logger name="org.apache.jackrabbit.core.data" level="INFO"/>
+ <logger name="org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage"
level="TRACE"/>
+
+ <root level="INFO">
+ <appender-ref ref="console"/>
+ <appender-ref ref="file"/>
+ </root>
+
+</configuration>
Propchange: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/test/resources/logback-test.xml
------------------------------------------------------------------------------
svn:eol-style = native
Modified: jackrabbit/oak/trunk/oak-doc/src/site/markdown/osgi_config.md
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-doc/src/site/markdown/osgi_config.md?rev=1788387&r1=1788386&r2=1788387&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-doc/src/site/markdown/osgi_config.md (original)
+++ jackrabbit/oak/trunk/oak-doc/src/site/markdown/osgi_config.md Fri Mar 24 09:31:18 2017
@@ -433,6 +433,7 @@ All the above data stores enable local f
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.SharedS3DataStore`_
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.S3DataStore`_
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.FileDataStore`_
+* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore`_
cacheSize
: Default - 68719476736
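
For illustration, a sketch of an OSGi configuration for the new PID (the file name, format and values are hypothetical; the parameter names follow the azure.properties sample and the cacheSize default documented above):

    # org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore.cfg
    accessKey=mystorageaccount
    secretKey=BASE64ENCODEDACCOUNTKEY==
    container=oak-blobs
    cacheSize=68719476736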
Modified: jackrabbit/oak/trunk/oak-doc/src/site/markdown/plugins/blobstore.md
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-doc/src/site/markdown/plugins/blobstore.md?rev=1788387&r1=1788386&r2=1788387&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-doc/src/site/markdown/plugins/blobstore.md (original)
+++ jackrabbit/oak/trunk/oak-doc/src/site/markdown/plugins/blobstore.md Fri Mar 24 09:31:18 2017
@@ -78,6 +78,7 @@ Further Oak ships with multiple BlobStor
4. `S3DataStore` (with wrapper) - Stores the file in Amazon S3
5. `RDBBlobStore` - Stores the file contents in chunks in a relational database. Typically used with `DocumentNodeStore` when using relational DB persistence
+6. `AzureDataStore` (with wrapper) - Stores the file in Microsoft Azure Blob storage
In addition there are some more implementations which are considered **experimental**
@@ -98,6 +99,7 @@ can be used
* FileDataStore - This should be used if the blobs/binaries have to be shared between multiple
repositories. This would also be used when a JR2 repository is migrated to Oak
* S3DataStore - This should be used when binaries are stored in Amazon S3
+* AzureDataStore - This should be used when binaries are stored in Microsoft Azure Blob storage
#### DocumentNodeStore
@@ -112,7 +114,7 @@ one of the following can be used
#### Caching DataStore
-The DataStore implementations `S3DataStore` and `CachingFileDataStore` support local file system caching for the
+The DataStore implementations `S3DataStore`, `CachingFileDataStore` and `AzureDataStore` support local file system caching for the
files/blobs and extend the `AbstractSharedCachingDataStore` class which implements the caching functionality. The
`CachingFileDataStore` is useful when the DataStore is on nfs.
The cache has a size limit and is configured by the `cacheSize` parameter.
@@ -198,6 +200,7 @@ Blob Garbage Collection(GC) is applicabl
* FileDataStore
* S3DataStore
* SharedS3DataStore (since Oak 1.2.0)
+ * AzureDataStore
Oak implements a Mark and Sweep based Garbage Collection logic.
@@ -224,7 +227,7 @@ The garbage collection can be triggered
<a name="blobid-tracker"></a>
#### Caching of Blob ids locally (Oak 1.6.x)
-For the `FileDataStore` and `S3DataStore` the blob ids are cached locally on the disk when they are created which
+For the `FileDataStore`, `S3DataStore` and `AzureDataStore` the blob ids are cached locally on the disk when they are created which
speeds up the 'Mark BlobStore' phase. The locally tracked ids are synchronized with the data store periodically to enable
other cluster nodes or different repositories sharing the datastore to get a consolidated list of all blob ids. The
interval of synchronization is defined by the OSGi configuration parameter `blobTrackSnapshotIntervalInSecs` for the
@@ -249,13 +252,14 @@ following should be executed.
##### Registration
-On start of a repository configured to use a shared DataStore (same path or S3 bucket), a unique repository id is
+On start of a repository configured to use a shared DataStore (same path, S3 bucket or Azure container), a unique repository id is
generated and registered in the NodeStore as well as the DataStore.
In the DataStore this repository id is registered as an empty file with the format `repository-[repository-id]`
(e.g. repository-988373a0-3efb-451e-ab4c-f7e794189273). This empty file is created under:
* FileDataStore - Under the root directory configured for the datastore.
* S3DataStore - Under `META` folder in the S3 bucket configured.
+* AzureDataStore - Under `META` folder in the Azure container configured.
On start/configuration of all the repositories sharing the data store it should be confirmed that the unique
repositoryId per repository is registered in the DataStore. Refer the section below on [Checking Shared GC status](#check-shared-datastore-gc).
@@ -387,6 +391,7 @@ the steps:
* Remove the corresponding registered repository file (`repository-[repositoryId]`) from the DataStore
* FileDataStore - Remove the file from the data store root directory.
* S3DataStore - Remove the file from the `META` folder of the S3 bucket.
+ * AzureDataStore - Remove the file from the `META` folder of the Azure container.
* Remove other files corresponding to the particular repositoryId e.g. `markedTimestamp-[repositoryId]` or `references-[repositoryId]`.
Modified: jackrabbit/oak/trunk/oak-parent/pom.xml
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-parent/pom.xml?rev=1788387&r1=1788386&r2=1788387&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-parent/pom.xml (original)
+++ jackrabbit/oak/trunk/oak-parent/pom.xml Fri Mar 24 09:31:18 2017
@@ -178,6 +178,7 @@
<filter>!org.apache.jackrabbit.oak.spi.blob.split</filter>
<filter>!org.apache.jackrabbit.oak.spi.blob.stats</filter>
<filter>!org.apache.jackrabbit.oak.blob.cloud.aws.s3</filter>
+ <filter>!org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage</filter>
<filter>!org.apache.jackrabbit.oak</filter>
<filter>!org.apache.jackrabbit.oak.json</filter>
<filter>!org.apache.jackrabbit.oak.management</filter>
Modified: jackrabbit/oak/trunk/pom.xml
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/pom.xml?rev=1788387&r1=1788386&r2=1788387&view=diff
==============================================================================
--- jackrabbit/oak/trunk/pom.xml (original)
+++ jackrabbit/oak/trunk/pom.xml Fri Mar 24 09:31:18 2017
@@ -40,6 +40,7 @@
<module>oak-commons</module>
<module>oak-blob</module>
<module>oak-blob-cloud</module>
+ <module>oak-blob-cloud-azure</module>
<module>oak-core</module>
<module>oak-jcr</module>
<module>oak-upgrade</module>