Author: amitj
Date: Tue Sep 18 03:55:48 2018
New Revision: 1841184
URL: http://svn.apache.org/viewvc?rev=1841184&view=rev
Log:
OAK-7716: Enable datastore command for Azure segment store
- Tests for Azure segment store
- Documentation
Modified:
jackrabbit/oak/trunk/oak-run/README.md
jackrabbit/oak/trunk/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java
Modified: jackrabbit/oak/trunk/oak-run/README.md
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/README.md?rev=1841184&r1=1841183&r2=1841184&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-run/README.md (original)
+++ jackrabbit/oak/trunk/oak-run/README.md Tue Sep 18 03:55:48 2018
@@ -525,17 +525,20 @@ The following operations are available:
The following options are available:
- --work-dir - Path to use for temporary files and
directoriesw(Optional). Otherwise, files will be dumped in the user temp
directory.
- --out-dir - Path where to dump the files (Optional). Otherwise,
files will be dumped in the current directory.
- --s3ds - Path to the S3DataStore configuration file.
- --azureds - Path to the AzureDataStore configuration file.
- --fds - Path to the FileDataStore configuration file ('path'
property is mandatory).
- --fake-ds-path - To check for misconfigured external references when no
data store should be there.
- --max-age - Corresponds to the OSGi 'maxBlobGcAgeInSecs' property
and specifies the time interval from now with only older blobs being deleted.
- --verbose - Outputs backend friendly blobids and also adds the node
path (for SegmentNodeStore) from where referred.
- This options would typically be a slower option since,
it requires the whole repo traversal.
- Adds the sub-directories created in FDS and the changes
done for S3/Azure when stored in the respective container.
- <path|mongo_uri> - Path to the segment store of mongo uri (Required for
--ref & --consistency option above)
+ --work-dir - Path to use for temporary files and
directories (Optional). Otherwise, files will be dumped in the user temp
directory.
+ --out-dir - Path where to dump the files (Optional). Otherwise,
files will be dumped in the current directory.
+ --ds-read-write - Required option to open the datastore in read-write
mode.
+ --s3ds - Path to the S3DataStore configuration file.
+ --azureds - Path to the AzureDataStore configuration file.
+ --fds - Path to the FileDataStore configuration file ('path'
property is mandatory).
+ --fake-ds-path - To check for misconfigured external references when
no data store should be there.
+ --max-age - Corresponds to the OSGi 'maxBlobGcAgeInSecs' property
and specifies the time interval from now with only older blobs being deleted.
+ --verbose - Outputs backend friendly blobids and also adds the
node path (for SegmentNodeStore) from where referred.
+ This option would typically be slower
since it requires the whole repo traversal.
+ Adds the sub-directories created in FDS and the
changes done for S3/Azure when stored in the respective container.
+ --<path|mongo_uri> - Path to the tar segment store or the Azure segment
store URI as specified in
+
http://jackrabbit.apache.org/oak/docs/nodestore/segment/overview.html#remote-segment-stores
+ or, if using a Mongo NodeStore, the MongoDB URI.
Note:
Modified:
jackrabbit/oak/trunk/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java?rev=1841184&r1=1841183&r2=1841184&view=diff
==============================================================================
---
jackrabbit/oak/trunk/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java
(original)
+++
jackrabbit/oak/trunk/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java
Tue Sep 18 03:55:48 2018
@@ -69,9 +69,12 @@ import org.apache.jackrabbit.oak.plugins
import org.apache.jackrabbit.oak.run.cli.BlobStoreOptions.Type;
import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.AzureUtilities;
+import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils;
import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
+import
org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
import org.apache.jackrabbit.oak.spi.cluster.ClusterRepositoryInfo;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
@@ -84,6 +87,7 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -101,6 +105,7 @@ import static org.apache.jackrabbit.oak.
import static
org.apache.jackrabbit.oak.run.DataStoreCommand.VerboseIdLogger.HASH;
import static
org.apache.jackrabbit.oak.run.DataStoreCommand.VerboseIdLogger.filterFiles;
import static
org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.defaultGCOptions;
+import static
org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
import static org.junit.Assert.assertEquals;
/**
@@ -314,7 +319,7 @@ public class DataStoreCommandTest {
Data data = prepareData(storeFixture, blobFixture, 10, 5, 1);
storeFixture.close();
- testGc(dump, data, 100, false);
+ testGc(dump, data, 10000, false);
}
@Test
@@ -323,7 +328,7 @@ public class DataStoreCommandTest {
Data data = prepareData(storeFixture, blobFixture, 10, 5, 1);
storeFixture.close();
- testGc(dump, data, 100, true);
+ testGc(dump, data, 10000, true);
}
@Test
@@ -609,7 +614,7 @@ public class DataStoreCommandTest {
StoreFixture MONGO = new MongoStoreFixture();
StoreFixture SEGMENT = new SegmentStoreFixture();
-
+ StoreFixture SEGMENT_AZURE = new AzureSegmentStoreFixture();
class MongoStoreFixture implements StoreFixture {
private final Clock.Virtual clock;
@@ -671,10 +676,10 @@ public class DataStoreCommandTest {
}
class SegmentStoreFixture implements StoreFixture {
- private FileStore fileStore;
- private SegmentNodeStore store;
- private SegmentGCOptions gcOptions = defaultGCOptions();
- private String storePath;
+ protected FileStore fileStore;
+ protected SegmentNodeStore store;
+ protected SegmentGCOptions gcOptions = defaultGCOptions();
+ protected String storePath;
@Override public NodeStore init(DataStoreBlobStore blobStore, File
storeFile)
throws Exception {
@@ -728,6 +733,73 @@ public class DataStoreCommandTest {
store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
}
}
+
+
+ /**
+ * Requires 'AZURE_SECRET_KEY' to be set as an environment variable as
well
+ */
+ class AzureSegmentStoreFixture extends SegmentStoreFixture {
+ private static final String AZURE_DIR = "repository";
+ private String container;
+
+ @Override public NodeStore init(DataStoreBlobStore blobStore, File
storeFile) throws Exception {
+ Properties props = AzureDataStoreUtils.getAzureConfig();
+ String accessKey =
props.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME);
+ String secretKey =
props.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY);
+ container =
props.getProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
+ container = container + System.currentTimeMillis();
+ // Create the azure segment container
+ String connectionString = getAzureConnectionString(accessKey,
secretKey, container, AZURE_DIR);
+ AzureUtilities.cloudBlobDirectoryFrom(connectionString,
container, AZURE_DIR);
+
+ // get the azure uri expected by the command
+ storePath = getAzureUri(accessKey, container, AZURE_DIR);
+
+ // initialize azure segment for test setup
+ SegmentNodeStorePersistence segmentNodeStorePersistence =
+
ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE,
storePath);
+ fileStore =
fileStoreBuilder(storeFile).withBlobStore(blobStore)
+
.withCustomPersistence(segmentNodeStorePersistence).build();
+
+ store = SegmentNodeStoreBuilders.builder(fileStore).build();
+
+ return store;
+ }
+
+ protected String getAzureUri(String accountName, String container,
String directory) {
+ StringBuilder uri = new StringBuilder("az:");
+
uri.append("https://").append(accountName).append(".blob.core.windows.net/");
+ uri.append(container).append("/");
+ uri.append(directory);
+
+ return uri.toString();
+ }
+
+ protected String getAzureConnectionString(String accountName,
String secret, String container, String directory) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("AccountName=").append(accountName).append(";");
+ builder.append("DefaultEndpointsProtocol=https;");
+
builder.append("BlobEndpoint=https://").append(accountName).append(".blob.core.windows.net").append(";");
+ builder.append("ContainerName=").append(container).append(";");
+ builder.append("Directory=").append(directory).append(";");
+ builder.append("AccountKey=").append(secret);
+
+ return builder.toString();
+ }
+
+ @Override
+ public void after() {
+ try {
+ AzureDataStoreUtils.deleteContainer(container);
+ } catch(Exception e) {
+ log.error("Error in cleaning the container {}", container,
e);
+ }
+ }
+
+ @Override public boolean isAvailable() {
+ return AzureDataStoreUtils.isAzureConfigured();
+ }
+ }
}
interface DataStoreFixture {
@@ -878,11 +950,13 @@ public class DataStoreCommandTest {
static class FixtureHelper {
static List<StoreFixture> getStoreFixtures() {
- return ImmutableList.of(StoreFixture.MONGO, StoreFixture.SEGMENT);
+ //return ImmutableList.of(StoreFixture.MONGO,
StoreFixture.SEGMENT);
+ return ImmutableList.of(StoreFixture.SEGMENT_AZURE);
}
static List<DataStoreFixture> getDataStoreFixtures() {
- return ImmutableList.of(DataStoreFixture.S3,
DataStoreFixture.AZURE, DataStoreFixture.FDS);
+ //return ImmutableList.of(DataStoreFixture.S3,
DataStoreFixture.AZURE, DataStoreFixture.FDS);
+ return ImmutableList.of(DataStoreFixture.AZURE);
}
static List<Object[]> get() {