Author: amitj
Date: Mon Jul 8 06:55:33 2019
New Revision: 1862717
URL: http://svn.apache.org/viewvc?rev=1862717&view=rev
Log:
OAK-8314: BlobIds created with direct uploads lack length suffix
OAK-8394: Fix BinaryAccessDSGCIT failing intermittently for Mongo
Merge r1859350, r1859351, r1859365, r1860931 from trunk
Added:
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java
- copied, changed from r1859351,
jackrabbit/oak/trunk/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/util/BinaryAccessDSGCFixture.java
- copied unchanged from r1859351,
jackrabbit/oak/trunk/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/util/BinaryAccessDSGCFixture.java
Modified:
jackrabbit/oak/branches/1.10/ (props changed)
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java
Propchange: jackrabbit/oak/branches/1.10/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 8 06:55:33 2019
@@ -1,3 +1,3 @@
/jackrabbit/oak/branches/1.0:1665962
-/jackrabbit/oak/trunk:1850874,1850882,1851236,1851253,1851451,1851533-1851535,1851619,1852052,1852084,1852120,1852135,1852451,1852492-1852493,1852528,1852582,1852584,1852601,1852920,1853083,1853141,1853229,1853393,1853429,1853433,1853441,1853866,1853868,1853870,1853893,1853969,1853997,1854034,1854044,1854055,1854058,1854113,1854373,1854377,1854380,1854385,1854401,1854403,1854455,1854461-1854462,1854466,1854468,1854515,1854533,1854539,1854701,1854773-1854774,1854827,1854848,1854859,1854930,1854990-1854991,1855032,1855221,1855477-1855478,1855776,1855993,1856049,1856056,1856538,1856545,1857000,1857010,1857104,1857159,1857212,1857221,1857238,1857247,1857253,1857294,1857314,1857463,1857480,1857577,1857589,1857592,1857627,1857634-1857635,1857638,1857640,1857687,1857936,1858032,1858053,1858123,1858139,1858385,1858424,1858571,1858578,1858810,1858926,1858931,1859020,1859231,1859292,1859294,1859359,1859533,1859609,1859612,1859619,1859711,1859716,1859772,1859776,1859780,1859843,1859854,1859881
,1860120,1860131,1860137,1860202,1860278,1860328,1860330,1860355,1860393,1860442,1860548,1860564-1860565,1861270,1861626,1862044,1862093,1862531
+/jackrabbit/oak/trunk:1850874,1850882,1851236,1851253,1851451,1851533-1851535,1851619,1852052,1852084,1852120,1852135,1852451,1852492-1852493,1852528,1852582,1852584,1852601,1852920,1853083,1853141,1853229,1853393,1853429,1853433,1853441,1853866,1853868,1853870,1853893,1853969,1853997,1854034,1854044,1854055,1854058,1854113,1854373,1854377,1854380,1854385,1854401,1854403,1854455,1854461-1854462,1854466,1854468,1854515,1854533,1854539,1854701,1854773-1854774,1854827,1854848,1854859,1854930,1854990-1854991,1855032,1855221,1855477-1855478,1855776,1855993,1856049,1856056,1856538,1856545,1857000,1857010,1857104,1857159,1857212,1857221,1857238,1857247,1857253,1857294,1857314,1857463,1857480,1857577,1857589,1857592,1857627,1857634-1857635,1857638,1857640,1857687,1857936,1858032,1858053,1858123,1858139,1858385,1858424,1858571,1858578,1858810,1858926,1858931,1859020,1859231,1859292,1859294,1859350-1859351,1859359,1859365,1859533,1859609,1859612,1859619,1859711,1859716,1859772,1859776,1859780
,1859843,1859854,1859881,1860120,1860131,1860137,1860202,1860278,1860328,1860330,1860355,1860393,1860442,1860548,1860564-1860565,1860931,1861270,1861626,1862044,1862093,1862531
/jackrabbit/trunk:1345480
Modified:
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.10/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java?rev=1862717&r1=1862716&r2=1862717&view=diff
==============================================================================
---
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
(original)
+++
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
Mon Jul 8 06:55:33 2019
@@ -724,7 +724,7 @@ public class DataStoreBlobStore
if (delegate instanceof DataRecordAccessProvider) {
try {
DataRecord record = ((DataRecordAccessProvider)
delegate).completeDataRecordUpload(uploadToken);
- return new BlobStoreBlob(this,
record.getIdentifier().toString());
+ return new BlobStoreBlob(this, getBlobId(record));
}
catch (DataStoreException | DataRecordUploadException e) {
log.warn("Unable to complete direct upload for upload token
{}", uploadToken, e);
Modified:
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.10/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java?rev=1862717&r1=1862716&r2=1862717&view=diff
==============================================================================
---
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
(original)
+++
jackrabbit/oak/branches/1.10/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
Mon Jul 8 06:55:33 2019
@@ -26,19 +26,25 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.net.URI;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicReference;
+import javax.jcr.RepositoryException;
+
import ch.qos.logback.classic.Level;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
@@ -49,15 +55,25 @@ import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.jackrabbit.core.data.DataIdentifier;
import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.data.MultiDataStoreAware;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.api.blob.BlobAccessProvider;
+import org.apache.jackrabbit.oak.api.blob.BlobUpload;
import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
+import
org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordAccessProvider;
+import
org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions;
+import
org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload;
+import
org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException;
import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob;
import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
-import org.apache.jackrabbit.oak.spi.blob.BlobOptions;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
import org.apache.jackrabbit.oak.spi.cluster.ClusterRepositoryInfo;
@@ -128,7 +144,8 @@ public class BlobGCTest {
}
};
- TimeLapsedBlobStore blobStore = new TimeLapsedBlobStore();
+ TimeLapsedDataStore dataStore = new TimeLapsedDataStore();
+ DataStoreBlobStore blobStore = new DataStoreBlobStore(dataStore);
MemoryBlobStoreNodeStore nodeStore = new
MemoryBlobStoreNodeStore(blobStore);
cluster = new Cluster(folder.newFolder(), blobStore, nodeStore, 0);
closer.register(cluster);
@@ -151,6 +168,7 @@ public class BlobGCTest {
protected final BlobStoreState blobStoreState;
private final File root;
String repoId;
+ protected final TimeLapsedDataStore dataStore;
protected final GarbageCollectableBlobStore blobStore;
protected final NodeStore nodeStore;
private MarkSweepGarbageCollector collector;
@@ -163,6 +181,7 @@ public class BlobGCTest {
public Cluster(File root, GarbageCollectableBlobStore blobStore,
NodeStore nodeStore, int seed) throws Exception {
this.root = root;
this.nodeStore = nodeStore;
+ this.dataStore = (TimeLapsedDataStore) ((DataStoreBlobStore)
blobStore).getDataStore();
this.blobStore = blobStore;
if (SharedDataStoreUtils.isShared(blobStore)) {
repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
@@ -272,6 +291,30 @@ public class BlobGCTest {
}
@Test
+ public void gcWithNoDeleteDirectBinary() throws Exception {
+ log.info("Starting gcWithNoDeleteDirectBinary()");
+
+ setupDirectBinary(1, 0);
+ Set<String> existingAfterGC = executeGarbageCollection(cluster,
cluster.getCollector(0), false);
+
assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent,
existingAfterGC).isEmpty());
+ assertStats(cluster.statsProvider, 1, 0,
+ cluster.blobStoreState.blobsAdded.size() -
cluster.blobStoreState.blobsPresent.size(),
+ cluster.blobStoreState.blobsAdded.size() -
cluster.blobStoreState.blobsPresent.size(), NAME);
+ }
+
+ @Test
+ public void gcWithDeleteDirectBinary() throws Exception {
+        log.info("Starting gcWithDeleteDirectBinary()");
+
+ setupDirectBinary(5, 2);
+ Set<String> existingAfterGC = executeGarbageCollection(cluster,
cluster.getCollector(0), false);
+
assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent,
existingAfterGC).isEmpty());
+ assertStats(cluster.statsProvider, 1, 0,
+ cluster.blobStoreState.blobsAdded.size() -
cluster.blobStoreState.blobsPresent.size(),
+ cluster.blobStoreState.blobsAdded.size() -
cluster.blobStoreState.blobsPresent.size(), NAME);
+ }
+
+ @Test
public void noGc() throws Exception {
log.info("Starting noGc()");
@@ -453,6 +496,31 @@ public class BlobGCTest {
return state;
}
+ protected void setupDirectBinary(int numCreate, int numDelete) throws
CommitFailedException {
+ for (int i = 0; i < numCreate; i++) {
+ BlobUpload blobUpload = ((BlobAccessProvider)
cluster.blobStore).initiateBlobUpload(100, 1);
+ Blob blob = ((BlobAccessProvider)
cluster.blobStore).completeBlobUpload(blobUpload.getUploadToken());
+
+ cluster.blobStoreState.blobsAdded.add(blob.getContentIdentity());
+ cluster.blobStoreState.blobsPresent.add(blob.getContentIdentity());
+ NodeBuilder builder = cluster.nodeStore.getRoot().builder();
+ builder.child("dbu" + i).setProperty("x", blob);
+ cluster.nodeStore.merge(builder, EmptyHook.INSTANCE,
CommitInfo.EMPTY);
+ PropertyState property =
cluster.nodeStore.getRoot().getChildNode("dbu" + i).getProperty("x");
+ Blob blobReturned = property.getValue(Type.BINARY);
+ ((MemoryBlobStoreNodeStore)
cluster.nodeStore).getReferencedBlobs().add(blobReturned.getContentIdentity());
+ }
+
+ for (int i = 0; i < Math.min(numCreate, numDelete); i++) {
+ PropertyState property =
cluster.nodeStore.getRoot().getChildNode("dbu" + i).getProperty("x");
+ String blobId =
property.getValue(Type.BINARY).getContentIdentity();
+
+ delete("dbu" + i, cluster.nodeStore);
+ ((MemoryBlobStoreNodeStore)
cluster.nodeStore).getReferencedBlobs().remove(blobId);
+ cluster.blobStoreState.blobsPresent.remove(blobId);
+ }
+ }
+
protected Set<String> createBlobs(GarbageCollectableBlobStore blobStore,
int count, int size) throws Exception {
HashSet<String> blobSet = new HashSet<String>();
for (int i = 0; i < count; i++) {
@@ -503,6 +571,10 @@ public class BlobGCTest {
this.referencedBlobs = referencedBlobs;
}
+ public Set<String> getReferencedBlobs() {
+ return this.referencedBlobs;
+ }
+
@Override
public ArrayBasedBlob createBlob(InputStream in) {
try {
@@ -562,98 +634,59 @@ public class BlobGCTest {
/**
* Test in memory DS to store the contents with an increasing time
*/
- class TimeLapsedBlobStore implements GarbageCollectableBlobStore,
SharedDataStore {
+ class TimeLapsedDataStore implements DataStore, MultiDataStoreAware,
SharedDataStore, DataRecordAccessProvider {
+ public static final int MIN_RECORD_LENGTH = 50;
+
private final long startTime;
Map<String, DataRecord> store;
Map<String, DataRecord> metadata;
+ Map<String, String> uploadTokens;
- public TimeLapsedBlobStore() {
- this(System.currentTimeMillis());
- }
-
- public TimeLapsedBlobStore(long startTime) {
+ public TimeLapsedDataStore() {
this.startTime = clock.getTime();
store = Maps.newHashMap();
metadata = Maps.newHashMap();
+ uploadTokens = Maps.newHashMap();
}
- @Override public Iterator<String> getAllChunkIds(long
maxLastModifiedTime) throws Exception {
- return store.keySet().iterator();
- }
-
- @Override public boolean deleteChunks(List<String> chunkIds, long
maxLastModifiedTime) throws Exception {
- return (chunkIds.size() == countDeleteChunks(chunkIds,
maxLastModifiedTime));
- }
-
- @Override public long countDeleteChunks(List<String> chunkIds, long
maxLastModifiedTime) throws Exception {
- int count = 0;
-
- for(String id : chunkIds) {
- log.info("maxLastModifiedTime {}", maxLastModifiedTime);
- log.info("store.get(id).getLastModified() {}",
store.get(id).getLastModified());
- if (maxLastModifiedTime <= 0 ||
store.get(id).getLastModified() < maxLastModifiedTime) {
- store.remove(id);
- count++;
- }
+ @Override public DataRecord getRecordIfStored(DataIdentifier
identifier) throws DataStoreException {
+ if (store.containsKey(identifier.toString())) {
+ return getRecord(identifier);
}
- return count;
+ return null;
}
- @Override public Iterator<String> resolveChunks(String blobId) throws
IOException {
- return Iterators.singletonIterator(blobId);
+ @Override public DataRecord getRecord(DataIdentifier identifier)
throws DataStoreException {
+ return store.get(identifier.toString());
}
- @Override public String writeBlob(InputStream in) throws IOException {
- return writeBlob(in, new BlobOptions());
+ @Override public DataRecord getRecordFromReference(String reference)
throws DataStoreException {
+ return getRecord(new DataIdentifier(reference));
}
- @Override public String writeBlob(InputStream in, BlobOptions options)
throws IOException {
+ @Override public DataRecord addRecord(InputStream stream) throws
DataStoreException {
try {
- byte[] data = IOUtils.toByteArray(in);
+ byte[] data = IOUtils.toByteArray(stream);
String id = getIdForInputStream(new
ByteArrayInputStream(data));
- id += "#" + data.length;
TestRecord rec = new TestRecord(id, data, clock.getTime());
store.put(id, rec);
log.info("Blob created {} with timestamp {}", rec.id,
rec.lastModified);
- return id;
+ return rec;
} catch (Exception e) {
- throw new IOException(e);
- }
- }
-
- private String getIdForInputStream(final InputStream in)
- throws Exception {
- MessageDigest digest = MessageDigest.getInstance("SHA-256");
- OutputStream output = new DigestOutputStream(new
NullOutputStream(), digest);
- try {
- IOUtils.copyLarge(in, output);
- } finally {
- IOUtils.closeQuietly(output);
- IOUtils.closeQuietly(in);
+ throw new DataStoreException(e);
}
- return encodeHexString(digest.digest());
- }
- @Override public long getBlobLength(String blobId) throws IOException {
- return ((TestRecord) store.get(blobId)).data.length;
}
- @Override public InputStream getInputStream(String blobId) throws
IOException {
- try {
- return store.get(blobId).getStream();
- } catch (DataStoreException e) {
- e.printStackTrace();
- }
- return null;
+ @Override public Iterator<DataIdentifier> getAllIdentifiers() throws
DataStoreException {
+ return Iterators.transform(store.keySet().iterator(), input ->
new DataIdentifier(input));
}
- @Nullable @Override public String getBlobId(@NotNull String reference)
{
- return reference;
+ @Override public void deleteRecord(DataIdentifier identifier) throws
DataStoreException {
+ store.remove(identifier.toString());
}
- @Nullable @Override public String getReference(@NotNull String blobId)
{
- return blobId;
- }
+ /***************************************** SharedDataStore
***************************************/
@Override public void addMetadataRecord(InputStream stream, String
name) throws DataStoreException {
try {
@@ -729,8 +762,55 @@ public class BlobGCTest {
return store.get(id.toString());
}
- @Override public Type getType() {
- return Type.SHARED;
+ @Override public SharedDataStore.Type getType() {
+ return SharedDataStore.Type.SHARED;
+ }
+
+ /**************************** DataRecordAccessProvider
*************************/
+
+ @Override public @Nullable URI getDownloadURI(@NotNull DataIdentifier
identifier,
+ @NotNull DataRecordDownloadOptions downloadOptions) {
+ return null;
+ }
+
+ @Override
+ public @Nullable DataRecordUpload initiateDataRecordUpload(long
maxUploadSizeInBytes, int maxNumberOfURIs)
+ throws IllegalArgumentException, DataRecordUploadException {
+ String upToken = UUID.randomUUID().toString();
+ Random rand = new Random();
+ InputStream stream = randomStream(rand.nextInt(1000), 100);
+ byte[] data = new byte[0];
+ try {
+ data = IOUtils.toByteArray(stream);
+ } catch (IOException e) {
+ throw new DataRecordUploadException(e);
+ }
+ TestRecord rec = new TestRecord(upToken, data, clock.getTime());
+ store.put(upToken, rec);
+
+ DataRecordUpload uploadRec = new DataRecordUpload() {
+ @Override public @NotNull String getUploadToken() {
+ return upToken;
+ }
+
+ @Override public long getMinPartSize() {
+ return maxUploadSizeInBytes;
+ }
+
+ @Override public long getMaxPartSize() {
+ return maxUploadSizeInBytes;
+ }
+
+ @Override public @NotNull Collection<URI> getUploadURIs() {
+ return Collections.EMPTY_LIST;
+ }
+ };
+ return uploadRec;
+ }
+
+ @Override public @NotNull DataRecord completeDataRecordUpload(@NotNull
String uploadToken)
+ throws IllegalArgumentException, DataRecordUploadException,
DataStoreException {
+ return store.get(uploadToken);
}
class TestRecord implements DataRecord {
@@ -765,33 +845,38 @@ public class BlobGCTest {
}
}
- /** No-op **/
- @Override public int readBlob(String blobId, long pos, byte[] buff,
int off, int length) throws IOException {
- throw new UnsupportedOperationException("readBlob not supported");
- }
-
- @Override public void setBlockSize(int x) {
+ private String getIdForInputStream(final InputStream in)
+ throws Exception {
+ MessageDigest digest = MessageDigest.getInstance("SHA-256");
+ OutputStream output = new DigestOutputStream(new
NullOutputStream(), digest);
+ try {
+ IOUtils.copyLarge(in, output);
+ } finally {
+ IOUtils.closeQuietly(output);
+ IOUtils.closeQuietly(in);
+ }
+ return encodeHexString(digest.digest());
}
- @Override public String writeBlob(String tempFileName) throws
IOException {
- throw new UnsupportedOperationException("getBlockSizeMin not
supported");
+ /*************************************** No Op ***********************/
+ @Override public void init(String homeDir) throws RepositoryException {
}
- @Override public int sweep() throws IOException {
- throw new UnsupportedOperationException("sweep not supported");
+ @Override public void updateModifiedDateOnAccess(long before) {
}
- @Override public void startMark() throws IOException {
+ @Override public int deleteAllOlderThan(long min) throws
DataStoreException {
+ return 0;
}
- @Override public void clearInUse() {
+ @Override public int getMinRecordLength() {
+ return MIN_RECORD_LENGTH;
}
- @Override public void clearCache() {
+ @Override public void close() throws DataStoreException {
}
- @Override public long getBlockSizeMin() {
- throw new UnsupportedOperationException("getBlockSizeMin not
supported");
+ @Override public void clearInUse() {
}
}
}
Modified:
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java?rev=1862717&r1=1862716&r2=1862717&view=diff
==============================================================================
---
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java
(original)
+++
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java
Mon Jul 8 06:55:33 2019
@@ -56,25 +56,33 @@ import org.apache.jackrabbit.core.data.D
import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
import
org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.AzureDataStoreFixture;
import
org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.S3DataStoreFixture;
+import
org.apache.jackrabbit.oak.jcr.binary.fixtures.nodestore.DocumentMongoNodeStoreFixture;
import
org.apache.jackrabbit.oak.jcr.binary.fixtures.nodestore.SegmentMemoryNodeStoreFixture;
+import org.apache.jackrabbit.oak.jcr.binary.util.BinaryAccessDSGCFixture;
import org.apache.jackrabbit.oak.jcr.binary.util.Content;
+import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever;
import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
-import org.apache.jackrabbit.oak.segment.SegmentBlobReferenceRetriever;
import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
import org.apache.jackrabbit.oak.spi.cluster.ClusterRepositoryInfo;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
import org.junit.runners.Parameterized;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class BinaryAccessDSGCIT extends AbstractBinaryAccessIT {
+ private static Logger LOG =
LoggerFactory.getLogger(BinaryAccessDSGCIT.class);
+
private static final String TEST_ROOT = "testroot";
private static final long BINARY_SIZE = 1024*1024;
@@ -83,17 +91,24 @@ public class BinaryAccessDSGCIT extends
private static final String DIRECT_UPLOAD_1 = "du1";
private static final String DIRECT_UPLOAD_2 = "du2";
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
@Parameterized.Parameters(name = "{0}")
public static Iterable<?> dataStoreFixtures() {
Collection<NodeStoreFixture> fixtures = new ArrayList<>();
fixtures.add(new SegmentMemoryNodeStoreFixture(new
S3DataStoreFixture()));
+ fixtures.add(new DocumentMongoNodeStoreFixture(new
S3DataStoreFixture()));
+
fixtures.add(new SegmentMemoryNodeStoreFixture(new
AzureDataStoreFixture()));
+ fixtures.add(new DocumentMongoNodeStoreFixture(new
AzureDataStoreFixture()));
+
return fixtures;
}
public BinaryAccessDSGCIT(NodeStoreFixture fixture) {
// reuse NodeStore (and DataStore) across all tests in this class
- super(fixture, true);
+ super(fixture, false);
}
private Session session;
@@ -190,8 +205,6 @@ public class BinaryAccessDSGCIT extends
private MarkSweepGarbageCollector getGarbageCollector()
throws DataStoreException, IOException {
DataStoreBlobStore blobStore = (DataStoreBlobStore)
getNodeStoreComponent(BlobStore.class);
- FileStore fileStore = getNodeStoreComponent(FileStore.class);
- File fileStoreRoot = getNodeStoreComponent(FileStore.class.getName() +
":root");
if (null == garbageCollector) {
String repoId =
ClusterRepositoryInfo.getOrCreateId(getNodeStore());
@@ -200,11 +213,12 @@ public class BinaryAccessDSGCIT extends
if (null == executor) {
executor = (ThreadPoolExecutor)
Executors.newFixedThreadPool(10);
}
+ BlobReferenceRetriever referenceRetriever =
((BinaryAccessDSGCFixture) fixture).getBlobReferenceRetriever(getNodeStore());
garbageCollector = new MarkSweepGarbageCollector(
- new SegmentBlobReferenceRetriever(fileStore),
+ referenceRetriever,
blobStore,
executor,
- fileStoreRoot.getAbsolutePath(),
+ folder.newFolder().getAbsolutePath(),
2048,
0,
repoId
@@ -225,6 +239,8 @@ public class BinaryAccessDSGCIT extends
@Test
public void testGC() throws Exception {
+ LOG.info("Starting testGC [{}]", fixture);
+
Map<String, Content> binaryContent = Maps.newHashMap();
Map<String, Binary> binaries = Maps.newHashMap();
@@ -256,7 +272,6 @@ public class BinaryAccessDSGCIT extends
}
session.save();
-
// Verify that they are deleted from repo
for (String path : deletedBinaryPaths) {
assertFalse(session.nodeExists(toAbsolutePath(path)));
@@ -267,7 +282,7 @@ public class BinaryAccessDSGCIT extends
// Run DSGC
- compactFileStore();
+ ((BinaryAccessDSGCFixture) fixture).compactStore(getNodeStore());
MarkSweepGarbageCollector garbageCollector = getGarbageCollector();
garbageCollector.collectGarbage(false);
Copied:
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java
(from r1859351,
jackrabbit/oak/trunk/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java)
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java?p2=jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java&p1=jackrabbit/oak/trunk/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java&r1=1859351&r2=1862717&rev=1862717&view=diff
==============================================================================
---
jackrabbit/oak/trunk/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java
(original)
+++
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java
Mon Jul 8 06:55:33 2019
@@ -20,6 +20,7 @@ package org.apache.jackrabbit.oak.jcr.bi
import java.io.File;
import java.io.IOException;
+import java.util.UUID;
import java.util.concurrent.TimeUnit;
import javax.jcr.RepositoryException;
@@ -37,6 +38,7 @@ import org.apache.jackrabbit.oak.plugins
import
org.apache.jackrabbit.oak.plugins.document.DocumentBlobReferenceRetriever;
import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.MongoConnectionFactory;
+import org.apache.jackrabbit.oak.plugins.document.MongoUtils;
import org.apache.jackrabbit.oak.plugins.document.Revision;
import org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector;
import
org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;
@@ -62,19 +64,33 @@ public class DocumentMongoNodeStoreFixtu
private final Table<NodeStore, String, Object> components =
HashBasedTable.create();
private MongoConnection connection;
private final Clock clock;
- public final MongoConnectionFactory connFactory = new
MongoConnectionFactory();
-
+ public MongoConnectionFactory connFactory;
+ private String db;
public DocumentMongoNodeStoreFixture(@Nullable DataStoreFixture
dataStoreFixture) {
this.dataStoreFixture = dataStoreFixture;
this.clock = new Clock.Virtual();
}
+ /**
+ * Mandatory to be called to initialize the connectionFactory.
+ * Lazy initializes it to limit docker container init only if relevant
datastores available.
+ *
+ * @return
+ */
@Override
public boolean isAvailable() {
- this.connection = connFactory.getConnection();
+ db = UUID.randomUUID().toString();
// if a DataStore is configured, it must be available for our
NodeStore to be available
- return (dataStoreFixture == null || dataStoreFixture.isAvailable()) &&
(connection != null);
+ if ((dataStoreFixture == null || dataStoreFixture.isAvailable())) {
+ try {
+ this.connFactory = new MongoConnectionFactory();
+ this.connection = connFactory.getConnection(db);
+
+ return (connection != null);
+            } catch (Exception e) { /* best-effort: Mongo unreachable, fall through to report fixture unavailable */ }
+ }
+ return false;
}
@Override
@@ -123,7 +139,7 @@ public class DocumentMongoNodeStoreFixtu
@Override
public void dispose(NodeStore nodeStore) {
try {
- if (nodeStore instanceof DocumentNodeStore) {
+ if (nodeStore != null && nodeStore instanceof DocumentNodeStore) {
((DocumentNodeStore)nodeStore).dispose();
}
@@ -134,7 +150,10 @@ public class DocumentMongoNodeStoreFixtu
File dataStoreFolder = (File) components.get(nodeStore,
DataStore.class.getName() + ":folder");
FileUtils.deleteQuietly(dataStoreFolder);
}
- connection.close();
+ MongoUtils.dropDatabase(db);
+ if (connection != null) {
+ connection.close();
+ }
} finally {
components.row(nodeStore).clear();
}
Modified:
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java
URL:
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java?rev=1862717&r1=1862716&r2=1862717&view=diff
==============================================================================
---
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java
(original)
+++
jackrabbit/oak/branches/1.10/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java
Mon Jul 8 06:55:33 2019
@@ -26,9 +26,13 @@ import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
import
org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.DataStoreFixture;
+import org.apache.jackrabbit.oak.jcr.binary.util.BinaryAccessDSGCFixture;
import org.apache.jackrabbit.oak.jcr.util.ComponentHolder;
+import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.segment.SegmentBlobReferenceRetriever;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
@@ -46,7 +50,8 @@ import com.google.common.collect.Table;
* - SegmentNodeStore, storing data in-memory
* - an optional DataStore provided by DataStoreFixture
*/
-public class SegmentMemoryNodeStoreFixture extends NodeStoreFixture implements
ComponentHolder {
+public class SegmentMemoryNodeStoreFixture extends NodeStoreFixture implements
ComponentHolder,
+ BinaryAccessDSGCFixture {
private final Logger log = LoggerFactory.getLogger(getClass());
@@ -129,6 +134,19 @@ public class SegmentMemoryNodeStoreFixtu
}
@Override
+ public void compactStore(NodeStore nodeStore) {
+ FileStore fileStore = get(nodeStore, FileStore.class.getName());
+ for (int i = 0; i<
SegmentGCOptions.defaultGCOptions().getRetainedGenerations(); i++) {
+ fileStore.compactFull();
+ }
+ }
+
+ @Override
+ public BlobReferenceRetriever getBlobReferenceRetriever(NodeStore
nodeStore) {
+ return new SegmentBlobReferenceRetriever(get(nodeStore,
FileStore.class.getName()));
+ }
+
+ @Override
public String toString() {
// for nice Junit parameterized test labels
return FixtureUtils.getFixtureLabel(this, dataStoreFixture);