Author: mreutegg
Date: Wed May 20 07:23:49 2015
New Revision: 1680466

URL: http://svn.apache.org/r1680466
Log:
OAK-2778: DocumentNodeState is null for revision rx-x-x

Merged revisions 1675054 and 1675566 from trunk

Modified:
    jackrabbit/oak/branches/1.2/   (props changed)
    
jackrabbit/oak/branches/1.2/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
    
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCDeletionTest.java
    
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorTest.java

Propchange: jackrabbit/oak/branches/1.2/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 20 07:23:49 2015
@@ -1,3 +1,3 @@
 /jackrabbit/oak/branches/1.0:1665962
-/jackrabbit/oak/trunk:1672350,1672468,1672537,1672603,1672642,1672644,1672834-1672835,1673351,1673410,1673414,1673436,1673644,1673662-1673664,1673669,1673695,1674046,1674065,1674075,1674107,1674228,1674880,1675055,1675332,1675354,1675357,1675382,1675555,1675593,1676198,1676237,1676407,1676458,1676539,1676670,1676725,1677579,1677581,1677609,1677611,1677939,1677991,1678173,1678323,1678758,1678938,1678954,1679144,1679165,1679191,1679235,1680182,1680222,1680232,1680236,1680461
+/jackrabbit/oak/trunk:1672350,1672468,1672537,1672603,1672642,1672644,1672834-1672835,1673351,1673410,1673414,1673436,1673644,1673662-1673664,1673669,1673695,1674046,1674065,1674075,1674107,1674228,1674880,1675054-1675055,1675332,1675354,1675357,1675382,1675555,1675566,1675593,1676198,1676237,1676407,1676458,1676539,1676670,1676725,1677579,1677581,1677609,1677611,1677939,1677991,1678173,1678323,1678758,1678938,1678954,1679144,1679165,1679191,1679235,1680182,1680222,1680232,1680236,1680461
 /jackrabbit/trunk:1345480

Modified: 
jackrabbit/oak/branches/1.2/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.2/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java?rev=1680466&r1=1680465&r2=1680466&view=diff
==============================================================================
--- 
jackrabbit/oak/branches/1.2/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
 (original)
+++ 
jackrabbit/oak/branches/1.2/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
 Wed May 20 07:23:49 2015
@@ -19,31 +19,49 @@
 
 package org.apache.jackrabbit.oak.plugins.document;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import javax.annotation.Nonnull;
+
 import com.google.common.base.Joiner;
-import com.google.common.base.StandardSystemProperty;
+import com.google.common.base.Predicate;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 import org.apache.jackrabbit.oak.commons.sort.StringSort;
+import org.apache.jackrabbit.oak.plugins.document.UpdateOp.Condition;
+import org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.StandardSystemProperty.LINE_SEPARATOR;
+import static com.google.common.collect.ImmutableList.copyOf;
 import static com.google.common.collect.Iterators.partition;
+import static java.util.Collections.singletonMap;
+import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
+import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.MODIFIED_IN_SECS;
 import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.SplitDocType.COMMIT_ROOT_ONLY;
 import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.SplitDocType.DEFAULT_LEAF;
+import static 
org.apache.jackrabbit.oak.plugins.document.UpdateOp.Condition.newEqualsCondition;
 
 public class VersionGarbageCollector {
     //Kept less than MongoDocumentStore.IN_CLAUSE_BATCH_SIZE to avoid 
re-partitioning
     private static final int DELETE_BATCH_SIZE = 450;
+    private static final int PROGRESS_BATCH_SIZE = 10000;
+    private static final Key KEY_MODIFIED = new Key(MODIFIED_IN_SECS, null);
     private final DocumentNodeStore nodeStore;
+    private final DocumentStore ds;
     private final VersionGCSupport versionStore;
     private int overflowToDiskThreshold = 100000;
 
@@ -59,6 +77,7 @@ public class VersionGarbageCollector {
                             VersionGCSupport gcSupport) {
         this.nodeStore = nodeStore;
         this.versionStore = gcSupport;
+        this.ds = nodeStore.getDocumentStore();
     }
 
     public VersionGCStats gc(long maxRevisionAge, TimeUnit unit) throws 
IOException {
@@ -98,74 +117,45 @@ public class VersionGarbageCollector {
         versionStore.deleteSplitDocuments(GC_TYPES, oldestRevTimeStamp, stats);
     }
 
-    private void collectDeletedDocuments(VersionGCStats stats, Revision 
headRevision, long oldestRevTimeStamp)
+    private void collectDeletedDocuments(VersionGCStats stats,
+                                         Revision headRevision,
+                                         long oldestRevTimeStamp)
             throws IOException {
         int docsTraversed = 0;
-        final int progressBatchSize = 10000;
-        StringSort docIdsToDelete = new StringSort(overflowToDiskThreshold, 
NodeDocumentIdComparator.INSTANCE);
+        DeletedDocsGC gc = new DeletedDocsGC(headRevision);
         try {
             stats.collectDeletedDocs.start();
             Iterable<NodeDocument> itr = 
versionStore.getPossiblyDeletedDocs(oldestRevTimeStamp);
             try {
                 for (NodeDocument doc : itr) {
-                    //Check if node is actually deleted at current revision
-                    //As node is not modified since oldestRevTimeStamp then
-                    //this node has not be revived again in past maxRevisionAge
-                    //So deleting it is safe
+                    // Check if node is actually deleted at current revision
+                    // As node is not modified since oldestRevTimeStamp then
+                    // this node has not been revived again in past 
maxRevisionAge
+                    // So deleting it is safe
                     docsTraversed++;
-                    if (docsTraversed % progressBatchSize == 0){
-                        log.info("Iterated through {} documents so far. {} 
found to be deleted", docsTraversed, docIdsToDelete.getSize());
-                    }
-                    if (doc.getNodeAtRevision(nodeStore, headRevision, null) 
== null) {
-                        docIdsToDelete.add(doc.getId());
-                        //Collect id of all previous docs also
-                        for (NodeDocument prevDoc : 
ImmutableList.copyOf(doc.getAllPreviousDocs())) {
-                            docIdsToDelete.add(prevDoc.getId());
-                        }
+                    if (docsTraversed % PROGRESS_BATCH_SIZE == 0){
+                        log.info("Iterated through {} documents so far. {} 
found to be deleted",
+                                docsTraversed, gc.getNumDocuments());
                     }
+                    gc.possiblyDeleted(doc);
                 }
             } finally {
                 Utils.closeIfCloseable(itr);
             }
             stats.collectDeletedDocs.stop();
 
-            if (docIdsToDelete.isEmpty()){
+            if (gc.getNumDocuments() == 0){
                 return;
             }
 
-            docIdsToDelete.sort();
-            log.info("Proceeding to delete [{}] documents", 
docIdsToDelete.getSize());
-
             stats.deleteDeletedDocs.start();
-            Iterator<List<String>> idListItr = 
partition(docIdsToDelete.getIds(), DELETE_BATCH_SIZE);
-            int deletedCount = 0;
-            int lastLoggedCount = 0;
-            while (idListItr.hasNext()) {
-                List<String> deletionBatch = idListItr.next();
-                deletedCount += deletionBatch.size();
 
-                if (log.isDebugEnabled()) {
-                    StringBuilder sb = new StringBuilder("Performing batch 
deletion of documents with following ids. \n");
-                    
Joiner.on(StandardSystemProperty.LINE_SEPARATOR.value()).appendTo(sb, 
deletionBatch);
-                    log.debug(sb.toString());
-                }
-                log.debug("Deleted [{}] documents so far", deletedCount);
-
-                if (deletedCount - lastLoggedCount >= progressBatchSize){
-                    lastLoggedCount = deletedCount;
-                    double progress = deletedCount * 1.0 / 
docIdsToDelete.getSize() * 100;
-                    String msg = String.format("Deleted %d (%1.2f%%) documents 
so far", deletedCount, progress);
-                    log.info(msg);
-                }
-
-                nodeStore.getDocumentStore().remove(Collection.NODES, 
deletionBatch);
-            }
+            gc.removeDocuments(stats);
 
             nodeStore.invalidateDocChildrenCache();
             stats.deleteDeletedDocs.stop();
-            stats.deletedDocGCCount += docIdsToDelete.getSize();
         } finally {
-            docIdsToDelete.close();
+            gc.close();
         }
     }
 
@@ -190,4 +180,215 @@ public class VersionGarbageCollector {
                     '}';
         }
     }
+
+    /**
+     * A helper class to remove document for deleted nodes.
+     */
+    private class DeletedDocsGC implements Closeable {
+
+        private final Revision headRevision;
+        private final StringSort docIdsToDelete = newStringSort();
+        private final StringSort prevDocIdsToDelete = newStringSort();
+        private final Set<String> exclude = Sets.newHashSet();
+        private boolean sorted = false;
+
+        public DeletedDocsGC(@Nonnull Revision headRevision) {
+            this.headRevision = checkNotNull(headRevision);
+        }
+
+        /**
+         * @return the number of documents gathered so far that have been
+         * identified as garbage via {@link #possiblyDeleted(NodeDocument)}.
+         * This number does not include the previous documents.
+         */
+        long getNumDocuments() {
+            return docIdsToDelete.getSize();
+        }
+
+        /**
+         * Informs the GC that the given document is possibly deleted. The
+         * implementation will check if the node still exists at the head
+         * revision passed to the constructor to this GC. The implementation
+         * will keep track of documents representing deleted nodes and remove
+         * them together with the associated previous documents.
+         *
+         * @param doc the candidate document.
+         */
+        void possiblyDeleted(NodeDocument doc)
+                throws IOException {
+            if (doc.getNodeAtRevision(nodeStore, headRevision, null) == null) {
+                // construct an id that also contains
+                // the _modified time of the document
+                String id = doc.getId() + "/" + doc.getModified();
+                addDocument(id);
+                // Collect id of all previous docs also
+                for (NodeDocument prevDoc : copyOf(doc.getAllPreviousDocs())) {
+                    addPreviousDocument(prevDoc.getId());
+                }
+            }
+        }
+
+        /**
+         * Removes the documents that have been identified as garbage. This
+         * also includes previous documents. This method will only remove
+         * documents that have not been modified since they were passed to
+         * {@link #possiblyDeleted(NodeDocument)}.
+         *
+         * @param stats to track the number of removed documents.
+         */
+        void removeDocuments(VersionGCStats stats) throws IOException {
+            stats.deletedDocGCCount += removeDeletedDocuments();
+            // FIXME: this is incorrect because that method also removes 
intermediate docs
+            stats.splitDocGCCount += removeDeletedPreviousDocuments();
+        }
+
+        public void close() {
+            try {
+                docIdsToDelete.close();
+            } catch (IOException e) {
+                log.warn("Failed to close docIdsToDelete", e);
+            }
+            try {
+                prevDocIdsToDelete.close();
+            } catch (IOException e) {
+                log.warn("Failed to close prevDocIdsToDelete", e);
+            }
+        }
+
+        //------------------------------< internal 
>----------------------------
+
+        private void addDocument(String id) throws IOException {
+            docIdsToDelete.add(id);
+        }
+
+        private long getNumPreviousDocuments() {
+            return prevDocIdsToDelete.getSize() - exclude.size();
+        }
+
+        private void addPreviousDocument(String id) throws IOException {
+            prevDocIdsToDelete.add(id);
+        }
+
+        private Iterator<String> getDocIdsToDelete() throws IOException {
+            ensureSorted();
+            return docIdsToDelete.getIds();
+        }
+
+        private void concurrentModification(NodeDocument doc) {
+            for (NodeDocument prevDoc : copyOf(doc.getAllPreviousDocs())) {
+                exclude.add(prevDoc.getId());
+            }
+        }
+
+        private Iterator<String> getPrevDocIdsToDelete() throws IOException {
+            ensureSorted();
+            return Iterators.filter(prevDocIdsToDelete.getIds(),
+                    new Predicate<String>() {
+                @Override
+                public boolean apply(String input) {
+                    return !exclude.contains(input);
+                }
+            });
+        }
+
+        private int removeDeletedDocuments() throws IOException {
+            Iterator<String> docIdsToDelete = getDocIdsToDelete();
+            log.info("Proceeding to delete [{}] documents", getNumDocuments());
+
+            Iterator<List<String>> idListItr = partition(docIdsToDelete, 
DELETE_BATCH_SIZE);
+            int deletedCount = 0;
+            int lastLoggedCount = 0;
+            int recreatedCount = 0;
+            while (idListItr.hasNext()) {
+                Map<String, Map<Key, Condition>> deletionBatch = 
Maps.newLinkedHashMap();
+                for (String s : idListItr.next()) {
+                    int idx = s.lastIndexOf('/');
+                    String id = s.substring(0, idx);
+                    long modified = -1;
+                    try {
+                        modified = Long.parseLong(s.substring(idx + 1));
+                    } catch (NumberFormatException e) {
+                        log.warn("Invalid _modified {} for {}", 
s.substring(idx + 1), id);
+                    }
+                    deletionBatch.put(id, singletonMap(KEY_MODIFIED, 
newEqualsCondition(modified)));
+                }
+
+                if (log.isDebugEnabled()) {
+                    StringBuilder sb = new StringBuilder("Performing batch 
deletion of documents with following ids. \n");
+                    Joiner.on(LINE_SEPARATOR.value()).appendTo(sb, 
deletionBatch.keySet());
+                    log.debug(sb.toString());
+                }
+
+                int nRemoved = ds.remove(NODES, deletionBatch);
+
+                if (nRemoved < deletionBatch.size()) {
+                    // some nodes were re-created while GC was running
+                    // find the documents that still exist
+                    for (String id : deletionBatch.keySet()) {
+                        NodeDocument d = ds.find(NODES, id);
+                        if (d != null) {
+                            concurrentModification(d);
+                        }
+                    }
+                    recreatedCount += (deletionBatch.size() - nRemoved);
+                }
+
+                deletedCount += nRemoved;
+                log.debug("Deleted [{}] documents so far", deletedCount);
+
+                if (deletedCount + recreatedCount - lastLoggedCount >= 
PROGRESS_BATCH_SIZE){
+                    lastLoggedCount = deletedCount + recreatedCount;
+                    double progress = lastLoggedCount * 1.0 / 
getNumDocuments() * 100;
+                    String msg = String.format("Deleted %d (%1.2f%%) documents 
so far", deletedCount, progress);
+                    log.info(msg);
+                }
+            }
+            return deletedCount;
+        }
+
+        private int removeDeletedPreviousDocuments() throws IOException {
+            log.info("Proceeding to delete [{}] previous documents", 
getNumPreviousDocuments());
+
+            int deletedCount = 0;
+            int lastLoggedCount = 0;
+            Iterator<List<String>> idListItr =
+                    partition(getPrevDocIdsToDelete(), DELETE_BATCH_SIZE);
+            while (idListItr.hasNext()) {
+                List<String> deletionBatch = idListItr.next();
+                deletedCount += deletionBatch.size();
+
+                if (log.isDebugEnabled()) {
+                    StringBuilder sb = new StringBuilder("Performing batch 
deletion of previous documents with following ids. \n");
+                    Joiner.on(LINE_SEPARATOR.value()).appendTo(sb, 
deletionBatch);
+                    log.debug(sb.toString());
+                }
+
+                ds.remove(NODES, deletionBatch);
+
+                log.debug("Deleted [{}] previous documents so far", 
deletedCount);
+
+                if (deletedCount - lastLoggedCount >= PROGRESS_BATCH_SIZE){
+                    lastLoggedCount = deletedCount;
+                    double progress = deletedCount * 1.0 / 
(prevDocIdsToDelete.getSize() - exclude.size()) * 100;
+                    String msg = String.format("Deleted %d (%1.2f%%) previous 
documents so far", deletedCount, progress);
+                    log.info(msg);
+                }
+            }
+            return deletedCount;
+        }
+
+        private void ensureSorted() throws IOException {
+            if (!sorted) {
+                docIdsToDelete.sort();
+                prevDocIdsToDelete.sort();
+                sorted = true;
+            }
+        }
+    }
+
+    @Nonnull
+    private StringSort newStringSort() {
+        return new StringSort(overflowToDiskThreshold,
+                NodeDocumentIdComparator.INSTANCE);
+    }
 }

Modified: 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCDeletionTest.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCDeletionTest.java?rev=1680466&r1=1680465&r2=1680466&view=diff
==============================================================================
--- 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCDeletionTest.java
 (original)
+++ 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCDeletionTest.java
 Wed May 20 07:23:49 2015
@@ -253,11 +253,11 @@ public class VersionGCDeletionTest {
     private static class TestDocumentStore extends MemoryDocumentStore {
         boolean throwException;
         @Override
-        public <T extends Document> void remove(Collection<T> collection, 
String path) {
-            if (throwException && "2:/x/y".equals(path)){
+        public <T extends Document> void remove(Collection<T> collection, 
String key) {
+            if (throwException && "2:/x/y".equals(key)){
                 throw new AssertionError();
             }
-            super.remove(collection, path);
+            super.remove(collection, key);
         }
 
         @SuppressWarnings("unchecked")

Modified: 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorTest.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorTest.java?rev=1680466&r1=1680465&r2=1680466&view=diff
==============================================================================
--- 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorTest.java
 (original)
+++ 
jackrabbit/oak/branches/1.2/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorTest.java
 Wed May 20 07:23:49 2015
@@ -23,10 +23,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
+import static com.google.common.collect.Iterables.filter;
 import static com.google.common.collect.Iterables.size;
 import static java.util.concurrent.TimeUnit.HOURS;
+import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
 import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.NUM_REVS_THRESHOLD;
 import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.PREV_SPLIT_FACTOR;
@@ -38,12 +45,15 @@ import static org.junit.Assert.assertNot
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Queues;
 import com.google.common.collect.Sets;
 
 import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
@@ -67,6 +77,8 @@ public class VersionGarbageCollectorTest
 
     private VersionGarbageCollector gc;
 
+    private ExecutorService execService;
+
     public VersionGarbageCollectorTest(DocumentStoreFixture fixture) {
         this.fixture = fixture;
     }
@@ -90,6 +102,7 @@ public class VersionGarbageCollectorTest
 
     @Before
     public void setUp() throws InterruptedException {
+        execService = Executors.newCachedThreadPool();
         clock = new Clock.Virtual();
         store = new DocumentMK.Builder()
                 .clock(clock)
@@ -106,6 +119,8 @@ public class VersionGarbageCollectorTest
     public void tearDown() throws Exception {
         store.dispose();
         Revision.resetClockToDefault();
+        execService.shutdown();
+        execService.awaitTermination(1, MINUTES);
     }
 
     @Test
@@ -384,6 +399,103 @@ public class VersionGarbageCollectorTest
         assertTrue("too many revisions: " + numRevs, numRevs < 6000);
     }
 
+    // OAK-2778
+    @Test
+    public void gcWithConcurrentModification() throws Exception {
+        Revision.setClock(clock);
+        DocumentStore ds = store.getDocumentStore();
+
+        // create test content
+        createTestNode("foo");
+        createTestNode("bar");
+
+        // remove again
+        NodeBuilder builder = store.getRoot().builder();
+        builder.getChildNode("foo").remove();
+        builder.getChildNode("bar").remove();
+        merge(store, builder);
+
+        // wait one hour
+        clock.waitUntil(clock.getTime() + HOURS.toMillis(1));
+
+        final BlockingQueue<NodeDocument> docs = Queues.newSynchronousQueue();
+        VersionGCSupport gcSupport = new 
VersionGCSupport(store.getDocumentStore()) {
+            @Override
+            public Iterable<NodeDocument> getPossiblyDeletedDocs(long 
lastModifiedTime) {
+                return filter(super.getPossiblyDeletedDocs(lastModifiedTime),
+                        new Predicate<NodeDocument>() {
+                            @Override
+                            public boolean apply(NodeDocument input) {
+                                try {
+                                    docs.put(input);
+                                } catch (InterruptedException e) {
+                                    throw new RuntimeException(e);
+                                }
+                                return true;
+                            }
+                        });
+            }
+        };
+        final VersionGarbageCollector gc = new VersionGarbageCollector(store, 
gcSupport);
+        // start GC -> will try to remove /foo and /bar
+        Future<VersionGCStats> f = execService.submit(new 
Callable<VersionGCStats>() {
+            @Override
+            public VersionGCStats call() throws Exception {
+                return gc.gc(30, MINUTES);
+            }
+        });
+
+        NodeDocument doc = docs.take();
+        String name = PathUtils.getName(doc.getPath());
+        // recreate node, which hasn't been removed yet
+        name = name.equals("foo") ? "bar" : "foo";
+        builder = store.getRoot().builder();
+        builder.child(name);
+        merge(store, builder);
+
+        // loop over child node entries -> will populate nodeChildrenCache
+        for (ChildNodeEntry cne : store.getRoot().getChildNodeEntries()) {
+            cne.getName();
+        }
+        // invalidate cached DocumentNodeState
+        DocumentNodeState state = (DocumentNodeState) 
store.getRoot().getChildNode(name);
+        store.invalidateNodeCache(state.getPath(), state.getRevision());
+
+        while (!f.isDone()) {
+            docs.poll();
+        }
+
+        // read children again after GC finished
+        List<String> names = Lists.newArrayList();
+        for (ChildNodeEntry cne : store.getRoot().getChildNodeEntries()) {
+            names.add(cne.getName());
+        }
+        assertEquals(1, names.size());
+
+        doc = ds.find(NODES, Utils.getIdFromPath("/" + names.get(0)));
+        assertNotNull(doc);
+        assertEquals(0, Iterators.size(doc.getAllPreviousDocs()));
+
+        VersionGCStats stats = f.get();
+        assertEquals(1, stats.deletedDocGCCount);
+        assertEquals(2, stats.splitDocGCCount);
+    }
+
+    private void createTestNode(String name) throws CommitFailedException {
+        DocumentStore ds = store.getDocumentStore();
+        NodeBuilder builder = store.getRoot().builder();
+        builder.child(name);
+        merge(store, builder);
+        String id = Utils.getIdFromPath("/" + name);
+        int i = 0;
+        while (ds.find(NODES, id).getPreviousRanges().isEmpty()) {
+            builder = store.getRoot().builder();
+            builder.getChildNode(name).setProperty("p", i++);
+            merge(store, builder);
+            store.runBackgroundOperations();
+        }
+    }
+
     private void merge(DocumentNodeStore store, NodeBuilder builder)
             throws CommitFailedException {
         store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);


Reply via email to