Author: mreutegg
Date: Wed Apr  1 16:12:43 2015
New Revision: 1670705

URL: http://svn.apache.org/r1670705
Log:
OAK-2673: Resolve add-add, delete-delete merge conflict for empty hidden docs

Applied Vikas Saurabh's patch with a feature flag

Modified:
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
    
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java?rev=1670705&r1=1670704&r2=1670705&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
 Wed Apr  1 16:12:43 2015
@@ -471,12 +471,12 @@ public class Commit {
             }
             String conflictMessage = null;
             if (newestRev == null) {
-                if (op.isDelete() || !op.isNew()) {
+                if ((op.isDelete() || !op.isNew()) && isConflicting(before, 
op)) {
                     conflictMessage = "The node " +
                             op.getId() + " does not exist or is already 
deleted";
                 }
             } else {
-                if (op.isNew()) {
+                if (op.isNew() && isConflicting(before, op)) {
                     conflictMessage = "The node " +
                             op.getId() + " was already added in revision\n" +
                             newestRev;
@@ -545,7 +545,8 @@ public class Commit {
             // or document did not exist before
             return false;
         }
-        return doc.isConflicting(op, baseRevision, revision, nodeStore);
+        return doc.isConflicting(op, baseRevision, revision, nodeStore,
+                nodeStore.getEnableConcurrentAddRemove());
     }
 
     /**

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java?rev=1670705&r1=1670704&r2=1670705&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
 Wed Apr  1 16:12:43 2015
@@ -134,6 +134,13 @@ public final class DocumentNodeStore
             Integer.getInteger("oak.documentMK.revisionAge", 60 * 1000);
 
     /**
+     * Feature flag to enable concurrent add/remove operations of hidden empty
+     * nodes. See OAK-2673.
+     */
+    private boolean enableConcurrentAddRemove =
+            Boolean.getBoolean("oak.enableConcurrentAddRemove");
+
+    /**
      * How long to remember the relative order of old revision of all cluster
      * nodes, in milliseconds. The default is one hour.
      */
@@ -652,6 +659,14 @@ public final class DocumentNodeStore
         return maxBackOffMillis;
     }
 
+    void setEnableConcurrentAddRemove(boolean b) {
+        enableConcurrentAddRemove = b;
+    }
+
+    boolean getEnableConcurrentAddRemove() {
+        return enableConcurrentAddRemove;
+    }
+
     @CheckForNull
     public ClusterNodeInfo getClusterInfo() {
         return clusterNodeInfo;

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java?rev=1670705&r1=1670704&r2=1670705&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
 Wed Apr  1 16:12:43 2015
@@ -925,22 +925,30 @@ public final class NodeDocument extends
      * @param baseRevision the base revision for the update operation.
      * @param commitRevision the commit revision of the update operation.
      * @param context the revision context.
+     * @param enableConcurrentAddRemove feature flag for OAK-2673.
      * @return <code>true</code> if conflicting, <code>false</code> otherwise.
      */
     boolean isConflicting(@Nonnull UpdateOp op,
-                                 @Nonnull Revision baseRevision,
-                                 @Nonnull Revision commitRevision,
-                                 @Nonnull RevisionContext context) {
+                          @Nonnull Revision baseRevision,
+                          @Nonnull Revision commitRevision,
+                          @Nonnull RevisionContext context,
+                          boolean enableConcurrentAddRemove) {
         // did existence of node change after baseRevision?
         // only check local deleted map, which contains the most
         // recent values
         Map<Revision, String> deleted = getLocalDeleted();
+        boolean allowConflictingDeleteChange =
+                enableConcurrentAddRemove && allowConflictingDeleteChange(op);
         for (Map.Entry<Revision, String> entry : deleted.entrySet()) {
             if (entry.getKey().equals(commitRevision)) {
                 continue;
             }
+
             if (isRevisionNewer(context, entry.getKey(), baseRevision)) {
-                return true;
+                boolean newerDeleted = Boolean.parseBoolean(entry.getValue());
+                if (!allowConflictingDeleteChange || op.isDelete() != 
newerDeleted) {
+                    return true;
+                }
             }
         }
 
@@ -949,7 +957,7 @@ public final class NodeDocument extends
                 continue;
             }
             String name = entry.getKey().getName();
-            if (DELETED.equals(name)) {
+            if (DELETED.equals(name) && !allowConflictingDeleteChange) {
                 // existence of node changed, this always conflicts with
                 // any other concurrent change
                 return true;
@@ -971,6 +979,55 @@ public final class NodeDocument extends
     }
 
     /**
+     * Utility method to check if {@code op} can be allowed to change
+     * {@link #DELETED} property. Basic idea is that a change in
+     * {@link #DELETED} property should be consistent if the final value is the
+     * same and there is no observable semantic change. Thus, this method tries to
+     * be very conservative and allows delete iff:
+     * <ul>
+     *     <li>{@code doc} represents an internal path</li>
+     *     <li>{@code op} represents an add or delete operation</li>
+     *     <li>{@code op} doesn't add, change or delete any exposed property</li>
+     *     <li>{@code doc} doesn't have any exposed property</li>
+     * </ul>
+     * <i>
+     * Note: This method is a broad-level check of whether such conflict
+     * resolution can be allowed at all. The actual cases, like allow-delete-delete
+     * and allow-add-add with respect to revisions, are not handled here.
+     * </i>
+     * @param op {@link UpdateOp} instance having changes to check {@code doc} 
against
+     * @return if conflicting change in {@link #DELETED} property is allowed
+     */
+    private boolean allowConflictingDeleteChange(UpdateOp op) {
+        String path = getPath();
+        if (!Utils.isHiddenPath(path)) {
+            return false;
+        }
+
+        if (!op.isNew() && !op.isDelete()) {
+            return false;//only handle added/delete operations
+        }
+
+        for (Key opKey : op.getChanges().keySet()) {
+            String name = opKey.getName();
+            if (Utils.isPropertyName(name)) {
+                return false; //only handle changes to internal properties
+            }
+        }
+
+        // Only look at local data ...
+        // even remotely updated properties should have an entry (although 
invisible)
+        // by the time we are looking for conflicts
+        for (String dataKey : keySet()) {
+            if (Utils.isPropertyName(dataKey)) {
+                return false; //only handle changes to internal properties
+            }
+        }
+
+        return true;
+    }
+
+    /**
      * Returns update operations to split this document. The implementation may
      * decide to not return any operations if no splitting is required.
      *

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java?rev=1670705&r1=1670704&r2=1670705&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
 Wed Apr  1 16:12:43 2015
@@ -582,4 +582,11 @@ public class Utils {
         };
     }
 
+    /**
+     * @return {@code true} if {@code path} represents an oak internal path. That
+     *          is, some path element starts with a colon.
+     */
+    public static boolean isHiddenPath(@Nonnull String path) {
+        return path.contains("/:");
+    }
 }

Modified: 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java?rev=1670705&r1=1670704&r2=1670705&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java
 Wed Apr  1 16:12:43 2015
@@ -53,12 +53,16 @@ import java.util.concurrent.atomic.Atomi
 import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 
+import com.google.common.base.Throwables;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.api.PropertyState;
 import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.commit.AnnotatingConflictHandler;
+import org.apache.jackrabbit.oak.plugins.commit.ConflictHook;
+import org.apache.jackrabbit.oak.plugins.commit.ConflictValidatorProvider;
 import org.apache.jackrabbit.oak.plugins.document.cache.CacheInvalidationStats;
 import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
 import 
org.apache.jackrabbit.oak.plugins.document.util.TimingDocumentStoreWrapper;
@@ -1090,6 +1094,430 @@ public class DocumentNodeStoreTest {
     }
 
     @Test
+    public void mergeInternalDocAcrossCluster() throws Exception {
+        MemoryDocumentStore docStore = new MemoryDocumentStore();
+        final DocumentNodeStore store1 = new DocumentMK.Builder()
+                .setDocumentStore(docStore).setAsyncDelay(0)
+                .setClusterId(1)
+                .getNodeStore();
+        store1.setEnableConcurrentAddRemove(true);
+        final DocumentNodeStore store2 = new DocumentMK.Builder()
+                .setDocumentStore(docStore).setAsyncDelay(0)
+                .setClusterId(2)
+                .getNodeStore();
+        store2.setEnableConcurrentAddRemove(true);
+        try {
+
+            NodeState root;
+            NodeBuilder builder;
+
+            //Prepare repo
+            root = store1.getRoot();
+            builder = root.builder();
+            builder.child(":hidden").child("deleteDeleted");
+            builder.child(":hidden").child("deleteChanged");
+            builder.child(":hidden").child("changeDeleted");
+            merge(store1, builder);
+            store1.runBackgroundOperations();
+            store2.runBackgroundOperations();
+
+            //Changes in store1
+            root = store1.getRoot();
+            builder = root.builder();
+            builder.child("visible");
+            builder.child(":hidden").child("b");
+            builder.child(":hidden").child("deleteDeleted").remove();
+            builder.child(":hidden").child("changeDeleted").remove();
+            builder.child(":hidden").child("deleteChanged").setProperty("foo", 
"bar");
+            builder.child(":dynHidden").child("c");
+            
builder.child(":dynHidden").child("childWithProp").setProperty("foo", "bar");
+            merge(store1, builder);
+
+            //Changes in store2
+
+            //root would hold reference to store2 root state after initial 
repo initialization
+            root = store2.getRoot();
+
+            //The hidden node itself should be creatable across cluster 
concurrently
+            builder = root.builder();
+            builder.child(":dynHidden");
+            merge(store2, builder);
+
+            //Children of hidden node should be creatable across cluster 
concurrently
+            builder = root.builder();
+            builder.child(":hidden").child("b");
+            builder.child(":dynHidden").child("c");
+            merge(store2, builder);
+
+            //Deleted deleted conflict of internal node should work across 
cluster concurrently
+            builder = root.builder();
+            builder.child(":hidden").child("deleteDeleted").remove();
+            merge(store2, builder);
+
+            //Avoid repeated merge tries ... fail early
+            store2.setMaxBackOffMillis(0);
+
+            boolean commitFailed = false;
+            try {
+                builder = root.builder();
+                builder.child("visible");
+                merge(store2, builder);
+            } catch (CommitFailedException cfe) {
+                commitFailed = true;
+            }
+            assertTrue("Concurrent creation of visible node across cluster 
must fail", commitFailed);
+
+            commitFailed = false;
+            try {
+                builder = root.builder();
+                
builder.child(":dynHidden").child("childWithProp").setProperty("foo", "bar");
+                merge(store2, builder);
+            } catch (CommitFailedException cfe) {
+                commitFailed = true;
+            }
+            assertTrue("Concurrent creation of hidden node with properties 
across cluster must fail", commitFailed);
+
+            commitFailed = false;
+            try {
+                builder = root.builder();
+                builder.child(":hidden").child("deleteChanged").remove();
+                merge(store2, builder);
+            } catch (CommitFailedException cfe) {
+                commitFailed = true;
+            }
+            assertTrue("Delete changed merge across cluster must fail even 
under hidden tree", commitFailed);
+
+            commitFailed = false;
+            try {
+                builder = root.builder();
+                
builder.child(":hidden").child("changeDeleted").setProperty("foo", "bar");
+                merge(store2, builder);
+            } catch (CommitFailedException cfe) {
+                commitFailed = true;
+            }
+            assertTrue("Change deleted merge across cluster must fail even 
under hidden tree", commitFailed);
+        } finally {
+            store2.dispose();
+            store1.dispose();
+        }
+    }
+
+    @Test
+    public void mergeDeleteDeleteEmptyInternalDoc() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            NodeBuilder builder = store.getRoot().builder();
+            builder.child(":a");
+            builder.child(":b");
+            merge(store, builder);
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1"}, new String[]{":a"},
+                    new String[]{":2"}, new String[]{":b"},
+                    new String[]{":3"}, new String[]{":a", ":b"},
+                    true, "Delete-delete merge conflicts for internal docs 
should be resolved");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeDeleteDeleteNonEmptyInternalDocShouldFail() throws 
Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            NodeBuilder builder = store.getRoot().builder();
+            builder.child(":a").setProperty("foo", "bar");
+            builder.child(":b");
+            merge(store, builder);
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1"}, new String[]{":a"},
+                    new String[]{":2"}, new String[]{":b"},
+                    new String[]{":3"}, new String[]{":a", ":b"},
+                    false, "Delete-delete merge conflicts for non-empty 
internal docs should fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeDeleteDeleteNormalDocShouldFail() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            NodeBuilder builder = store.getRoot().builder();
+            builder.child("a");
+            builder.child("b");
+            merge(store, builder);
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1"}, new String[]{"a"},
+                    new String[]{":2"}, new String[]{"b"},
+                    new String[]{":3"}, new String[]{"a", "b"},
+                    false, "Delete-delete merge conflicts for normal docs 
should fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeAddAddEmptyInternalDoc() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1", ":a"}, new String[]{},
+                    new String[]{":2", ":b"}, new String[]{},
+                    new String[]{":3", ":a", ":b"}, new String[]{},
+                    true, "Add-add merge conflicts for internal docs should be 
resolvable");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeAddAddNonEmptyInternalDocShouldFail() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1", ":a"}, new String[]{}, true,
+                    new String[]{":2", ":b"}, new String[]{}, true,
+                    new String[]{":3", ":a", ":b"}, new String[]{}, false,
+                    false, "Add-add merge conflicts for non empty internal 
docs should fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeAddAddNormalDocShouldFail() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1", "a"}, new String[]{},
+                    new String[]{":2", "b"}, new String[]{},
+                    new String[]{":3", "a", "b"}, new String[]{},
+                    false, "Add-add merge conflicts for normal docs should 
fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeDeleteChangedInternalDocShouldFail() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            NodeBuilder builder = store.getRoot().builder();
+            builder.child(":a");
+            builder.child(":b");
+            merge(store, builder);
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1", ":a"}, new String[]{}, true,
+                    new String[]{":2", ":b"}, new String[]{}, true,
+                    new String[]{":3"}, new String[]{":a", ":b"}, false,
+                    false, "Delete changed merge conflicts for internal docs 
should fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    @Test
+    public void mergeChangeDeletedInternalDocShouldFail() throws Exception {
+        final DocumentNodeStore store = new 
DocumentMK.Builder().getNodeStore();
+        store.setEnableConcurrentAddRemove(true);
+        try {
+            NodeBuilder builder = store.getRoot().builder();
+            builder.child(":a");
+            builder.child(":b");
+            merge(store, builder);
+            SingleInstanceConflictUtility.generateConflict(store,
+                    new String[]{":1"}, new String[]{":a"}, false,
+                    new String[]{":2"}, new String[]{":b"}, false,
+                    new String[]{":3", ":a", ":b"}, new String[]{}, true,
+                    false, "Change deleted merge conflicts for internal docs 
should fail");
+        } finally {
+            store.dispose();
+        }
+    }
+
+    /**
+     * Utility class that eases creating single cluster id merge conflicts. 
The two methods:
+     * <ul>
+     *     <li>{@link #generateConflict(DocumentNodeStore, String[], String[], 
String[], String[], String[], String[], boolean, String)}</li>
+     *     <li>{@link #generateConflict(DocumentNodeStore, String[], String[], 
boolean, String[], String[], boolean, String[], String[], boolean, boolean, 
String)}</li>
+     * </ul>
+     * can be passed descriptions of the modifications required to create a
+     * conflict. These methods also take an expectation of whether the merge
+     * conflict resolution should succeed or fail. If that expectation is not met,
+     * these methods fail the test.
+     */
+    private static class SingleInstanceConflictUtility {
+        /**
+         * Wrapper of {@link #generateConflict(DocumentNodeStore, String[], 
String[], boolean, String[], String[], boolean, String[], String[], boolean, 
boolean, String)}
+         * with value of {@code change1, change2, and change3} as {@code false}
+         */
+        public static void generateConflict(final DocumentNodeStore store,
+                                            String [] normalAddChildren1, 
String [] normalRemoveChildren1,
+                                            String [] normalAddChildren2, 
String [] normalRemoveChildren2,
+                                            String [] conflictingAddChildren3, 
String [] conflictingRemoveChildren3,
+                                            boolean shouldMerge, String 
assertMessage)
+                throws CommitFailedException, InterruptedException {
+            generateConflict(store,
+                    normalAddChildren1, normalRemoveChildren1, false,
+                    normalAddChildren2, normalRemoveChildren2, false,
+                    conflictingAddChildren3, conflictingRemoveChildren3, false,
+                    shouldMerge, assertMessage
+                    );
+        }
+
+        /**
+         * This method takes 3 descriptions of changes for conflict to happen. 
Each description has a set of
+         * {@code AddChildren}, {@code RemoveChildren}, and {@code change} 
parameters. {@code AddChildren} is an
+         * array of children to be added, {@code RemoveChildren} is an array 
of children to be removed, and
+         * {@code change} controls if a property (hard-coded to {@code 
@foo=bar}) needs to be set on children
+         * that are part of {@code AddChildren} array.
+         * The changes should be such that set1 changes and set2 changes 
should be safe. The conflict should be
+         * represented by changes in set3 -- and the conflict should exist 
against both set1 and set2.
+         * These 3 descriptions are then used to create changes on 3 threads in
+         * such a way that by the time the thread applying the set3 changes gets
+         * around to persisting them, more revisions have been committed. In case
+         * the conflict couldn't be resolved, the thread applying the set3 changes
+         * (thread2 in the implementation) would report an exception, which is
+         * tested against {@code mergeable}.
+         * @throws InterruptedException
+         */
+        public static void generateConflict(final DocumentNodeStore store,
+                                            String [] normalAddChildren1, 
String [] normalRemoveChildren1, boolean change1,
+                                            String [] normalAddChildren2, 
String [] normalRemoveChildren2, boolean change2,
+                                            String [] conflictingAddChildren3, 
String [] conflictingRemoveChildren3, boolean change3,
+                                            boolean mergeable, String 
assertMessage)
+                throws InterruptedException {
+            //This would result in 0 retries... 1 rebase would happen and we'd 
control it :D
+            store.setMaxBackOffMillis(0);
+
+            SingleInstanceConflictUtility thread1 = new 
SingleInstanceConflictUtility();
+            SingleInstanceConflictUtility thread3 = new 
SingleInstanceConflictUtility();
+            SingleInstanceConflictUtility thread2 = new 
SingleInstanceConflictUtility();
+
+            thread1.startMerge(store, normalAddChildren1, 
normalRemoveChildren1, change1);
+            thread2.startMerge(store, conflictingAddChildren3, 
conflictingRemoveChildren3, change3);
+
+            thread1.join();
+            thread2.waitForNextMerge();
+
+            thread3.startMerge(store, normalAddChildren2, 
normalRemoveChildren2, change2);
+            thread3.join();
+
+            thread2.join();
+
+            assertNull("There shouldn't be any exception for thread1", 
thread1.getException());
+            assertNull("There shouldn't be any exception for thread3", 
thread3.getException());
+
+            CommitFailedException cfe = thread2.getException();
+            if (mergeable != (cfe == null)) {
+                StringBuffer message = new StringBuffer(assertMessage);
+                if (cfe != null) {
+                    message.append("\n");
+                    message.append(Throwables.getStackTraceAsString(cfe));
+                }
+                fail(message.toString());
+            }
+        }
+
+        private Thread merger;
+        private CommitFailedException mergeException = null;
+
+        private boolean dontBlock;
+        private final Semaphore controller = new Semaphore(0);
+        private final Semaphore controllee = new Semaphore(0);
+
+        private void startMerge(final NodeStore store,
+                                @Nonnull String [] addChildren, @Nonnull 
String [] removeChildren, boolean change) {
+            startMerge(store, null, addChildren, removeChildren, change);
+        }
+
+        private void startMerge(final NodeStore store, final CommitHook hook,
+                                @Nonnull String [] addChildren, @Nonnull 
String [] removeChildren, boolean change) {
+            setDontBlock(false);
+
+            //our controller is controllee for merge thread (and vice versa)
+            merger = createMergeThread(store, hook, controllee, controller, 
addChildren, removeChildren, change);
+            merger.start();
+            controllee.acquireUninterruptibly();//wait for merge thread to get 
to blocking hook
+        }
+
+        private void waitForNextMerge() throws InterruptedException{
+            controller.release();
+            controllee.tryAcquire(2, TimeUnit.SECONDS);
+        }
+        private void unblock() {
+            setDontBlock(true);
+            controller.release();
+        }
+        private void join() throws InterruptedException {
+            unblock();
+            merger.join();
+        }
+
+        private synchronized void setDontBlock(boolean dontBlock) {
+            this.dontBlock = dontBlock;
+        }
+        private synchronized boolean getDontBlock() {
+            return dontBlock;
+        }
+        private CommitFailedException getException() {
+            return mergeException;
+        }
+
+        private Thread createMergeThread(final NodeStore store, final 
CommitHook hook,
+                                         final Semaphore controller, final 
Semaphore controllee,
+                                         @Nonnull final String [] addChildren, 
@Nonnull final String [] removeChildren,
+                                         final boolean change) {
+            return new Thread(new Runnable() {
+                @Override
+                public void run() {
+                    final CommitHook blockingHook = new CommitHook() {
+                        @Nonnull
+                        @Override
+                        public NodeState processCommit(NodeState before, 
NodeState after, CommitInfo info)
+                                throws CommitFailedException {
+                            controller.release();
+                            if(!getDontBlock()) {
+                                controllee.acquireUninterruptibly();
+                            }
+                            return after;
+                        }
+                    };
+
+                    try {
+                        NodeBuilder builder = store.getRoot().builder();
+                        for (String child : addChildren) {
+                            if (change) {
+                                builder.child(child).setProperty("foo", "bar");
+                            } else {
+                                builder.child(child);
+                            }
+                        }
+                        for (String child : removeChildren) {
+                            builder.child(child).remove();
+                        }
+
+                        List<CommitHook> hookList = new 
ArrayList<CommitHook>();
+                        if(hook != null) {
+                            hookList.add(hook);
+                        }
+                        hookList.add(blockingHook);
+                        hookList.add(new ConflictHook(new 
AnnotatingConflictHandler()));
+                        hookList.add(new EditorHook(new 
ConflictValidatorProvider()));
+                        store.merge(builder, CompositeHook.compose(hookList), 
CommitInfo.EMPTY);
+                    } catch (CommitFailedException cfe) {
+                        mergeException = cfe;
+                    }
+                }
+            });
+        }
+    }
+
+    @Test
     public void slowRebase() throws Exception {
         final int NUM_NODES = DocumentRootBuilder.UPDATE_LIMIT / 2;
         final int NUM_PROPS = 10;


Reply via email to