Author: mduerig
Date: Tue Jul  7 13:26:20 2015
New Revision: 1689664

URL: http://svn.apache.org/r1689664
Log:
OAK-2849: Improve revision gc on SegmentMK
SegmentCompactionIT improvement: don't hammer the file store with size() calls,
as this can lead to lock contention
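
The idea behind the patch, shown below as a self-contained sketch: instead of calling the
file store's size() method on every modification and on every MBean read (each call may
contend on the store's lock), a background task samples the size periodically and caches
it in a volatile field that frequent readers consult instead. This sketch is illustrative
only: it uses a plain ScheduledExecutorService with scheduleAtFixedRate to make the
periodic refresh explicit, and the names SizeMonitorSketch and ExpensiveStore are
hypothetical, not part of the actual test.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Standalone sketch of the caching pattern; names are illustrative only.
    public class SizeMonitorSketch {

        /** Hypothetical stand-in for a store whose size() call takes a lock. */
        public interface ExpensiveStore {
            long size();
        }

        private final ScheduledExecutorService scheduler =
                Executors.newSingleThreadScheduledExecutor();

        // Cached sample; volatile so monitoring threads see the latest value.
        private volatile long storeSize;

        public SizeMonitorSketch(final ExpensiveStore store) {
            // Refresh the cached size once a minute instead of on every read.
            scheduler.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    storeSize = store.size();
                }
            }, 0, 1, TimeUnit.MINUTES);
        }

        /** Cheap read for frequent callers (e.g. an MBean getter). */
        public long getStoreSize() {
            return storeSize;
        }

        public void shutdown() {
            scheduler.shutdownNow();
        }
    }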

Modified:
    jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java

Modified: jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java?rev=1689664&r1=1689663&r2=1689664&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java Tue Jul  7 13:26:20 2015
@@ -53,6 +53,7 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import javax.annotation.Nonnull;
 import javax.management.InstanceAlreadyExistsException;
@@ -151,6 +152,7 @@ public class SegmentCompactionIT {
     private volatile int compactionInterval = 1;
     private volatile boolean stopping;
     private volatile Reference rootReference;
+    private volatile long fileStoreSize;
 
     public synchronized void stop() {
         stopping = true;
@@ -256,6 +258,7 @@ public class SegmentCompactionIT {
 
     @Test
     public void run() throws InterruptedException {
+        scheduleSizeMonitor();
         scheduleCompactor();
         addReaders(maxReaders);
         addWriters(maxWriters);
@@ -267,6 +270,15 @@ public class SegmentCompactionIT {
         }
     }
 
+    private void scheduleSizeMonitor() {
+        scheduler.schedule(new Runnable() {
+            @Override
+            public void run() {
+                fileStoreSize = fileStore.size();
+            }
+        }, 1, TimeUnit.MINUTES);
+    }
+
     private synchronized void scheduleCompactor() {
         LOG.info("Scheduling compaction after {} minutes", compactionInterval);
         compactor.cancel(false);
@@ -378,16 +390,14 @@ public class SegmentCompactionIT {
         @Override
         public Void call() throws IOException, CommitFailedException {
             NodeBuilder root = nodeStore.getRoot().builder();
-            boolean deleteOnly = fileStore.size() > maxStoreSize;
             for (int k = 0; k < opCount; k++) {
-                modify(nodeStore, root, deleteOnly);
+                modify(nodeStore, root);
             }
             nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             return null;
         }
 
-        private void modify(NodeStore nodeStore, NodeBuilder nodeBuilder, boolean deleteOnly)
-                throws IOException {
+        private void modify(NodeStore nodeStore, NodeBuilder nodeBuilder) throws IOException {
             int p0 = nodeRemoveRatio;
             int p1 = propertyRemoveRatio;
             int p2 = nodeAddRatio;
@@ -395,6 +405,7 @@ public class SegmentCompactionIT {
             int p4 = addBinaryRatio;
             double p = p0 + p1 + p2 + p3 + p4;
 
+            boolean deleteOnly = fileStoreSize > maxStoreSize;
             double k = rnd.nextDouble();
             if (k < p0/p) {
                 chooseRandomNode(nodeBuilder).remove();
@@ -880,7 +891,7 @@ public class SegmentCompactionIT {
 
         @Override
         public long getFileStoreSize() {
-            return fileStore.size();
+            return fileStoreSize;
         }
 
         private CompactionMap getCompactionMap() {

