This is an automated email from the ASF dual-hosted git repository.

jsedding pushed a commit to branch jsedding/OAK-11895-compaction
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git

commit 9777f5f9ff5660570b55bef1cca8f81984b86ba8
Author: Julian Sedding <[email protected]>
AuthorDate: Mon Sep 29 16:46:32 2025 +0200

    OAK-11895 - CheckpointCompactor writes to "after" instead of "onto" 
NodeState
---
 .../oak/segment/CheckpointCompactor.java           | 187 ++++++++++-----------
 .../apache/jackrabbit/oak/segment/Compactor.java   |  61 +++++++
 ...mpactor.java => LegacyCheckpointCompactor.java} |   7 +-
 .../oak/segment/compaction/SegmentGCOptions.java   |  39 +++--
 .../segment/file/AbstractCompactionStrategy.java   |   9 +
 .../segment/AbstractCompactorExternalBlobTest.java |  29 ++--
 .../oak/segment/AbstractCompactorTest.java         |  59 ++++---
 .../CheckpointCompactorExternalBlobTest.java       |   4 +-
 .../oak/segment/CheckpointCompactorTest.java       |   6 +-
 .../segment/CompactToDifferentNodeStoreTest.java   | 139 +++++++++++++++
 .../jackrabbit/oak/segment/CompactorTestUtils.java |  16 +-
 ...est.java => LegacyCheckpointCompactorTest.java} |  14 +-
 .../segment/ParallelCompactorExternalBlobTest.java |  17 +-
 .../oak/segment/ParallelCompactorTest.java         |  10 +-
 14 files changed, 411 insertions(+), 186 deletions(-)

diff --git 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
index 2385075503..260cbab59a 100644
--- 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
+++ 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
@@ -19,21 +19,25 @@ package org.apache.jackrabbit.oak.segment;
 
 import static java.util.Objects.requireNonNull;
 
+import static java.util.Objects.requireNonNullElseGet;
 import static org.apache.jackrabbit.oak.commons.PathUtils.elements;
-import static org.apache.jackrabbit.oak.commons.PathUtils.getName;
-import static org.apache.jackrabbit.oak.commons.PathUtils.getParentPath;
 import static 
org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.CHECKPOINTS;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.ROOT;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
 
+import org.apache.jackrabbit.oak.api.PropertyState;
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.commons.conditions.Validate;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryChildNodeEntry;
@@ -79,102 +83,104 @@ public class CheckpointCompactor extends Compactor {
     }
 
     @Override
-    public @Nullable CompactedNodeState compactDown(
+    public @Nullable CompactedNodeState compactDown(@NotNull NodeState before, 
@NotNull NodeState after, @NotNull Canceller hardCanceller, @NotNull Canceller 
softCanceller) throws IOException {
+        return doCompact(before, after, after, hardCanceller, softCanceller);
+    }
+
+    @Override
+    public @Nullable CompactedNodeState compact(@NotNull NodeState before, 
@NotNull NodeState after, @NotNull NodeState onto, @NotNull Canceller 
canceller) throws IOException {
+        return doCompact(before, after, onto, canceller, null);
+    }
+
+    /**
+     * Implementation that compacts checkpoints chronologically on top of each 
other. The implementation
+     * supports both {@link #compactDown(NodeState, Canceller, Canceller)} 
type and
+     * {@link #compactUp(NodeState, Canceller)} type operations.
+     * <p>
+     * Soft cancellation is only supported for {@link #compactDown(NodeState, 
Canceller, Canceller)} type
+     * scenarios, i.e. when {@code after.equals(onto)}, and will return a 
partially compacted state if cancelled.
+     * <p>
+     * Hard cancellation will abandon the compaction and return {@code null}.
+     * <p>
+     * If compaction completes successfully, a fully compacted state is 
returned.
+     *
+     * @param before        the node state that {@code after} is diffed against
+     * @param after         the node state diffed against {@code before}
+     * @param onto          the node state that the diff is applied to
+     * @param hardCanceller the trigger for hard cancellation, will abandon 
compaction if cancelled
+     * @param softCanceller the trigger for soft cancellation, will return 
partially compacted state if cancelled; may only be set if {@code 
after.equals(onto)}, implementations should validate the arguments
+     * @return              a fully-compacted or partially-compacted node 
state, or {@code null} if hard-cancelled
+     * @throws IOException will throw exception if any errors occur during 
compaction
+     */
+    private @Nullable CompactedNodeState doCompact(
             @NotNull NodeState before,
             @NotNull NodeState after,
+            @NotNull NodeState onto,
             @NotNull Canceller hardCanceller,
-            @NotNull Canceller softCanceller
+            @Nullable Canceller softCanceller
     ) throws IOException {
-        Iterator<Entry<String, NodeState>> iterator = collectRoots(before, 
after).entrySet().iterator();
-        Entry<String, NodeState> entry = iterator.next();
-        String path = entry.getKey();
-
-        // could already be in cache if compactor is reused
-        CompactedNodeState compacted = cpCache.get(entry.getValue());
-        gcListener.info("compacting {}.", path);
-        if (compacted == null) {
-            compacted = compactDownWithDelegate(getRoot(before), 
entry.getValue(), hardCanceller, softCanceller);
-            if (compacted == null) {
-                return null;
-            }
-        }
+        Validate.checkArgument(softCanceller == null || Objects.equals(after, 
onto),
+                "softCanceller is only supported for compactDown, i.e. when 
Objects.equals(after, onto)");
 
-        NodeBuilder builder = after.builder();
+        Set<String> superRoots = collectSuperRootPaths(before, after);
         Buffer stableIdBytes = 
requireNonNull(CompactorUtils.getStableIdBytes(after));
 
-        getChild(builder, getParentPath(path)).setChildNode(getName(path), 
compacted);
+        NodeBuilder rootBuilder = onto.builder();
+        CompactedNodeState compacted = null;
+        Supplier<Boolean> isCancelled = () -> softCanceller != null && 
softCanceller.check().isCancelled();
+        for (String path : superRoots) {
+            NodeBuilder builder = getDescendant(rootBuilder, path, 
NodeBuilder::child);
+            NodeState afterSuperRoot = getDescendant(after, path, 
NodeState::getChildNode);
 
-        if (compacted.isComplete()) {
-            cpCache.put(entry.getValue(), compacted);
-        } else {
-            return compactor.writeNodeState(builder.getNodeState(), 
stableIdBytes, false);
-        }
+            NodeState baseRoot = requireNonNullElseGet(compacted, () -> 
getRoot(getDescendant(before, path, NodeState::getChildNode)));
+            NodeState ontoRoot = requireNonNullElseGet(compacted, () -> 
getRoot(getDescendant(onto, path, NodeState::getChildNode)));
 
-        before = entry.getValue();
-
-        while (iterator.hasNext()) {
-            entry = iterator.next();
-            path = entry.getKey();
-            gcListener.info("compacting {}.", path);
+            if (compacted == null) {
+                // down compaction only affects the first iteration, after 
that, we're always doing up compaction
+                compacted = compactWithCache(baseRoot, 
getRoot(afterSuperRoot), ontoRoot, hardCanceller, softCanceller);
+            } else {
+                compacted = compactWithCache(baseRoot, 
getRoot(afterSuperRoot), ontoRoot, hardCanceller, null);
+            }
 
-            compacted = compactWithCache(before, entry.getValue(), compacted, 
hardCanceller);
             if (compacted == null) {
                 return null;
             }
 
-            before = entry.getValue();
-            Validate.checkState(compacted.isComplete());
-            getChild(builder, getParentPath(path)).setChildNode(getName(path), 
compacted);
+            Validate.checkState(compacted.isComplete() || isCancelled.get(),
+                    "compaction must be complete unless cancelled");
 
-            if (softCanceller.check().isCancelled()) {
-                return compactor.writeNodeState(builder.getNodeState(), 
stableIdBytes, false);
+            builder.setChildNode(ROOT, compacted);
+            if (path.startsWith(CHECKPOINTS + '/')) {
+                // copy checkpoint "properties" child node
+                NodeBuilder props = builder.setChildNode("properties");
+                for (PropertyState properties : 
afterSuperRoot.getChildNode("properties").getProperties()) {
+                    props.setProperty(compactor.compact(properties));
+                }
+                // copy checkpoint properties (on the parent of the root node)
+                for (PropertyState property : afterSuperRoot.getProperties()) {
+                    builder.setProperty(compactor.compact(property));
+                }
             }
-        }
 
-        return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, 
true);
-    }
-
-    @Override
-    public @Nullable CompactedNodeState compact(
-            @NotNull NodeState before,
-            @NotNull NodeState after,
-            @NotNull NodeState onto,
-            @NotNull Canceller canceller
-    ) throws IOException {
-        LinkedHashMap<String, NodeState> roots = collectRoots(before, after);
-
-        NodeBuilder builder = after.builder();
-        Buffer stableIdBytes = 
requireNonNull(CompactorUtils.getStableIdBytes(after));
-
-        before = getRoot(before);
-        onto = getRoot(onto);
-
-        for (Entry<String, NodeState> entry : roots.entrySet()) {
-            String path = entry.getKey();
-            after = entry.getValue();
-            CompactedNodeState compacted = compactWithCache(before, after, 
onto, canceller);
-            if (compacted == null) {
-                return null;
+            if (isCancelled.get()) {
+                break;
             }
-            Validate.checkState(compacted.isComplete());
-            getChild(builder, getParentPath(path)).setChildNode(getName(path), 
compacted);
-            before = after;
-            onto = compacted;
         }
 
-        return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, 
true);
+        return compactor.writeNodeState(rootBuilder.getNodeState(), 
stableIdBytes, !isCancelled.get());
     }
 
     private @Nullable CompactedNodeState compactWithCache(
             @NotNull NodeState before,
             @NotNull NodeState after,
             @NotNull NodeState onto,
-            @NotNull Canceller canceller
+            @NotNull Canceller hardCanceller,
+            @Nullable Canceller softCanceller
     ) throws IOException {
         CompactedNodeState compacted = cpCache.get(after);
         if (compacted == null) {
-            compacted = compactWithDelegate(before, after, onto, canceller);
-            if (compacted != null) {
+            compacted = compactor.compact(before, after, onto, hardCanceller, 
softCanceller);
+            if (compacted != null && compacted.isComplete()) {
                 cpCache.put(after, compacted);
             }
         } else {
@@ -188,7 +194,7 @@ public class CheckpointCompactor extends Compactor {
      * state from a {@code superRoot}. This list consists of all checkpoints 
followed by
      * the root.
      */
-    private @NotNull LinkedHashMap<String, NodeState> collectRoots(
+    private @NotNull LinkedHashSet<String> collectSuperRootPaths(
             @NotNull NodeState superRootBefore,
             @NotNull NodeState superRootAfter) {
         List<ChildNodeEntry> checkpoints = new ArrayList<>();
@@ -208,48 +214,27 @@ public class CheckpointCompactor extends Compactor {
             return Long.compare(c1, c2);
         });
 
-        LinkedHashMap<String, NodeState> roots = new LinkedHashMap<>();
+        LinkedHashSet<String> roots = new LinkedHashSet<>();
         for (ChildNodeEntry checkpoint : checkpoints) {
             String name = checkpoint.getName();
             NodeState node = checkpoint.getNodeState();
             gcListener.info("found checkpoint {} created on {}.",
                     name, new Date(node.getLong("created")));
-            roots.put("checkpoints/" + name + "/root", 
node.getChildNode("root"));
+            roots.add("checkpoints/" + name);
         }
-        roots.put("root", superRootAfter.getChildNode("root"));
+        roots.add("");
 
         return roots;
     }
 
     private static @NotNull NodeState getRoot(@NotNull NodeState node) {
-        return node.hasChildNode("root") ? node.getChildNode("root") : 
EMPTY_NODE;
+        return node.hasChildNode(ROOT) ? node.getChildNode(ROOT) : EMPTY_NODE;
     }
 
-    private static @NotNull NodeBuilder getChild(NodeBuilder builder, String 
path) {
+    private static <T> T getDescendant(T t, String path, BiFunction<T, String, 
T> getChild) {
         for (String name : elements(path)) {
-            builder = builder.getChildNode(name);
+            t = getChild.apply(t, name);
         }
-        return builder;
-    }
-
-    /**
-     * Delegate compaction to another, usually simpler, implementation.
-     */
-    private @Nullable CompactedNodeState compactDownWithDelegate(
-            @NotNull NodeState before,
-            @NotNull NodeState after,
-            @NotNull Canceller hardCanceller,
-            @NotNull Canceller softCanceller
-    ) throws IOException {
-        return compactor.compactDown(before, after, hardCanceller, 
softCanceller);
-    }
-
-    private @Nullable CompactedNodeState compactWithDelegate(
-            @NotNull NodeState before,
-            @NotNull NodeState after,
-            @NotNull NodeState onto,
-            @NotNull Canceller canceller
-    ) throws IOException {
-        return compactor.compact(before, after, onto, canceller);
+        return t;
     }
 }
diff --git 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
index e0dc1037b6..5e08b8496d 100644
--- 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
+++ 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
@@ -29,7 +29,59 @@ import java.io.IOException;
 
 import static 
org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
 
+/**
+ * A compactor compacts the differences between two {@link NodeState}s
+ * ({@code before} and {@code after}) on top of a third {@link NodeState}
+ * ({@code onto}).
+ * <p>
+ * The compaction can be done in two ways:
+ * <ul>
+ *     <li>Compacting down, where the differences between {@code before} and
+ *     {@code after} are compacted on top of the {@code after} state. This is
+ *     useful, because it allows for partial compactions. I.e. compactions that
+ *     are "soft-cancelled" before the full compaction process completes.
+ *     <p>
+ *     During "down compaction", parts of the tree are compacted and the 
respective
+ *     node states are substituted with their compacted counterparts. This 
results
+ *     in an {@link #equals(Object) equal} node state at any moment in the 
process,
+ *     which means that if the compaction is cancelled, a valid, albeit only
+ *     partially compacted state can be returned.
+ *     <p>
+ *     After a partially completed "down compaction", the segments referenced 
by the
+ *     root record can be of different generations. However, the stable 
identifiers
+ *     of the compacted records remain unchanged.
+ *     </li>
+ *     <li>Compacting up, where the differences between {@code before} and
+ *     {@code after} are compacted on top of {@code before}. This is useful to
+ *     create a new {@link NodeState} that contains the changes from
+ *     {@code after} but is based on {@code before}, often on an empty 
node-state.
+ *     <p>
+ *     "Up compaction" results in a fully compacted state, but it can only be 
"hard-cancelled",
+ *     i.e. either the full compaction is done, or nothing at all ({@code 
null} is returned
+ *     if cancelled).
+ *     <p>
+ *     Generally, "up compaction" is more thorough than "down compaction", 
because
+ *     the entire content tree is rewritten. Furthermore, it guarantees that 
after
+ *     compaction all reachable segments are of the same, new generation.
+ *     </li>
+ * </ul>
+ * <p>
+ * The compaction process can be cancelled through a {@link Canceller}. If
+ * cancellation is requested, the compaction will either abort completely or,
+ * in case of compacting down, return a partially compacted state.
+ */
 public abstract class Compactor {
+
+    /**
+     * Convenience method to run {@link #compactDown(NodeState, NodeState, 
Canceller, Canceller)},
+     * where the {@code before} state is the empty node state.
+     *
+     * @param state         the (after) node state to compact
+     * @param hardCanceller the trigger for hard cancellation, will abandon 
compaction if cancelled
+     * @param softCanceller the trigger for soft cancellation, will return 
partially compacted state if cancelled
+     * @return              the fully or partially compacted node state, or 
{@code null} if hard-cancelled
+     * @throws IOException will throw exception if any errors occur during 
compaction
+     */
     public final @Nullable CompactedNodeState compactDown(
             @NotNull NodeState state,
             @NotNull Canceller hardCanceller,
@@ -54,6 +106,15 @@ public abstract class Compactor {
             @NotNull Canceller softCanceller
     ) throws IOException;
 
+    /**
+     * Convenience method to run {@link #compactUp(NodeState, NodeState, 
Canceller)},
+     * where the {@code before} state is the empty node state.
+     *
+     * @param state         the (after) node state to compact
+     * @param canceller the trigger for hard cancellation, will abandon 
compaction if cancelled
+     * @return              the fully compacted node state, or {@code null} if 
hard-cancelled
+     * @throws IOException will throw exception if any errors occur during 
compaction
+     */
     public final @Nullable CompactedNodeState compactUp(
             @NotNull NodeState state,
             @NotNull Canceller canceller
diff --git 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactor.java
similarity index 98%
copy from 
oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
copy to 
oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactor.java
index 2385075503..01b7c2d422 100644
--- 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
+++ 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactor.java
@@ -57,8 +57,11 @@ import org.jetbrains.annotations.Nullable;
  *     <li>Caching the compacted checkpoints and root states for deduplication 
should
  *     the same checkpoint or root state occur again in a later compaction 
retry cycle.
  * </ul>
+ *
+ * @deprecated Use {@link 
org.apache.jackrabbit.oak.segment.CheckpointCompactor} instead.
  */
-public class CheckpointCompactor extends Compactor {
+@Deprecated
+public class LegacyCheckpointCompactor extends Compactor {
     protected final @NotNull GCMonitor gcListener;
 
     private final @NotNull Map<NodeState, CompactedNodeState> cpCache = new 
HashMap<>();
@@ -71,7 +74,7 @@ public class CheckpointCompactor extends Compactor {
      * @param gcListener        listener receiving notifications about the 
garbage collection process
      * @param compactor         the delegate compactor to use for the actual 
compaction work
      */
-    public CheckpointCompactor(
+    public LegacyCheckpointCompactor(
             @NotNull GCMonitor gcListener,
             @NotNull ClassicCompactor compactor) {
         this.gcListener = gcListener;
diff --git 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
index d9250c491a..284ebf5486 100644
--- 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
+++ 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
@@ -21,6 +21,8 @@ package org.apache.jackrabbit.oak.segment.compaction;
 
 import org.jetbrains.annotations.NotNull;
 
+import java.util.stream.Stream;
+
 /**
  * This class holds configuration options for segment store revision gc.
  */
@@ -60,7 +62,26 @@ public class SegmentGCOptions {
         /**
          * Multithreaded compaction implementation
          */
-        PARALLEL_COMPACTOR("parallel");
+        PARALLEL_COMPACTOR("parallel"),
+
+        /**
+         * Checkpoint-aware compaction implementation
+         *
+         * @deprecated use {@link #CHECKPOINT_COMPACTOR} instead, this 
implementation is only present
+         * for backwards-compatibility in case the fix for OAK-11895 
introduces issues for some users.
+         */
+        @Deprecated
+        LEGACY_CHECKPOINT_COMPACTOR("legacy_diff"),
+
+
+        /**
+         * Multithreaded compaction implementation
+         *
+         * @deprecated use {@link #PARALLEL_COMPACTOR} instead, this 
implementation is only present
+         * for backwards-compatibility in case the fix for OAK-11895 
introduces issues for some users.
+         */
+        @Deprecated
+        LEGACY_PARALLEL_COMPACTOR("legacy_parallel");
 
         private final String description;
 
@@ -69,16 +90,10 @@ public class SegmentGCOptions {
         }
 
         public static CompactorType fromDescription(String description) {
-            switch (description) {
-            case "classic":
-                return CLASSIC_COMPACTOR;
-            case "diff":
-                return CHECKPOINT_COMPACTOR;
-            case "parallel":
-                return PARALLEL_COMPACTOR;
-            default:
-                throw new IllegalArgumentException("Unrecognized compactor 
type " + description);
-            }
+            return Stream.of(values())
+                    .filter(t -> t.description().equals(description))
+                    .findFirst()
+                    .orElseThrow(() -> new 
IllegalArgumentException("Unrecognized compactor type " + description));
         }
 
         public String description() {
@@ -429,7 +444,7 @@ public class SegmentGCOptions {
     /**
      * Sets the concurrency level for compaction
      * @param concurrency number of threads to use
-     * @return this instance                    
+     * @return this instance
      */
     public SegmentGCOptions setConcurrency(int concurrency) {
         this.concurrency = concurrency;
diff --git 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
index 19df646df4..031c01aa0d 100644
--- 
a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
+++ 
b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
@@ -30,6 +30,7 @@ import static 
org.apache.jackrabbit.oak.segment.file.TarRevisions.timeout;
 import org.apache.jackrabbit.oak.segment.CheckpointCompactor;
 import org.apache.jackrabbit.oak.segment.ClassicCompactor;
 import org.apache.jackrabbit.oak.segment.Compactor;
+import org.apache.jackrabbit.oak.segment.LegacyCheckpointCompactor;
 import org.apache.jackrabbit.oak.segment.ParallelCompactor;
 import org.apache.jackrabbit.oak.segment.RecordId;
 import org.apache.jackrabbit.oak.segment.SegmentNodeState;
@@ -290,6 +291,7 @@ abstract class AbstractCompactionStrategy implements 
CompactionStrategy {
         }
     }
 
+    @SuppressWarnings("deprecation") // see OAK-11895 for why the 
LegacyCheckpointCompactor is still present
     private Compactor newCompactor(Context context, CompactionWriter writer) {
         CompactorType compactorType = 
context.getGCOptions().getCompactorType();
         switch (compactorType) {
@@ -302,6 +304,13 @@ abstract class AbstractCompactionStrategy implements 
CompactionStrategy {
                         new ClassicCompactor(writer, 
context.getCompactionMonitor()));
             case CLASSIC_COMPACTOR:
                 return new ClassicCompactor(writer, 
context.getCompactionMonitor());
+            case LEGACY_CHECKPOINT_COMPACTOR:
+                return new LegacyCheckpointCompactor(context.getGCListener(),
+                        new ClassicCompactor(writer, 
context.getCompactionMonitor()));
+            case LEGACY_PARALLEL_COMPACTOR:
+                return new LegacyCheckpointCompactor(context.getGCListener(),
+                        new ParallelCompactor(context.getGCListener(), writer, 
context.getCompactionMonitor(),
+                                context.getGCOptions().getConcurrency()));
             default:
                 throw new IllegalArgumentException("Unknown compactor type: " 
+ compactorType);
             }
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
index a6aac0da21..46709f5552 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
@@ -27,7 +27,8 @@ import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameRec
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameStableId;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.checkGeneration;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.createBlob;
-import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpoint;
+import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpointRoot;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.ROOT;
 import static 
org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 import static 
org.apache.jackrabbit.oak.segment.file.tar.GCGeneration.newGCGeneration;
 import static org.junit.Assert.assertNotNull;
@@ -80,16 +81,12 @@ public abstract class AbstractCompactorExternalBlobTest {
     public RuleChain rules = RuleChain.outerRule(folder)
         .around(temporaryBlobStore);
 
-    @Parameterized.Parameters
-    public static List<SimpleCompactorFactory> compactorFactories() {
-        return Arrays.asList(
-                compactor -> compactor::compactUp,
-                compactor -> (node, canceller) -> compactor.compactDown(node, 
canceller, canceller),
-                compactor -> (node, canceller) -> 
compactor.compact(EMPTY_NODE, node, EMPTY_NODE, canceller)
-        );
+    @Parameterized.Parameters(name = "{index}: {0}")
+    public static Iterable<Object[]> compactorFactories() {
+        return AbstractCompactorTest.compactorFactories();
     }
 
-    public AbstractCompactorExternalBlobTest(@NotNull SimpleCompactorFactory 
compactorFactory) {
+    public AbstractCompactorExternalBlobTest(String name, @NotNull 
SimpleCompactorFactory compactorFactory) {
         this.compactorFactory = compactorFactory;
     }
 
@@ -146,12 +143,12 @@ public abstract class AbstractCompactorExternalBlobTest {
         checkGeneration(compacted1, compactedGeneration);
 
         assertSameStableId(uncompacted1, compacted1);
-        assertSameStableId(getCheckpoint(uncompacted1, cp1), 
getCheckpoint(compacted1, cp1));
-        assertSameStableId(getCheckpoint(uncompacted1, cp2), 
getCheckpoint(compacted1, cp2));
-        assertSameStableId(getCheckpoint(uncompacted1, cp3), 
getCheckpoint(compacted1, cp3));
-        assertSameStableId(getCheckpoint(uncompacted1, cp4), 
getCheckpoint(compacted1, cp4));
-        assertSameStableId(getCheckpoint(uncompacted1, cp5), 
getCheckpoint(compacted1, cp5));
-        assertSameRecord(getCheckpoint(compacted1, cp5), 
compacted1.getChildNode("root"));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp1), 
getCheckpointRoot(compacted1, cp1));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp2), 
getCheckpointRoot(compacted1, cp2));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp3), 
getCheckpointRoot(compacted1, cp3));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp4), 
getCheckpointRoot(compacted1, cp4));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp5), 
getCheckpointRoot(compacted1, cp5));
+        assertSameRecord(getCheckpointRoot(compacted1, cp5), 
compacted1.getChildNode(ROOT));
     }
 
     private static void updateTestContent(@NotNull String parent, @NotNull 
NodeStore nodeStore)
@@ -162,4 +159,4 @@ public abstract class AbstractCompactorExternalBlobTest {
         nodeStore.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
     }
 
-}
\ No newline at end of file
+}
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
index 298318e362..af0d6b606a 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
@@ -19,14 +19,15 @@
 package org.apache.jackrabbit.oak.segment;
 
 import static java.util.concurrent.TimeUnit.DAYS;
-import static 
org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactor;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.addTestContent;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameRecord;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameStableId;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.checkGeneration;
-import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpoint;
+import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpointRoot;
+import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpointSuperRoot;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.ROOT;
 import static 
org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -39,7 +40,6 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.List;
 
 import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
@@ -78,16 +78,15 @@ public abstract class AbstractCompactorTest {
     private GCGeneration partialGeneration;
     private GCGeneration targetGeneration;
 
-    @Parameterized.Parameters
-    public static List<SimpleCompactorFactory> compactorFactories() {
+    @Parameterized.Parameters(name = "{index}: {0}")
+    public static Iterable<Object[]> compactorFactories() {
         return Arrays.asList(
-                compactor -> compactor::compactUp,
-                compactor -> (node, canceller) -> compactor.compactDown(node, 
canceller, canceller),
-                compactor -> (node, canceller) -> 
compactor.compact(EMPTY_NODE, node, EMPTY_NODE, canceller)
+                new Object[] {"compactUp", ((SimpleCompactorFactory)compactor 
-> compactor::compactUp)},
+                new Object[] {"compactDown", 
((SimpleCompactorFactory)compactor -> (node, canceller) -> 
compactor.compactDown(node, canceller, canceller))}
         );
     }
 
-    public AbstractCompactorTest(@NotNull SimpleCompactorFactory 
compactorFactory) {
+    public AbstractCompactorTest(@SuppressWarnings("unused") String name, 
@NotNull SimpleCompactorFactory compactorFactory) {
         this.compactorFactory = compactorFactory;
     }
 
@@ -130,9 +129,9 @@ public abstract class AbstractCompactorTest {
         checkGeneration(compacted1, targetGeneration);
 
         assertSameStableId(uncompacted1, compacted1);
-        assertSameStableId(getCheckpoint(uncompacted1, cp1), 
getCheckpoint(compacted1, cp1));
-        assertSameStableId(getCheckpoint(uncompacted1, cp2), 
getCheckpoint(compacted1, cp2));
-        assertSameRecord(getCheckpoint(compacted1, cp2), 
compacted1.getChildNode("root"));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp1), 
getCheckpointRoot(compacted1, cp1));
+        assertSameStableId(getCheckpointRoot(uncompacted1, cp2), 
getCheckpointRoot(compacted1, cp2));
+        assertSameRecord(getCheckpointRoot(compacted1, cp2), 
compacted1.getChildNode(ROOT));
 
         // Simulate a 2nd compaction cycle
         addTestContent("cp3", nodeStore, 42);
@@ -141,22 +140,39 @@ public abstract class AbstractCompactorTest {
         String cp4 = nodeStore.checkpoint(DAYS.toMillis(1));
 
         SegmentNodeState uncompacted2 = fileStore.getHead();
-        SegmentNodeState compacted2 = compactor.compact(uncompacted1, 
uncompacted2, compacted1, Canceller.newCanceller());
+
+        // A new generation is needed for compaction, because the previous 
compaction completed fully.
+        // If the previous compaction had been partial, the same target 
generation could be used.
+        GCGeneration secondTargetGeneration = targetGeneration.nextFull();
+        compactor = createCompactor(fileStore, new 
GCIncrement(targetGeneration, targetGeneration.nextPartial(), 
secondTargetGeneration), compactionMonitor);
+        SegmentNodeState compacted2 = compactor.compact(compacted1, 
uncompacted2, compacted1, Canceller.newCanceller());
+
         assertNotNull(compacted2);
         assertNotSame(uncompacted2, compacted2);
-        checkGeneration(compacted2, targetGeneration);
+        checkGeneration(compacted2, secondTargetGeneration);
 
         
assertTrue(fileStore.getRevisions().setHead(uncompacted2.getRecordId(), 
compacted2.getRecordId()));
 
+        // verify that all checkpoints, including the checkpoint-properties, 
are copied over correctly
+        assertEquals(getCheckpointSuperRoot(uncompacted1, cp1), 
getCheckpointSuperRoot(compacted2, cp1));
+        assertEquals(getCheckpointSuperRoot(uncompacted1, cp2), 
getCheckpointSuperRoot(compacted2, cp2));
+        assertEquals(getCheckpointSuperRoot(uncompacted2, cp3), 
getCheckpointSuperRoot(compacted2, cp3));
+        assertEquals(getCheckpointSuperRoot(uncompacted2, cp4), 
getCheckpointSuperRoot(compacted2, cp4));
+
+        // verify that the entire tree has the same content
         assertEquals(uncompacted2, compacted2);
+
+        // the super root and all root nodes should have the same stable-id
         assertSameStableId(uncompacted2, compacted2);
-        assertSameStableId(getCheckpoint(uncompacted2, cp1), 
getCheckpoint(compacted2, cp1));
-        assertSameStableId(getCheckpoint(uncompacted2, cp2), 
getCheckpoint(compacted2, cp2));
-        assertSameStableId(getCheckpoint(uncompacted2, cp3), 
getCheckpoint(compacted2, cp3));
-        assertSameStableId(getCheckpoint(uncompacted2, cp4), 
getCheckpoint(compacted2, cp4));
-        assertSameRecord(getCheckpoint(compacted1, cp1), 
getCheckpoint(compacted2, cp1));
-        assertSameRecord(getCheckpoint(compacted1, cp2), 
getCheckpoint(compacted2, cp2));
-        assertSameRecord(getCheckpoint(compacted2, cp4), 
compacted2.getChildNode("root"));
+        assertSameStableId(uncompacted2.getChildNode(ROOT), 
compacted2.getChildNode(ROOT));
+        assertSameStableId(getCheckpointRoot(uncompacted2, cp1), 
getCheckpointRoot(compacted2, cp1));
+        assertSameStableId(getCheckpointRoot(uncompacted2, cp2), 
getCheckpointRoot(compacted2, cp2));
+        assertSameStableId(getCheckpointRoot(uncompacted2, cp3), 
getCheckpointRoot(compacted2, cp3));
+        assertSameStableId(getCheckpointRoot(uncompacted2, cp4), 
getCheckpointRoot(compacted2, cp4));
+
+        // The root of checkpoint 4 and the current root should be 
deduplicated,
+        // as there were no changes after the checkpoint was taken.
+        assertSameRecord(getCheckpointRoot(compacted2, cp4), 
compacted2.getChildNode(ROOT));
     }
 
     @Test
@@ -199,5 +215,6 @@ public abstract class AbstractCompactorTest {
         assertNotNull(compacted2);
         assertTrue(compacted2.isComplete());
         checkGeneration(compacted2, targetGeneration);
+        assertEquals(uncompacted1, compacted2);
     }
 }
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
index 955eaa2952..ac3d880819 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
@@ -32,8 +32,8 @@ import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompact
 
 @RunWith(Parameterized.class)
 public class CheckpointCompactorExternalBlobTest extends 
AbstractCompactorExternalBlobTest {
-    public CheckpointCompactorExternalBlobTest(@NotNull SimpleCompactorFactory 
compactorFactory) {
-        super(compactorFactory);
+    public CheckpointCompactorExternalBlobTest(String name, @NotNull 
SimpleCompactorFactory compactorFactory) {
+        super(name, compactorFactory);
     }
 
     @Override
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
index 8f9ad60c2b..127e936b38 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
@@ -32,12 +32,12 @@ import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompact
 
 @RunWith(Parameterized.class)
 public class CheckpointCompactorTest extends AbstractCompactorTest {
-    public CheckpointCompactorTest(@NotNull SimpleCompactorFactory 
compactorFactory) {
-        super(compactorFactory);
+    public CheckpointCompactorTest(String name, @NotNull 
SimpleCompactorFactory compactorFactory) {
+        super(name, compactorFactory);
     }
 
     @Override
-    protected CheckpointCompactor createCompactor(
+    protected Compactor createCompactor(
             @NotNull FileStore fileStore,
             @NotNull GCIncrement increment,
             @NotNull GCNodeWriteMonitor compactionMonitor
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactToDifferentNodeStoreTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactToDifferentNodeStoreTest.java
new file mode 100644
index 0000000000..403327c211
--- /dev/null
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactToDifferentNodeStoreTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.segment;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.memory.BinaryPropertyState;
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
+import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;
+import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
+import org.apache.jackrabbit.oak.spi.gc.LoggingGCMonitor;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.ROOT;
+import static 
org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
+import static org.junit.Assert.assertEquals;
+
+public class CompactToDifferentNodeStoreTest {
+
+    private static final Logger LOG = 
LoggerFactory.getLogger(CompactToDifferentNodeStoreTest.class);
+
+    @Rule
+    public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
+    // TODO parameterize for different compactor implementations: compactorFactories() is unused until the class is annotated with @RunWith(Parameterized.class) and gains a matching constructor
+
+    @Parameterized.Parameters
+    public static List<CompactorTestUtils.SimpleCompactorFactory> 
compactorFactories() {
+        return Arrays.asList(
+                compactor -> compactor::compactUp,
+                compactor -> (node, canceller) -> compactor.compactDown(node, 
canceller, canceller)
+        );
+    }
+
+
+    @Test
+    public void testCompactCopy() throws InvalidFileStoreVersionException, 
IOException, CommitFailedException {
+        File sourceFolder = folder.newFolder("source");
+        createGarbage(sourceFolder);
+
+        File targetFolder = folder.newFolder("target");
+
+        try (ReadOnlyFileStore compactionSource = 
fileStoreBuilder(sourceFolder).buildReadOnly()) {
+
+            CompactedNodeState compactedNodeState;
+            try (FileStore compactionTarget = 
fileStoreBuilder(targetFolder).build()) {
+                compactedNodeState = compact(compactionSource, 
compactionTarget);
+            }
+
+            try (ReadOnlyFileStore result = 
fileStoreBuilder(targetFolder).buildReadOnly()) {
+                SegmentNodeStore s = 
SegmentNodeStoreBuilders.builder(compactionSource).build();
+                SegmentNodeStore t = 
SegmentNodeStoreBuilders.builder(result).build();
+
+                assertEquals("value100", 
compactedNodeState.getChildNode(ROOT).getChildNode("test").getProperty("property").getValue(Type.STRING));
+                assertEquals(
+                        
s.getRoot().getChildNode("garbage").getProperty("binary"),
+                        
compactedNodeState.getChildNode(ROOT).getChildNode("garbage").getProperty("binary"));
+                assertEquals(
+                        
s.getRoot().getChildNode("garbage").getProperty("binary"),
+                        
t.getRoot().getChildNode("garbage").getProperty("binary"));
+            }
+        }
+    }
+
+    private static CompactedNodeState compact(ReadOnlyFileStore source, 
FileStore target) {
+        GCGeneration headGeneration = 
target.getHead().getRecordId().getSegment().getGcGeneration();
+        CompactionWriter writer = new CompactionWriter(target.getReader(), 
target.getBlobStore(), headGeneration, target.getWriter());
+        GCMonitor gcListener = new LoggingGCMonitor(LOG);
+        Compactor compactor = new ParallelCompactor(gcListener, writer, new 
GCNodeWriteMonitor(1, gcListener), 4);
+        // Compactor compactor = new CheckpointCompactor(gcListener, writer, 
new GCNodeWriteMonitor(1, gcListener));
+        //Compactor compactor = new ClassicCompactor(writer, new 
GCNodeWriteMonitor(1, gcListener));
+        try {
+            CompactedNodeState compactedNodeState = 
compactor.compactUp(target.getHead(), source.getHead(), 
Canceller.newCanceller());
+            if (compactedNodeState != null) {
+                target.getRevisions().setHead(target.getHead().getRecordId(), 
compactedNodeState.getRecordId());
+            }
+            // target.getWriter().flush();
+            // target.flush();
+            return compactedNodeState;
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+    private static void createGarbage(File source) throws 
InvalidFileStoreVersionException, IOException, CommitFailedException {
+        try (FileStore store = fileStoreBuilder(source).build()) {
+            SegmentNodeStore nodeStore = 
SegmentNodeStoreBuilders.builder(store).build();
+            for (int i = 0; i <= 100; i++) {
+                NodeBuilder builder = nodeStore.getRoot().builder();
+                builder.child("test").setProperty("property", "value" + i);
+                if (builder.hasChildNode("garbage")) {
+                    builder.getChildNode("garbage").remove();
+                } else {
+                    
builder.child("garbage").setProperty(BinaryPropertyState.binaryProperty("binary",
 RandomStringUtils.insecure().nextPrint(10_000)));
+                }
+                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+                store.flush();
+            }
+        }
+    }
+
+}
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
index 30d804f657..88e1ff74b3 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
@@ -18,6 +18,8 @@
 package org.apache.jackrabbit.oak.segment;
 
 import static 
org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState.binaryPropertyFromBlob;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.CHECKPOINTS;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStore.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -61,11 +63,17 @@ public class CompactorTestUtils {
         }
     }
 
-    public static NodeState getCheckpoint(NodeState superRoot, String name) {
+    public static NodeState getCheckpointSuperRoot(NodeState superRoot, String 
name) {
         NodeState checkpoint = superRoot
-                .getChildNode("checkpoints")
-                .getChildNode(name)
-                .getChildNode("root");
+                .getChildNode(CHECKPOINTS)
+                .getChildNode(name);
+        assertTrue(checkpoint.exists());
+        return checkpoint;
+    }
+
+    public static NodeState getCheckpointRoot(NodeState superRoot, String 
name) {
+        NodeState checkpoint = getCheckpointSuperRoot(superRoot, name)
+                .getChildNode(ROOT);
         assertTrue(checkpoint.exists());
         return checkpoint;
     }
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactorTest.java
similarity index 83%
copy from 
oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
copy to 
oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactorTest.java
index 8f9ad60c2b..75c9f41fbf 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/LegacyCheckpointCompactorTest.java
@@ -18,26 +18,26 @@
 
 package org.apache.jackrabbit.oak.segment;
 
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.GCIncrement;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
-import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import static 
org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
 import static 
org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
+import static 
org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
 
 @RunWith(Parameterized.class)
-public class CheckpointCompactorTest extends AbstractCompactorTest {
-    public CheckpointCompactorTest(@NotNull SimpleCompactorFactory 
compactorFactory) {
-        super(compactorFactory);
+public class LegacyCheckpointCompactorTest extends AbstractCompactorTest {
+    public LegacyCheckpointCompactorTest(String name, @NotNull 
SimpleCompactorFactory compactorFactory) {
+        super(name, compactorFactory);
     }
 
     @Override
-    protected CheckpointCompactor createCompactor(
+    protected Compactor createCompactor(
             @NotNull FileStore fileStore,
             @NotNull GCIncrement increment,
             @NotNull GCNodeWriteMonitor compactionMonitor
@@ -46,6 +46,6 @@ public class CheckpointCompactorTest extends 
AbstractCompactorTest {
                 .withGeneration(generation)
                 .build(fileStore);
         CompactionWriter compactionWriter = new 
CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), increment, 
writerFactory);
-        return new CheckpointCompactor(GCMonitor.EMPTY, new 
ClassicCompactor(compactionWriter, GCNodeWriteMonitor.EMPTY));
+        return new LegacyCheckpointCompactor(GCMonitor.EMPTY, new 
ClassicCompactor(compactionWriter, compactionMonitor));
     }
 }
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
index 06a26bb3f9..8346363815 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
@@ -27,7 +27,6 @@ import org.jetbrains.annotations.NotNull;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.ArrayList;
 import java.util.List;
 
 import static 
org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
@@ -38,21 +37,13 @@ public class ParallelCompactorExternalBlobTest extends 
AbstractCompactorExternal
 
     private final int concurrency;
 
-    @Parameterized.Parameters
+    @Parameterized.Parameters(name = "{index}: {0} concurrency={2}")
     public static List<Object[]> parameters() {
-        Integer[] concurrencyLevels = {1, 2, 4, 8, 16};
-
-        List<Object[]> parameters = new ArrayList<>();
-        for (SimpleCompactorFactory factory : 
AbstractCompactorExternalBlobTest.compactorFactories()) {
-            for (int concurrency : concurrencyLevels) {
-                parameters.add(new Object[]{factory, concurrency});
-            }
-        }
-        return parameters;
+        return ParallelCompactorTest.parameters();
     }
 
-    public ParallelCompactorExternalBlobTest(@NotNull SimpleCompactorFactory 
compactorFactory, int concurrency) {
-        super(compactorFactory);
+    public ParallelCompactorExternalBlobTest(String name, @NotNull 
SimpleCompactorFactory compactorFactory, int concurrency) {
+        super(name, compactorFactory);
         this.concurrency = concurrency;
     }
 
diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
index f4e4d81ae8..28062d34b2 100644
--- 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
+++ 
b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
@@ -38,21 +38,21 @@ public class ParallelCompactorTest extends 
AbstractCompactorTest {
 
     private final int concurrency;
 
-    @Parameterized.Parameters
+    @Parameterized.Parameters(name = "{index}: {0} concurrency={2}")
     public static List<Object[]> parameters() {
         Integer[] concurrencyLevels = {1, 2, 4, 8, 16};
 
         List<Object[]> parameters = new ArrayList<>();
-        for (SimpleCompactorFactory factory : 
AbstractCompactorExternalBlobTest.compactorFactories()) {
+        for (Object[] args : AbstractCompactorTest.compactorFactories()) {
             for (int concurrency : concurrencyLevels) {
-                parameters.add(new Object[]{factory, concurrency});
+                parameters.add(new Object[]{args[0], args[1], concurrency});
             }
         }
         return parameters;
     }
 
-    public ParallelCompactorTest(@NotNull SimpleCompactorFactory 
compactorFactory, int concurrency) {
-        super(compactorFactory);
+    public ParallelCompactorTest(String name, @NotNull SimpleCompactorFactory 
compactorFactory, int concurrency) {
+        super(name, compactorFactory);
         this.concurrency = concurrency;
     }
 

Reply via email to