This is an automated email from the ASF dual-hosted git repository.
kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 2de589aabcf Refactor: Ensure that CompactionCandidate.currentStatus is
non-null (#19058)
2de589aabcf is described below
commit 2de589aabcfcd338e1043e2c989fcdd64e2bf4aa
Author: Kashif Faraz <[email protected]>
AuthorDate: Fri Feb 27 11:07:10 2026 +0530
Refactor: Ensure that CompactionCandidate.currentStatus is non-null (#19058)
---
.../ReindexingDeletionRuleOptimizerTest.java | 13 +++----
.../server/compaction/CompactionCandidate.java | 38 ++++++++++++++------
.../druid/server/compaction/CompactionStatus.java | 20 ++++++-----
.../DataSourceCompactibleSegmentIterator.java | 38 +++++++++++++-------
.../indexing/ClientCompactionIntervalSpecTest.java | 11 +++---
.../server/compaction/CompactionStatusTest.java | 40 ++++++++++------------
.../compaction/CompactionStatusTrackerTest.java | 10 +++---
.../MostFragmentedIntervalFirstPolicyTest.java | 7 ++--
.../coordinator/duty/CompactSegmentsTest.java | 10 +++---
9 files changed, 110 insertions(+), 77 deletions(-)
diff --git
a/indexing-service/src/test/java/org/apache/druid/indexing/compact/ReindexingDeletionRuleOptimizerTest.java
b/indexing-service/src/test/java/org/apache/druid/indexing/compact/ReindexingDeletionRuleOptimizerTest.java
index 47b69031218..fdcc1a3c723 100644
---
a/indexing-service/src/test/java/org/apache/druid/indexing/compact/ReindexingDeletionRuleOptimizerTest.java
+++
b/indexing-service/src/test/java/org/apache/druid/indexing/compact/ReindexingDeletionRuleOptimizerTest.java
@@ -363,8 +363,7 @@ public class ReindexingDeletionRuleOptimizerTest
List<DataSegment> segments = Arrays.stream(fingerprints)
.map(fp ->
DataSegment.builder(WIKI_SEGMENT).indexingStateFingerprint(fp).build())
.collect(Collectors.toList());
- return CompactionCandidate.from(segments, null)
- .withCurrentStatus(CompactionStatus.pending("segments need
compaction"));
+ return CompactionCandidate.from(segments, null,
CompactionStatus.pending("segments need compaction"));
}
private CompactionCandidate createCandidateWithNullFingerprints(int count)
@@ -373,8 +372,7 @@ public class ReindexingDeletionRuleOptimizerTest
for (int i = 0; i < count; i++) {
segments.add(DataSegment.builder(WIKI_SEGMENT).indexingStateFingerprint(null).build());
}
- return CompactionCandidate.from(segments, null)
- .withCurrentStatus(CompactionStatus.pending("segments need
compaction"));
+ return CompactionCandidate.from(segments, null,
CompactionStatus.pending("segments need compaction"));
}
private CompactionState createStateWithFilters(DimFilter... filters)
@@ -500,8 +498,11 @@ public class ReindexingDeletionRuleOptimizerTest
// Candidate with NEVER_COMPACTED status
List<DataSegment> segments = new ArrayList<>();
segments.add(DataSegment.builder(WIKI_SEGMENT).indexingStateFingerprint(null).build());
- CompactionCandidate candidate = CompactionCandidate.from(segments, null)
-
.withCurrentStatus(CompactionStatus.pending(CompactionStatus.NEVER_COMPACTED_REASON));
+ CompactionCandidate candidate = CompactionCandidate.from(
+ segments,
+ null,
+ CompactionStatus.pending(CompactionStatus.NEVER_COMPACTED_REASON)
+ );
InlineSchemaDataSourceCompactionConfig config =
createConfigWithFilter(expectedFilter, null);
CompactionJobParams params = createParams();
diff --git
a/server/src/main/java/org/apache/druid/server/compaction/CompactionCandidate.java
b/server/src/main/java/org/apache/druid/server/compaction/CompactionCandidate.java
index af8b32ebe6d..4d53d8e9cf5 100644
---
a/server/src/main/java/org/apache/druid/server/compaction/CompactionCandidate.java
+++
b/server/src/main/java/org/apache/druid/server/compaction/CompactionCandidate.java
@@ -19,13 +19,14 @@
package org.apache.druid.server.compaction;
-import org.apache.druid.error.InvalidInput;
+import org.apache.druid.error.DruidException;
import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.timeline.DataSegment;
import org.joda.time.Interval;
+import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Set;
@@ -46,13 +47,30 @@ public class CompactionCandidate
private final CompactionStatus currentStatus;
- public static CompactionCandidate from(
+ public static Interval getCompactionInterval(
List<DataSegment> segments,
@Nullable Granularity targetSegmentGranularity
)
+ {
+ final Set<Interval> segmentIntervals =
+
segments.stream().map(DataSegment::getInterval).collect(Collectors.toSet());
+ final Interval umbrellaInterval =
JodaUtils.umbrellaInterval(segmentIntervals);
+ return targetSegmentGranularity == null
+ ? umbrellaInterval
+ :
JodaUtils.umbrellaInterval(targetSegmentGranularity.getIterable(umbrellaInterval));
+ }
+
+ public static CompactionCandidate from(
+ List<DataSegment> segments,
+ @Nullable Granularity targetSegmentGranularity,
+ CompactionStatus currentStatus
+ )
{
if (segments == null || segments.isEmpty()) {
- throw InvalidInput.exception("Segments to compact must be non-empty");
+ throw DruidException.defensive("Segments to compact must be non-empty");
+ }
+ if (currentStatus == null) {
+ throw DruidException.defensive("CompactionCandidate must have a non-null
currentStatus");
}
final Set<Interval> segmentIntervals =
@@ -68,7 +86,7 @@ public class CompactionCandidate
umbrellaInterval,
compactionInterval,
segmentIntervals.size(),
- null
+ currentStatus
);
}
@@ -77,7 +95,7 @@ public class CompactionCandidate
Interval umbrellaInterval,
Interval compactionInterval,
int numDistinctSegmentIntervals,
- @Nullable CompactionStatus currentStatus
+ CompactionStatus currentStatus
)
{
this.segments = segments;
@@ -140,21 +158,19 @@ public class CompactionCandidate
@Nullable
public CompactionStatistics getCompactedStats()
{
- return (currentStatus == null || currentStatus.getCompactedStats() == null)
- ? null : currentStatus.getCompactedStats();
+ return currentStatus.getCompactedStats();
}
@Nullable
public CompactionStatistics getUncompactedStats()
{
- return (currentStatus == null || currentStatus.getUncompactedStats() ==
null)
- ? null : currentStatus.getUncompactedStats();
+ return currentStatus.getUncompactedStats();
}
/**
- * Current compaction status of the time chunk corresponding to this
candidate.
+ * Non-null current compaction status of the time chunk corresponding to
this candidate.
*/
- @Nullable
+ @Nonnull
public CompactionStatus getCurrentStatus()
{
return currentStatus;
diff --git
a/server/src/main/java/org/apache/druid/server/compaction/CompactionStatus.java
b/server/src/main/java/org/apache/druid/server/compaction/CompactionStatus.java
index 10076a71924..bf738aa5e88 100644
---
a/server/src/main/java/org/apache/druid/server/compaction/CompactionStatus.java
+++
b/server/src/main/java/org/apache/druid/server/compaction/CompactionStatus.java
@@ -259,7 +259,7 @@ public class CompactionStatus
* segments but only the first incomplete status is returned.
*/
static CompactionStatus compute(
- CompactionCandidate candidateSegments,
+ List<DataSegment> candidateSegments,
DataSourceCompactionConfig config,
@Nullable IndexingStateFingerprintMapper fingerprintMapper
)
@@ -348,7 +348,8 @@ public class CompactionStatus
private static class Evaluator
{
private final DataSourceCompactionConfig compactionConfig;
- private final CompactionCandidate candidateSegments;
+ private final List<DataSegment> candidateSegments;
+ private final long totalSegmentBytes;
private final ClientCompactionTaskQueryTuningConfig tuningConfig;
private final UserCompactionTaskGranularityConfig
configuredGranularitySpec;
@@ -362,13 +363,14 @@ public class CompactionStatus
private final IndexingStateFingerprintMapper fingerprintMapper;
private Evaluator(
- CompactionCandidate candidateSegments,
+ List<DataSegment> candidateSegments,
DataSourceCompactionConfig compactionConfig,
@Nullable String targetFingerprint,
@Nullable IndexingStateFingerprintMapper fingerprintMapper
)
{
this.candidateSegments = candidateSegments;
+ this.totalSegmentBytes =
candidateSegments.stream().mapToLong(DataSegment::getSize).sum();
this.compactionConfig = compactionConfig;
this.tuningConfig =
ClientCompactionTaskQueryTuningConfig.from(compactionConfig);
this.configuredGranularitySpec = compactionConfig.getGranularitySpec();
@@ -408,7 +410,7 @@ public class CompactionStatus
.map(f -> f.apply(this))
.filter(status -> !status.isComplete())
.map(CompactionStatus::getReason)
- .collect(Collectors.toList())
+ .toList()
);
// Any segments left in unknownStateToSegments passed all checks and
are considered compacted
@@ -417,7 +419,7 @@ public class CompactionStatus
.values()
.stream()
.flatMap(List::stream)
- .collect(Collectors.toList())
+ .toList()
);
}
@@ -467,7 +469,7 @@ public class CompactionStatus
mismatchedFingerprintToSegmentMap.values()
.stream()
.flatMap(List::stream)
- .collect(Collectors.toList())
+ .toList()
);
return CompactionStatus.pending("Segments have a mismatched
fingerprint and no fingerprint mapper is available");
}
@@ -508,7 +510,7 @@ public class CompactionStatus
*/
private CompactionStatus segmentsHaveBeenCompactedAtLeastOnce()
{
- for (DataSegment segment : candidateSegments.getSegments()) {
+ for (DataSegment segment : candidateSegments) {
final String fingerprint = segment.getIndexingStateFingerprint();
final CompactionState segmentState = segment.getLastCompactionState();
if (fingerprint != null) {
@@ -613,10 +615,10 @@ public class CompactionStatus
private CompactionStatus inputBytesAreWithinLimit()
{
final long inputSegmentSize =
compactionConfig.getInputSegmentSizeBytes();
- if (candidateSegments.getTotalBytes() > inputSegmentSize) {
+ if (totalSegmentBytes > inputSegmentSize) {
return CompactionStatus.skipped(
"'inputSegmentSize' exceeded: Total segment size[%d] is larger
than allowed inputSegmentSize[%d]",
- candidateSegments.getTotalBytes(), inputSegmentSize
+ totalSegmentBytes, inputSegmentSize
);
} else {
return COMPLETE;
diff --git
a/server/src/main/java/org/apache/druid/server/compaction/DataSourceCompactibleSegmentIterator.java
b/server/src/main/java/org/apache/druid/server/compaction/DataSourceCompactibleSegmentIterator.java
index ed80ec967b6..f818d6ca97a 100644
---
a/server/src/main/java/org/apache/druid/server/compaction/DataSourceCompactibleSegmentIterator.java
+++
b/server/src/main/java/org/apache/druid/server/compaction/DataSourceCompactibleSegmentIterator.java
@@ -121,9 +121,11 @@ public class DataSourceCompactibleSegmentIterator
implements CompactionSegmentIt
if (!partialEternitySegments.isEmpty()) {
// Do not use the target segment granularity in the
CompactionCandidate
// as Granularities.getIterable() will cause OOM due to the above
issue
- CompactionCandidate candidatesWithStatus = CompactionCandidate
- .from(partialEternitySegments, null)
- .withCurrentStatus(CompactionStatus.skipped("Segments have
partial-eternity intervals"));
+ CompactionCandidate candidatesWithStatus =
CompactionCandidate.from(
+ partialEternitySegments,
+ null,
+ CompactionStatus.skipped("Segments have partial-eternity
intervals")
+ );
skippedSegments.add(candidatesWithStatus);
return;
}
@@ -329,16 +331,19 @@ public class DataSourceCompactibleSegmentIterator
implements CompactionSegmentIt
continue;
}
- final CompactionCandidate candidates =
CompactionCandidate.from(segments, config.getSegmentGranularity());
- final CompactionStatus compactionStatus =
CompactionStatus.compute(candidates, config, fingerprintMapper);
- final CompactionCandidate candidatesWithStatus =
candidates.withCurrentStatus(compactionStatus);
+ final CompactionStatus compactionStatus =
CompactionStatus.compute(segments, config, fingerprintMapper);
+ final CompactionCandidate candidates = CompactionCandidate.from(
+ segments,
+ config.getSegmentGranularity(),
+ compactionStatus
+ );
if (compactionStatus.isComplete()) {
- compactedSegments.add(candidatesWithStatus);
+ compactedSegments.add(candidates);
} else if (compactionStatus.isSkipped()) {
- skippedSegments.add(candidatesWithStatus);
+ skippedSegments.add(candidates);
} else if (!queuedIntervals.contains(candidates.getUmbrellaInterval())) {
- queue.add(candidatesWithStatus);
+ queue.add(candidates);
queuedIntervals.add(candidates.getUmbrellaInterval());
}
}
@@ -372,16 +377,23 @@ public class DataSourceCompactibleSegmentIterator
implements CompactionSegmentIt
timeline.findNonOvershadowedObjectsInInterval(skipInterval,
Partitions.ONLY_COMPLETE)
);
if (!CollectionUtils.isNullOrEmpty(segments)) {
- final CompactionCandidate candidates =
CompactionCandidate.from(segments, config.getSegmentGranularity());
+ final Interval compactionInterval =
CompactionCandidate.getCompactionInterval(
+ segments,
+ config.getSegmentGranularity()
+ );
final CompactionStatus reason;
- if (candidates.getCompactionInterval().overlaps(latestSkipInterval)) {
+ if (compactionInterval.overlaps(latestSkipInterval)) {
reason = CompactionStatus.skipped("skip offset from latest[%s]",
skipOffset);
} else {
reason = CompactionStatus.skipped("interval locked by another task");
}
- final CompactionCandidate candidatesWithStatus =
candidates.withCurrentStatus(reason);
+ final CompactionCandidate candidatesWithStatus =
CompactionCandidate.from(
+ segments,
+ config.getSegmentGranularity(),
+ reason
+ );
skippedSegments.add(candidatesWithStatus);
}
}
@@ -405,7 +417,7 @@ public class DataSourceCompactibleSegmentIterator
implements CompactionSegmentIt
// findNonOvershadowedObjectsInInterval() may return segments merely
intersecting with lookupInterval, while
// we are interested only in segments fully lying within
lookupInterval here.
.filter(segment -> lookupInterval.contains(segment.getInterval()))
- .collect(Collectors.toList());
+ .toList();
if (segments.isEmpty()) {
continue;
diff --git
a/server/src/test/java/org/apache/druid/client/indexing/ClientCompactionIntervalSpecTest.java
b/server/src/test/java/org/apache/druid/client/indexing/ClientCompactionIntervalSpecTest.java
index 46ecc64d72d..ee5ed58f1c6 100644
---
a/server/src/test/java/org/apache/druid/client/indexing/ClientCompactionIntervalSpecTest.java
+++
b/server/src/test/java/org/apache/druid/client/indexing/ClientCompactionIntervalSpecTest.java
@@ -25,6 +25,7 @@ import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.server.compaction.CompactionCandidate;
+import org.apache.druid.server.compaction.CompactionStatus;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NoneShardSpec;
import org.junit.Assert;
@@ -73,7 +74,7 @@ public class ClientCompactionIntervalSpecTest
public void testFromSegmentWithNoSegmentGranularity()
{
// The umbrella interval of segments is 2015-02-12/2015-04-14
- CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), null);
+ CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), null, CompactionStatus.running(""));
Assert.assertEquals(Intervals.of("2015-02-12/2015-04-14"),
actual.getCompactionInterval());
}
@@ -81,7 +82,7 @@ public class ClientCompactionIntervalSpecTest
public void testFromSegmentWitSegmentGranularitySameAsSegment()
{
// The umbrella interval of segments is 2015-04-11/2015-04-12
- CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1), Granularities.DAY);
+ CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1), Granularities.DAY,
CompactionStatus.running(""));
Assert.assertEquals(Intervals.of("2015-04-11/2015-04-12"),
actual.getCompactionInterval());
}
@@ -89,7 +90,7 @@ public class ClientCompactionIntervalSpecTest
public void testFromSegmentWithCoarserSegmentGranularity()
{
// The umbrella interval of segments is 2015-02-12/2015-04-14
- CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.YEAR);
+ CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.YEAR, CompactionStatus.running(""));
// The compaction interval should be expanded to start of the year and end
of the year to cover the segmentGranularity
Assert.assertEquals(Intervals.of("2015-01-01/2016-01-01"),
actual.getCompactionInterval());
}
@@ -98,7 +99,7 @@ public class ClientCompactionIntervalSpecTest
public void
testFromSegmentWithFinerSegmentGranularityAndUmbrellaIntervalAlign()
{
// The umbrella interval of segments is 2015-02-12/2015-04-14
- CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.DAY);
+ CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.DAY, CompactionStatus.running(""));
// The segmentGranularity of DAY aligns with the umbrella interval
(umbrella interval can be evenly divided into the segmentGranularity)
Assert.assertEquals(Intervals.of("2015-02-12/2015-04-14"),
actual.getCompactionInterval());
}
@@ -107,7 +108,7 @@ public class ClientCompactionIntervalSpecTest
public void
testFromSegmentWithFinerSegmentGranularityAndUmbrellaIntervalNotAlign()
{
// The umbrella interval of segments is 2015-02-12/2015-04-14
- CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.WEEK);
+ CompactionCandidate actual =
CompactionCandidate.from(ImmutableList.of(dataSegment1, dataSegment2,
dataSegment3), Granularities.WEEK, CompactionStatus.running(""));
// The segmentGranularity of WEEK does not align with the umbrella
interval (umbrella interval cannot be evenly divided into the segmentGranularity)
// Hence the compaction interval is modified to align with the
segmentGranularity
Assert.assertEquals(Intervals.of("2015-02-09/2015-04-20"),
actual.getCompactionInterval());
diff --git
a/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTest.java
b/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTest.java
index 370dcaf444d..e466f92f265 100644
---
a/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTest.java
+++
b/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTest.java
@@ -368,7 +368,7 @@ public class CompactionStatusTest
final DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), Granularities.HOUR),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
@@ -418,7 +418,7 @@ public class CompactionStatusTest
final DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), Granularities.HOUR),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
@@ -473,7 +473,7 @@ public class CompactionStatusTest
final DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), Granularities.HOUR),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
@@ -507,7 +507,7 @@ public class CompactionStatusTest
DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), null), compactionConfig,
fingerprintMapper
+ List.of(segment), compactionConfig, fingerprintMapper
);
Assert.assertTrue(status.isComplete());
}
@@ -540,7 +540,7 @@ public class CompactionStatusTest
DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), null), compactionConfig,
fingerprintMapper
+ List.of(segment), compactionConfig, fingerprintMapper
);
Assert.assertFalse(status.isComplete());
Assert.assertTrue(status.getReason().startsWith("'transformSpec'
mismatch"));
@@ -574,9 +574,7 @@ public class CompactionStatusTest
List<DataSegment> segments = List.of(
DataSegment.builder(WIKI_SEGMENT).indexingStateFingerprint(oldFingerprint).build()
);
- CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(segments, null), newConfig, fingerprintMapper
- );
+ CompactionStatus status = CompactionStatus.compute(segments, newConfig,
fingerprintMapper);
Assert.assertFalse(status.isComplete());
Assert.assertTrue(status.getReason().startsWith("'transformSpec'
mismatch"));
}
@@ -628,7 +626,7 @@ public class CompactionStatusTest
final DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), null),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
@@ -682,7 +680,7 @@ public class CompactionStatusTest
final DataSegment segment =
DataSegment.builder(WIKI_SEGMENT).lastCompactionState(lastCompactionState).build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), null),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
@@ -714,7 +712,7 @@ public class CompactionStatusTest
syncCacheFromManager();
verifyEvaluationNeedsCompactionBecauseWithCustomSegments(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
"'segmentGranularity' mismatch: required[DAY], current[HOUR]"
);
@@ -750,7 +748,7 @@ public class CompactionStatusTest
syncCacheFromManager();
verifyEvaluationNeedsCompactionBecauseWithCustomSegments(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
"'segmentGranularity' mismatch: required[DAY], current[HOUR]"
);
@@ -773,7 +771,7 @@ public class CompactionStatusTest
syncCacheFromManager();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
fingerprintMapper
);
@@ -793,7 +791,7 @@ public class CompactionStatusTest
.build();
verifyEvaluationNeedsCompactionBecauseWithCustomSegments(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
"One or more fingerprinted segments do not have a cached indexing
state"
);
@@ -818,7 +816,7 @@ public class CompactionStatusTest
);
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
fingerprintMapper
);
@@ -847,7 +845,7 @@ public class CompactionStatusTest
verifyEvaluationNeedsCompactionBecauseWithCustomSegments(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
"'segmentGranularity' mismatch: required[DAY], current[HOUR]"
);
@@ -872,7 +870,7 @@ public class CompactionStatusTest
);
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
fingerprintMapper
);
@@ -902,7 +900,7 @@ public class CompactionStatusTest
);
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(segments, null),
+ segments,
compactionConfig,
fingerprintMapper
);
@@ -919,13 +917,13 @@ public class CompactionStatusTest
* Allows customization of the segments in the compaction candidate.
*/
private void verifyEvaluationNeedsCompactionBecauseWithCustomSegments(
- CompactionCandidate candidate,
+ List<DataSegment> segments,
DataSourceCompactionConfig compactionConfig,
String expectedReason
)
{
final CompactionStatus status = CompactionStatus.compute(
- candidate,
+ segments,
compactionConfig,
fingerprintMapper
);
@@ -945,7 +943,7 @@ public class CompactionStatusTest
.lastCompactionState(lastCompactionState)
.build();
final CompactionStatus status = CompactionStatus.compute(
- CompactionCandidate.from(List.of(segment), null),
+ List.of(segment),
compactionConfig,
fingerprintMapper
);
diff --git
a/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTrackerTest.java
b/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTrackerTest.java
index 1314a1a0bc7..98491d3e376 100644
---
a/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTrackerTest.java
+++
b/server/src/test/java/org/apache/druid/server/compaction/CompactionStatusTrackerTest.java
@@ -48,7 +48,7 @@ public class CompactionStatusTrackerTest
public void testGetLatestTaskStatusForSubmittedTask()
{
final CompactionCandidate candidateSegments
- = CompactionCandidate.from(List.of(WIKI_SEGMENT), null);
+ = CompactionCandidate.from(List.of(WIKI_SEGMENT), null,
CompactionStatus.running(""));
statusTracker.onTaskSubmitted("task1", candidateSegments);
CompactionTaskStatus status =
statusTracker.getLatestTaskStatus(candidateSegments);
@@ -59,7 +59,7 @@ public class CompactionStatusTrackerTest
public void testGetLatestTaskStatusForSuccessfulTask()
{
final CompactionCandidate candidateSegments
- = CompactionCandidate.from(List.of(WIKI_SEGMENT), null);
+ = CompactionCandidate.from(List.of(WIKI_SEGMENT), null,
CompactionStatus.running(""));
statusTracker.onTaskSubmitted("task1", candidateSegments);
statusTracker.onTaskFinished("task1", TaskStatus.success("task1"));
@@ -71,7 +71,7 @@ public class CompactionStatusTrackerTest
public void testGetLatestTaskStatusForFailedTask()
{
final CompactionCandidate candidateSegments
- = CompactionCandidate.from(List.of(WIKI_SEGMENT), null);
+ = CompactionCandidate.from(List.of(WIKI_SEGMENT), null,
CompactionStatus.running(""));
statusTracker.onTaskSubmitted("task1", candidateSegments);
statusTracker.onTaskFinished("task1", TaskStatus.failure("task1", "some
failure"));
@@ -84,7 +84,7 @@ public class CompactionStatusTrackerTest
public void testGetLatestTaskStatusForRepeatedlyFailingTask()
{
final CompactionCandidate candidateSegments
- = CompactionCandidate.from(List.of(WIKI_SEGMENT), null);
+ = CompactionCandidate.from(List.of(WIKI_SEGMENT), null,
CompactionStatus.running(""));
statusTracker.onTaskSubmitted("task1", candidateSegments);
statusTracker.onTaskFinished("task1", TaskStatus.failure("task1", "some
failure"));
@@ -106,7 +106,7 @@ public class CompactionStatusTrackerTest
{
final NewestSegmentFirstPolicy policy = new NewestSegmentFirstPolicy(null);
final CompactionCandidate candidateSegments
- = CompactionCandidate.from(List.of(WIKI_SEGMENT), null);
+ = CompactionCandidate.from(List.of(WIKI_SEGMENT), null,
CompactionStatus.running(""));
// Verify that interval is originally eligible for compaction
CompactionStatus status
diff --git
a/server/src/test/java/org/apache/druid/server/compaction/MostFragmentedIntervalFirstPolicyTest.java
b/server/src/test/java/org/apache/druid/server/compaction/MostFragmentedIntervalFirstPolicyTest.java
index 1b93bfa03a5..fd5c21e0a2c 100644
---
a/server/src/test/java/org/apache/druid/server/compaction/MostFragmentedIntervalFirstPolicyTest.java
+++
b/server/src/test/java/org/apache/druid/server/compaction/MostFragmentedIntervalFirstPolicyTest.java
@@ -237,8 +237,11 @@ public class MostFragmentedIntervalFirstPolicyTest
numSegments,
1L
);
- return CompactionCandidate.from(List.of(SEGMENT), null)
-
.withCurrentStatus(CompactionStatus.pending(dummyCompactedStats,
uncompactedStats, ""));
+ return CompactionCandidate.from(
+ List.of(SEGMENT),
+ null,
+ CompactionStatus.pending(dummyCompactedStats, uncompactedStats, "")
+ );
}
private void verifyCandidateIsEligible(CompactionCandidate candidate,
MostFragmentedIntervalFirstPolicy policy)
diff --git
a/server/src/test/java/org/apache/druid/server/coordinator/duty/CompactSegmentsTest.java
b/server/src/test/java/org/apache/druid/server/coordinator/duty/CompactSegmentsTest.java
index 4ea98e43ad5..24da185296c 100644
---
a/server/src/test/java/org/apache/druid/server/coordinator/duty/CompactSegmentsTest.java
+++
b/server/src/test/java/org/apache/druid/server/coordinator/duty/CompactSegmentsTest.java
@@ -876,7 +876,7 @@ public class CompactSegmentsTest
// All segments are compacted at the same time since we changed the segment
granularity to YEAR and all segments
// are within the same year
Assert.assertEquals(
- CompactionCandidate.from(datasourceToSegments.get(dataSource),
Granularities.YEAR).getCompactionInterval(),
+
CompactionCandidate.getCompactionInterval(datasourceToSegments.get(dataSource),
Granularities.YEAR),
taskPayload.getIoConfig().getInputSpec().getInterval()
);
@@ -1067,7 +1067,7 @@ public class CompactSegmentsTest
// All segments are compacted at the same time since we changed the segment
granularity to YEAR and all segments
// are within the same year
Assert.assertEquals(
- CompactionCandidate.from(datasourceToSegments.get(dataSource),
Granularities.YEAR).getCompactionInterval(),
+
CompactionCandidate.getCompactionInterval(datasourceToSegments.get(dataSource),
Granularities.YEAR),
taskPayload.getIoConfig().getInputSpec().getInterval()
);
@@ -1162,7 +1162,7 @@ public class CompactSegmentsTest
// All segments are compacted at the same time since we changed the segment
granularity to YEAR and all segments
// are within the same year
Assert.assertEquals(
- CompactionCandidate.from(datasourceToSegments.get(dataSource),
Granularities.YEAR).getCompactionInterval(),
+
CompactionCandidate.getCompactionInterval(datasourceToSegments.get(dataSource),
Granularities.YEAR),
taskPayload.getIoConfig().getInputSpec().getInterval()
);
@@ -1404,7 +1404,7 @@ public class CompactSegmentsTest
ClientCompactionTaskQuery taskPayload = (ClientCompactionTaskQuery)
payloadCaptor.getValue();
Assert.assertEquals(
- CompactionCandidate.from(segments,
Granularities.DAY).getCompactionInterval(),
+ CompactionCandidate.getCompactionInterval(segments, Granularities.DAY),
taskPayload.getIoConfig().getInputSpec().getInterval()
);
@@ -1467,7 +1467,7 @@ public class CompactSegmentsTest
ClientCompactionTaskQuery taskPayload = (ClientCompactionTaskQuery)
payloadCaptor.getValue();
Assert.assertEquals(
- CompactionCandidate.from(segments,
Granularities.YEAR).getCompactionInterval(),
+ CompactionCandidate.getCompactionInterval(segments,
Granularities.YEAR),
taskPayload.getIoConfig().getInputSpec().getInterval()
);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]