This is an automated email from the ASF dual-hosted git repository.
kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new a0234c4e13 Add sampling factor for DeterminePartitionsJob (#13840)
a0234c4e13 is described below
commit a0234c4e131b2eae3b2e464704e0a225b7c965f4
Author: hqx871 <[email protected]>
AuthorDate: Fri Aug 11 13:12:25 2023 +0800
Add sampling factor for DeterminePartitionsJob (#13840)
There are two types of DeterminePartitionsJob:
- When the input data is not assumed grouped, it may contain duplicate rows.
In this case, two MR jobs are launched: the first groups the input to
remove duplicate rows, and the second performs a global sort to find the
lower and upper bounds for the target segments.
- When the input data is assumed grouped, we only need to launch the global
sort MR job to find the lower and upper bounds for the segments.
Sampling strategy:
- If the input data is assumed grouped, sample randomly on the mapper side
of the global sort MR job.
- If the input data is not assumed grouped, sample on the mapper side of the
group job: hash the timestamp and all dimensions, and take the hash modulo
the sampling factor. Random sampling is not used here because duplicate
rows must be either all kept or all dropped.
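As a rough sketch of how the sampler added in this patch behaves (the class,
constructor, and methods below are the ones introduced by this commit; the
class name SamplerSketch and all numeric values are made up for illustration):

    package org.apache.druid.indexer;

    import java.nio.charset.StandardCharsets;

    // Illustrative sketch only. It lives in the same package because the
    // shouldEmitRow methods are package-private. All numbers are hypothetical.
    public class SamplerSketch
    {
      public static void main(String[] args)
      {
        // e.g. determinePartitionsSamplingFactor = 500 in the tuningConfig,
        // targetPartitionSize = 5,000,000 and maxRowsPerSegment = 7,500,000.
        DeterminePartitionsJobSampler sampler =
            new DeterminePartitionsJobSampler(500, 5_000_000, 7_500_000);

        // The reducer-side thresholds are scaled down by the sampling factor,
        // since only about 1-in-500 rows survive sampling.
        System.out.println(sampler.getSampledTargetPartitionSize()); // 10000
        System.out.println(sampler.getSampledMaxRowsPerSegment());   // 15000

        // Assume-grouped input (no duplicates): cheap random sampling.
        boolean emitGrouped = sampler.shouldEmitRow();

        // Non-grouped input (possible duplicates): deterministic hash-mod
        // sampling on the serialized group key, so identical rows are either
        // all kept or all dropped together.
        byte[] groupKeyBytes = "example-group-key".getBytes(StandardCharsets.UTF_8);
        boolean emitUngrouped = sampler.shouldEmitRow(groupKeyBytes);

        System.out.println(emitGrouped + " " + emitUngrouped);
      }
    }

Users opt in by setting "determinePartitionsSamplingFactor" in the "hadoop"
tuningConfig; the default is 1 (no sampling), and null or values below 1 fall
back to 1.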
---
.../MaterializedViewSupervisorSpec.java | 3 +-
.../druid/indexer/DeterminePartitionsJob.java | 46 +++++++---
.../indexer/DeterminePartitionsJobSampler.java | 70 ++++++++++++++
.../apache/druid/indexer/HadoopTuningConfig.java | 37 +++++++-
.../druid/indexer/BatchDeltaIngestionTest.java | 3 +-
.../indexer/DetermineHashedPartitionsJobTest.java | 3 +-
.../indexer/DeterminePartitionsJobSamplerTest.java | 102 +++++++++++++++++++++
.../druid/indexer/DeterminePartitionsJobTest.java | 3 +-
.../indexer/DetermineRangePartitionsJobTest.java | 3 +-
.../indexer/HadoopDruidIndexerConfigTest.java | 3 +-
.../druid/indexer/HadoopTuningConfigTest.java | 3 +-
.../druid/indexer/IndexGeneratorJobTest.java | 3 +-
.../org/apache/druid/indexer/JobHelperTest.java | 3 +-
.../indexer/path/GranularityPathSpecTest.java | 3 +-
14 files changed, 258 insertions(+), 27 deletions(-)
diff --git a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
index 6daaad9d73..eec08ff133 100644
--- a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
+++ b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
@@ -199,7 +199,8 @@ public class MaterializedViewSupervisorSpec implements SupervisorSpec
tuningConfig.isLogParseExceptions(),
tuningConfig.getMaxParseExceptions(),
tuningConfig.isUseYarnRMJobStatusFallback(),
- tuningConfig.getAwaitSegmentAvailabilityTimeoutMillis()
+ tuningConfig.getAwaitSegmentAvailabilityTimeoutMillis(),
+ HadoopTuningConfig.DEFAULT_DETERMINE_PARTITIONS_SAMPLING_FACTOR
);
// generate granularity
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
index 11839b6931..d7e4eb2e10 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
@@ -332,10 +332,22 @@ public class DeterminePartitionsJob implements Jobby
return failureCause;
}
+ private static DeterminePartitionsJobSampler createSampler(HadoopDruidIndexerConfig config)
+ {
+ HadoopTuningConfig tuningConfig = config.getSchema().getTuningConfig();
+ final DimensionRangePartitionsSpec partitionsSpec = (DimensionRangePartitionsSpec) config.getPartitionsSpec();
+ return new DeterminePartitionsJobSampler(
+ tuningConfig.getDeterminePartitionsSamplingFactor(),
+ config.getTargetPartitionSize(),
+ partitionsSpec.getMaxRowsPerSegment()
+ );
+ }
+
public static class DeterminePartitionsGroupByMapper extends HadoopDruidIndexerMapper<BytesWritable, NullWritable>
{
@Nullable
private Granularity rollupGranularity = null;
+ private DeterminePartitionsJobSampler sampler;
@Override
protected void setup(Context context)
@@ -343,6 +355,7 @@ public class DeterminePartitionsJob implements Jobby
{
super.setup(context);
rollupGranularity = getConfig().getGranularitySpec().getQueryGranularity();
+ sampler = createSampler(getConfig());
}
@Override
@@ -355,10 +368,14 @@ public class DeterminePartitionsJob implements Jobby
rollupGranularity.bucketStart(inputRow.getTimestamp()).getMillis(),
inputRow
);
- context.write(
- new BytesWritable(HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey)),
- NullWritable.get()
- );
+
+ final byte[] groupKeyBytes = HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey);
+ if (sampler.shouldEmitRow(groupKeyBytes)) {
+ context.write(
+ new BytesWritable(groupKeyBytes),
+ NullWritable.get()
+ );
+ }
context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);
}
@@ -413,6 +430,7 @@ public class DeterminePartitionsJob implements Jobby
extends HadoopDruidIndexerMapper<BytesWritable, Text>
{
private DeterminePartitionsDimSelectionMapperHelper helper;
+ private DeterminePartitionsJobSampler sampler;
@Override
protected void setup(Context context)
@@ -421,6 +439,7 @@ public class DeterminePartitionsJob implements Jobby
super.setup(context);
final HadoopDruidIndexerConfig config = HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
helper = new DeterminePartitionsDimSelectionMapperHelper(config);
+ sampler = createSampler(getConfig());
}
@Override
@@ -429,11 +448,13 @@ public class DeterminePartitionsJob implements Jobby
Context context
) throws IOException, InterruptedException
{
- final Map<String, Iterable<String>> dims = new HashMap<>();
- for (final String dim : inputRow.getDimensions()) {
- dims.put(dim, inputRow.getDimension(dim));
+ if (sampler.shouldEmitRow()) {
+ final Map<String, Iterable<String>> dims = new HashMap<>();
+ for (final String dim : inputRow.getDimensions()) {
+ dims.put(dim, inputRow.getDimension(dim));
+ }
+ helper.emitDimValueCounts(context, DateTimes.utc(inputRow.getTimestampFromEpoch()), dims);
}
- helper.emitDimValueCounts(context, DateTimes.utc(inputRow.getTimestampFromEpoch()), dims);
}
}
@@ -705,6 +726,7 @@ public class DeterminePartitionsJob implements Jobby
// We'll store possible partitions in here
final Map<List<String>, DimPartitions> dimPartitionss = new HashMap<>();
final DimensionRangePartitionsSpec partitionsSpec = (DimensionRangePartitionsSpec) config.getPartitionsSpec();
+ final DeterminePartitionsJobSampler sampler = createSampler(config);
while (iterator.hasNext()) {
final DimValueCount dvc = iterator.next();
@@ -728,7 +750,7 @@ public class DeterminePartitionsJob implements Jobby
}
// See if we need to cut a new partition ending immediately before this dimension value
- if (currentDimPartition.rows > 0 && currentDimPartition.rows + dvc.numRows > config.getTargetPartitionSize()) {
+ if (currentDimPartition.rows > 0 && currentDimPartition.rows + dvc.numRows > sampler.getSampledTargetPartitionSize()) {
final ShardSpec shardSpec = createShardSpec(
partitionsSpec instanceof SingleDimensionPartitionsSpec,
currentDimPartitions.dims,
@@ -764,7 +786,7 @@ public class DeterminePartitionsJob implements Jobby
// One more shard to go
final ShardSpec shardSpec;
- if (currentDimPartition.rows < config.getTargetPartitionSize() * SHARD_COMBINE_THRESHOLD &&
+ if (currentDimPartition.rows < sampler.getSampledTargetPartitionSize() * SHARD_COMBINE_THRESHOLD &&
!currentDimPartitions.partitions.isEmpty()) {
// Combine with previous shard if it exists and the current shard is small enough
final DimPartition previousDimPartition = currentDimPartitions.partitions.remove(
@@ -850,7 +872,7 @@ public class DeterminePartitionsJob implements Jobby
// Make sure none of these shards are oversized
boolean oversized = false;
for (final DimPartition partition : dimPartitions.partitions) {
- if (partition.rows > partitionsSpec.getMaxRowsPerSegment()) {
+ if (partition.rows > sampler.getSampledMaxRowsPerSegment()) {
log.info("Dimension[%s] has an oversized shard: %s", dimPartitions.dims, partition.shardSpec);
oversized = true;
}
@@ -861,7 +883,7 @@ public class DeterminePartitionsJob implements Jobby
}
final int cardinality = dimPartitions.getCardinality();
- final long distance = dimPartitions.getDistanceSquaredFromTarget(config.getTargetPartitionSize());
+ final long distance = dimPartitions.getDistanceSquaredFromTarget(sampler.getSampledTargetPartitionSize());
if (cardinality > maxCardinality) {
maxCardinality = cardinality;
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJobSampler.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJobSampler.java
new file mode 100644
index 0000000000..d26e818e92
--- /dev/null
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJobSampler.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexer;
+
+import com.google.common.hash.HashFunction;
+import com.google.common.hash.Hashing;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+public class DeterminePartitionsJobSampler
+{
+ private static final HashFunction HASH_FUNCTION = Hashing.murmur3_32();
+
+ private final int samplingFactor;
+
+ private final int sampledTargetPartitionSize;
+
+ private final int sampledMaxRowsPerSegment;
+
+ public DeterminePartitionsJobSampler(int samplingFactor, int targetPartitionSize, int maxRowsPerSegment)
+ {
+ this.samplingFactor = Math.max(samplingFactor, 1);
+ this.sampledTargetPartitionSize = targetPartitionSize / this.samplingFactor;
+ this.sampledMaxRowsPerSegment = maxRowsPerSegment / this.samplingFactor;
+ }
+
+ /**
+ * If the input rows may contain duplicates, we sample by hashing the whole group key and
+ * taking the hash modulo the sampling factor, so identical rows are either all emitted or
+ * all skipped. Since we hash the whole group key, data skew is unlikely as long as the
+ * hash function is balanced enough.
+ */
+ boolean shouldEmitRow(byte[] groupKeyBytes)
+ {
+ return samplingFactor == 1 || HASH_FUNCTION.hashBytes(groupKeyBytes).asInt() % samplingFactor == 0;
+ }
+
+ /**
+ * If the input rows contain no duplicates, we can sample at random.
+ */
+ boolean shouldEmitRow()
+ {
+ return samplingFactor == 1 || ThreadLocalRandom.current().nextInt(samplingFactor) == 0;
+ }
+
+ public int getSampledTargetPartitionSize()
+ {
+ return sampledTargetPartitionSize;
+ }
+
+ public int getSampledMaxRowsPerSegment()
+ {
+ return sampledMaxRowsPerSegment;
+ }
+}
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
index 7371e70653..3a18bf10d9 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
@@ -41,6 +41,8 @@ import java.util.Map;
@JsonTypeName("hadoop")
public class HadoopTuningConfig implements TuningConfig
{
+ public static final int DEFAULT_DETERMINE_PARTITIONS_SAMPLING_FACTOR = 1;
+
private static final DimensionBasedPartitionsSpec DEFAULT_PARTITIONS_SPEC = HashedPartitionsSpec.defaultSpec();
private static final Map<Long, List<HadoopyShardSpec>> DEFAULT_SHARD_SPECS = ImmutableMap.of();
private static final IndexSpec DEFAULT_INDEX_SPEC = IndexSpec.DEFAULT;
@@ -74,7 +76,8 @@ public class HadoopTuningConfig implements TuningConfig
null,
null,
null,
- null
+ null,
+ DEFAULT_DETERMINE_PARTITIONS_SAMPLING_FACTOR
);
}
@Nullable
@@ -102,6 +105,15 @@ public class HadoopTuningConfig implements TuningConfig
private final int maxParseExceptions;
private final boolean useYarnRMJobStatusFallback;
private final long awaitSegmentAvailabilityTimeoutMillis;
+ // The sampling factor is currently only used with a range partitions spec. With a range
+ // partitions spec, we need to launch many mappers and one reducer to perform a global sort
+ // and find the upper and lower bounds for every segment. This MR job can take a long time
+ // when the input data is large, so we can sample the input to make it run faster; after
+ // all, segment sizes do not need to match targetRowsPerSegment exactly. For example, if we
+ // ingest 10,000,000,000 rows with a targetRowsPerSegment of 5,000,000, a sampling factor
+ // of 500 means the MR job only needs to process 20,000,000 rows, which saves a lot of time.
+ private final int determinePartitionsSamplingFactor;
@JsonCreator
public HadoopTuningConfig(
@@ -130,7 +142,8 @@ public class HadoopTuningConfig implements TuningConfig
final @JsonProperty("logParseExceptions") @Nullable Boolean
logParseExceptions,
final @JsonProperty("maxParseExceptions") @Nullable Integer
maxParseExceptions,
final @JsonProperty("useYarnRMJobStatusFallback") @Nullable Boolean
useYarnRMJobStatusFallback,
- final @JsonProperty("awaitSegmentAvailabilityTimeoutMillis") @Nullable
Long awaitSegmentAvailabilityTimeoutMillis
+ final @JsonProperty("awaitSegmentAvailabilityTimeoutMillis") @Nullable
Long awaitSegmentAvailabilityTimeoutMillis,
+ final @JsonProperty("determinePartitionsSamplingFactor") @Nullable
Integer determinePartitionsSamplingFactor
)
{
this.workingPath = workingPath;
@@ -182,6 +195,11 @@ public class HadoopTuningConfig implements TuningConfig
} else {
this.awaitSegmentAvailabilityTimeoutMillis = awaitSegmentAvailabilityTimeoutMillis;
}
+ if (determinePartitionsSamplingFactor == null || determinePartitionsSamplingFactor < 1) {
+ this.determinePartitionsSamplingFactor = 1;
+ } else {
+ this.determinePartitionsSamplingFactor = determinePartitionsSamplingFactor;
+ }
}
@Nullable
@@ -336,6 +354,12 @@ public class HadoopTuningConfig implements TuningConfig
return awaitSegmentAvailabilityTimeoutMillis;
}
+ @JsonProperty
+ public int getDeterminePartitionsSamplingFactor()
+ {
+ return determinePartitionsSamplingFactor;
+ }
+
public HadoopTuningConfig withWorkingPath(String path)
{
return new HadoopTuningConfig(
@@ -363,7 +387,8 @@ public class HadoopTuningConfig implements TuningConfig
logParseExceptions,
maxParseExceptions,
useYarnRMJobStatusFallback,
- awaitSegmentAvailabilityTimeoutMillis
+ awaitSegmentAvailabilityTimeoutMillis,
+ determinePartitionsSamplingFactor
);
}
@@ -394,7 +419,8 @@ public class HadoopTuningConfig implements TuningConfig
logParseExceptions,
maxParseExceptions,
useYarnRMJobStatusFallback,
- awaitSegmentAvailabilityTimeoutMillis
+ awaitSegmentAvailabilityTimeoutMillis,
+ determinePartitionsSamplingFactor
);
}
@@ -425,7 +451,8 @@ public class HadoopTuningConfig implements TuningConfig
logParseExceptions,
maxParseExceptions,
useYarnRMJobStatusFallback,
- awaitSegmentAvailabilityTimeoutMillis
+ awaitSegmentAvailabilityTimeoutMillis,
+ determinePartitionsSamplingFactor
);
}
}
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
index 1e92584e71..ce725230b9 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
@@ -489,7 +489,8 @@ public class BatchDeltaIngestionTest
null,
null,
null,
- null
+ null,
+ 1
)
)
);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
index 5f7b0157fb..fb1ff1520a 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
@@ -231,7 +231,8 @@ public class DetermineHashedPartitionsJobTest
null,
null,
null,
- null
+ null,
+ 1
)
);
this.indexerConfig = new HadoopDruidIndexerConfig(ingestionSpec);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobSamplerTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobSamplerTest.java
new file mode 100644
index 0000000000..21f6be8114
--- /dev/null
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobSamplerTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexer;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.nio.charset.StandardCharsets;
+import java.util.UUID;
+
+public class DeterminePartitionsJobSamplerTest
+{
+ @Test
+ public void testSampled()
+ {
+ int samplingFactor = 10;
+ int targetPartitionSize = 1000000;
+ int maxRowsPerSegment = 5000000;
+ DeterminePartitionsJobSampler sampler = new DeterminePartitionsJobSampler(
+ samplingFactor,
+ targetPartitionSize,
+ maxRowsPerSegment
+ );
+ Assert.assertEquals(100000, sampler.getSampledTargetPartitionSize());
+ Assert.assertEquals(500000, sampler.getSampledMaxRowsPerSegment());
+ }
+
+ @Test
+ public void testNotSampled()
+ {
+ int samplingFactor = 0;
+ int targetPartitionSize = 1000000;
+ int maxRowsPerSegment = 5000000;
+ DeterminePartitionsJobSampler sampler = new DeterminePartitionsJobSampler(
+ samplingFactor,
+ targetPartitionSize,
+ maxRowsPerSegment
+ );
+ Assert.assertEquals(targetPartitionSize, sampler.getSampledTargetPartitionSize());
+ Assert.assertEquals(maxRowsPerSegment, sampler.getSampledMaxRowsPerSegment());
+ }
+
+ @Test
+ public void testShouldEmitRowByHash()
+ {
+ int samplingFactor = 10;
+ DeterminePartitionsJobSampler sampler = new DeterminePartitionsJobSampler(
+ samplingFactor,
+ 1000,
+ 5000
+ );
+ long total = 100000L;
+ long hit = 0;
+ for (long i = 0; i < total; i++) {
+ String str = UUID.randomUUID().toString();
+ if (sampler.shouldEmitRow(str.getBytes(StandardCharsets.UTF_8))) {
+ hit++;
+ }
+ }
+ double expect = total * 1.0 / samplingFactor;
+ double error = Math.abs(hit - expect) / expect;
+ Assert.assertTrue(error < 0.01);
+ }
+
+ @Test
+ public void testShouldEmitRowByRandom()
+ {
+ int samplingFactor = 10;
+ DeterminePartitionsJobSampler sampler = new DeterminePartitionsJobSampler(
+ samplingFactor,
+ 1000,
+ 5000
+ );
+ long total = 1000000L;
+ long hit = 0;
+ for (long i = 0; i < total; i++) {
+ if (sampler.shouldEmitRow()) {
+ hit++;
+ }
+ }
+ double expect = total * 1.0 / samplingFactor;
+ double error = Math.abs(hit - expect) / expect;
+ Assert.assertTrue(error < 0.01);
+ }
+}
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
index aedc3695d2..5f338936a4 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
@@ -342,7 +342,8 @@ public class DeterminePartitionsJobTest
null,
null,
null,
- null
+ null,
+ 1
)
)
);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineRangePartitionsJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineRangePartitionsJobTest.java
index 83a9bd58e5..f10a898d12 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineRangePartitionsJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineRangePartitionsJobTest.java
@@ -397,7 +397,8 @@ public class DetermineRangePartitionsJobTest
null,
null,
null,
- null
+ null,
+ 1
)
)
);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
index f1352cece9..f69ad04915 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
@@ -280,7 +280,8 @@ public class HadoopDruidIndexerConfigTest
null,
null,
null,
- null
+ null,
+ 1
);
return new HadoopIngestionSpec(
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
index 7bd993d238..97c764dd10 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
@@ -63,7 +63,8 @@ public class HadoopTuningConfigTest
null,
null,
null,
- null
+ null,
+ 1
);
HadoopTuningConfig actual = jsonReadWriteRead(JSON_MAPPER.writeValueAsString(expected), HadoopTuningConfig.class);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
index e38bc1b1ad..2b8fc6749e 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
@@ -547,7 +547,8 @@ public class IndexGeneratorJobTest
null,
null,
null,
- null
+ null,
+ 1
)
)
);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
index 5956bbe6ff..3c1b80166c 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
@@ -185,7 +185,8 @@ public class JobHelperTest
null,
null,
null,
- null
+ null,
+ 1
)
)
);
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
index fcecb0bf07..d1aee3d669 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
@@ -79,7 +79,8 @@ public class GranularityPathSpecTest
null,
null,
null,
- null
+ null,
+ 1
);
private GranularityPathSpec granularityPathSpec;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]