jihoonson commented on a change in pull request #6095: Add support
'keepSegmentGranularity' for compactionTask
URL: https://github.com/apache/incubator-druid/pull/6095#discussion_r207683180
##########
File path:
indexing-service/src/main/java/io/druid/indexing/common/task/CompactionTask.java
##########
@@ -263,34 +282,75 @@ static IndexIngestionSpec createIngestionSchema(
final List<TimelineObjectHolder<String, DataSegment>> timelineSegments =
pair.rhs;
if (timelineSegments.size() == 0) {
- return null;
+ return Collections.emptyList();
}
- final DataSchema dataSchema = createDataSchema(
- segmentProvider.dataSource,
- segmentProvider.interval,
- dimensionsSpec,
- toolbox.getIndexIO(),
- jsonMapper,
- timelineSegments,
- segmentFileMap
- );
- return new IndexIngestionSpec(
- dataSchema,
- new IndexIOConfig(
- new IngestSegmentFirehoseFactory(
- segmentProvider.dataSource,
- segmentProvider.interval,
- null, // no filter
- // set dimensions and metrics names to make sure that the
generated dataSchema is used for the firehose
-
dataSchema.getParser().getParseSpec().getDimensionsSpec().getDimensionNames(),
-
Arrays.stream(dataSchema.getAggregators()).map(AggregatorFactory::getName).collect(Collectors.toList()),
- toolbox.getIndexIO()
- ),
- false
- ),
- tuningConfig
- );
+ if (keepSegmentGranularity) {
+ // if keepSegmentGranularity = true, create indexIngestionSpec per
segment interval, so that we can run an index
+ // task per segment interval.
+ final List<IndexIngestionSpec> specs = new
ArrayList<>(timelineSegments.size());
+ for (TimelineObjectHolder<String, DataSegment> holder :
timelineSegments) {
+ final DataSchema dataSchema = createDataSchema(
+ segmentProvider.dataSource,
+ holder.getInterval(),
+ Collections.singletonList(holder),
+ dimensionsSpec,
+ toolbox.getIndexIO(),
+ jsonMapper,
+ segmentFileMap
+ );
+
+ specs.add(
+ new IndexIngestionSpec(
+ dataSchema,
+ new IndexIOConfig(
+ new IngestSegmentFirehoseFactory(
+ segmentProvider.dataSource,
+ holder.getInterval(),
+ null, // no filter
+ // set dimensions and metrics names to make sure that
the generated dataSchema is used for the firehose
+
dataSchema.getParser().getParseSpec().getDimensionsSpec().getDimensionNames(),
+
Arrays.stream(dataSchema.getAggregators()).map(AggregatorFactory::getName).collect(Collectors.toList()),
+ toolbox.getIndexIO()
+ ),
+ false
+ ),
+ tuningConfig
+ )
+ );
+ }
+
+ return specs;
+ } else {
+ final DataSchema dataSchema = createDataSchema(
+ segmentProvider.dataSource,
+ segmentProvider.interval,
+ timelineSegments,
+ dimensionsSpec,
+ toolbox.getIndexIO(),
+ jsonMapper,
+ segmentFileMap
+ );
+
+ return Collections.singletonList(
+ new IndexIngestionSpec(
+ dataSchema,
+ new IndexIOConfig(
Review comment:
Sounds good. Fixed.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]