Jackie-Jiang commented on a change in pull request #7092:
URL: https://github.com/apache/incubator-pinot/pull/7092#discussion_r660895862
##########
File path:
pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutor.java
##########
@@ -62,33 +64,52 @@
LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
long startMillis = System.currentTimeMillis();
- String mergeTypeString =
configs.get(MinionConstants.MergeRollupTask.MERGE_TYPE_KEY);
- Preconditions.checkNotNull(mergeTypeString, "MergeType cannot be null");
-
Preconditions.checkState(mergeTypeString.equalsIgnoreCase(CollectorFactory.CollectorType.CONCAT.name()),
+ Preconditions.checkState(
+
MergeType.CONCAT.name().equalsIgnoreCase(configs.get(MinionConstants.MergeRollupTask.MERGE_TYPE_KEY)),
"Only 'CONCAT' mode is currently supported.");
String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
TableConfig tableConfig = getTableConfig(tableNameWithType);
Schema schema = getSchema(tableNameWithType);
- Map<String, ValueAggregatorFactory.ValueAggregatorType> aggregatorConfigs =
- MergeRollupTaskUtils.getRollupAggregationTypeMap(configs);
+ Map<String, AggregationFunctionType> aggregationTypes =
MergeRollupTaskUtils.getRollupAggregationTypes(configs);
String numRecordsPerSegmentString =
configs.get(MinionConstants.MergeRollupTask.MAX_NUM_RECORDS_PER_SEGMENT);
- int numRecordsPerSegment =
- numRecordsPerSegmentString != null ?
Integer.parseInt(numRecordsPerSegmentString) : DEFAULT_NUM_RECORDS_PER_SEGMENT;
- MergeRollupConverter processorFramework =
- new
MergeRollupConverter.Builder().setTableConfig(tableConfig).setSchema(schema)
-
.setMergeType(mergeTypeString).setAggregatorConfigs(aggregatorConfigs)
-
.setNumRecordsPerSegment(numRecordsPerSegment).setOriginalIndexDirs(originalIndexDirs)
-
.setWorkingDir(workingDir).setInputSegmentDir(INPUT_SEGMENTS_DIR).setOutputSegmentDir(OUTPUT_SEGMENTS_DIR)
- .build();
- File[] outputFiles = processorFramework.convert();
+ SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
+ new
SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema)
+ .setMergeType(MergeType.CONCAT);
+ if (!aggregationTypes.isEmpty()) {
+ segmentProcessorConfigBuilder.setAggregationTypes(aggregationTypes);
+ }
+ if (numRecordsPerSegmentString != null) {
+ segmentProcessorConfigBuilder.setSegmentConfig(
+ new
SegmentConfig.Builder().setMaxNumRecordsPerSegment(Integer.parseInt(numRecordsPerSegmentString)).build());
+ }
+
+ SegmentProcessorConfig segmentProcessorConfig =
segmentProcessorConfigBuilder.build();
+
+ File inputSegmentsDir = new File(workingDir, INPUT_SEGMENTS_DIR);
+ Preconditions.checkState(inputSegmentsDir.mkdirs(), "Failed to create
input directory: %s for task: %s",
+ inputSegmentsDir.getAbsolutePath(), taskType);
+ for (File indexDir : originalIndexDirs) {
Review comment:
Yes, we should avoid the copying within the executor. We plan to
eliminate the copy for both the realtime-to-offline and merge-rollup
tasks in a separate PR.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]