mcvsubbu commented on a change in pull request #6050:
URL: https://github.com/apache/incubator-pinot/pull/6050#discussion_r497145807



##########
File path: pinot-minion/src/main/java/org/apache/pinot/minion/executor/RealtimeToOfflineSegmentsTaskExecutor.java
##########
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.minion.executor;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.io.FileUtils;
+import org.apache.pinot.core.common.MinionConstants;
+import org.apache.pinot.core.minion.PinotTaskConfig;
+import org.apache.pinot.core.segment.processing.collector.CollectorConfig;
+import org.apache.pinot.core.segment.processing.collector.CollectorFactory;
+import org.apache.pinot.core.segment.processing.collector.ValueAggregatorFactory;
+import org.apache.pinot.core.segment.processing.filter.RecordFilterConfig;
+import org.apache.pinot.core.segment.processing.filter.RecordFilterFactory;
+import org.apache.pinot.core.segment.processing.framework.SegmentConfig;
+import org.apache.pinot.core.segment.processing.framework.SegmentProcessorConfig;
+import org.apache.pinot.core.segment.processing.framework.SegmentProcessorFramework;
+import org.apache.pinot.core.segment.processing.partitioner.PartitionerConfig;
+import org.apache.pinot.core.segment.processing.partitioner.PartitionerFactory;
+import org.apache.pinot.core.segment.processing.transformer.RecordTransformerConfig;
+import org.apache.pinot.spi.config.table.ColumnPartitionConfig;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.DateTimeFieldSpec;
+import org.apache.pinot.spi.data.DateTimeFormatSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * A task to convert segments from a REALTIME table to segments for its corresponding OFFLINE table.
+ * The realtime segments could span across multiple time windows. This task extracts data and creates segments for a configured time range.
+ * The {@link SegmentProcessorFramework} is used for the segment conversion, which also does
+ * 1. time column rollup
+ * 2. time window extraction using filter function
+ * 3. partitioning using table config's segmentPartitioningConfig
+ * 4. aggregations and rollup
+ * 5. data sorting
+ */
+public class RealtimeToOfflineSegmentsTaskExecutor extends BaseMultipleSegmentsConversionExecutor {
+  private static final Logger LOGGER = LoggerFactory.getLogger(RealtimeToOfflineSegmentsTaskExecutor.class);
+  private static final String INPUT_SEGMENTS_DIR = "input_segments";
+  private static final String OUTPUT_SEGMENTS_DIR = "output_segments";
+
+  @Override
+  protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> originalIndexDirs,
+      File workingDir)
+      throws Exception {
+    String taskType = pinotTaskConfig.getTaskType();
+    Map<String, String> configs = pinotTaskConfig.getConfigs();
+    LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
+
+    String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY); // rawTableName_OFFLINE expected here
+    TableConfig tableConfig = getTableConfig(tableNameWithType);
+    Schema schema = getSchema(tableNameWithType);
+    Set<String> schemaColumns = schema.getPhysicalColumnNames();
+    String timeColumn = tableConfig.getValidationConfig().getTimeColumnName();
+    DateTimeFieldSpec dateTimeFieldSpec = schema.getSpecForTimeColumn(timeColumn);
+    assert dateTimeFieldSpec != null;
+
+    long windowStartMs =
+        Long.parseLong(configs.get(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MILLIS_KEY));
+    long windowEndMs = Long.parseLong(configs.get(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MILLIS_KEY));
+    String timeColumnTransformFunction =
+        configs.get(MinionConstants.RealtimeToOfflineSegmentsTask.TIME_COLUMN_TRANSFORM_FUNCTION_KEY);
+    String collectorTypeStr = configs.get(MinionConstants.RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
+    Map<String, String> aggregatorConfigs = new HashMap<>();
+    for (Map.Entry<String, String> entry : configs.entrySet()) {
+      String key = entry.getKey();
+      if (key.endsWith(MinionConstants.RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)) {
+        String column = key.split(MinionConstants.RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)[0];
+        aggregatorConfigs.put(column, entry.getValue());
+      }
+    }
+    String numRecordsPerSegment =
+        configs.get(MinionConstants.RealtimeToOfflineSegmentsTask.NUM_RECORDS_PER_SEGMENT_KEY);
+
+    SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
+        new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
+
+    // Time rollup using configured time transformation function
+    if (timeColumnTransformFunction != null) {
+      RecordTransformerConfig recordTransformerConfig =
+          getRecordTransformerConfigForTime(timeColumnTransformFunction, timeColumn);
+      segmentProcessorConfigBuilder.setRecordTransformerConfig(recordTransformerConfig);
+    }
+
+    // Filter function for extracting data between start and end time window
+    RecordFilterConfig recordFilterConfig =
+        getRecordFilterConfigForWindow(windowStartMs, windowEndMs, dateTimeFieldSpec, timeColumn);
+    segmentProcessorConfigBuilder.setRecordFilterConfig(recordFilterConfig);
+
+    // Partitioner config from tableConfig
+    if (tableConfig.getIndexingConfig().getSegmentPartitionConfig() != null) {
+      Map<String, ColumnPartitionConfig> columnPartitionMap =
+          tableConfig.getIndexingConfig().getSegmentPartitionConfig().getColumnPartitionMap();
+      PartitionerConfig partitionerConfig = getPartitionerConfig(columnPartitionMap, tableNameWithType, schemaColumns);
+      segmentProcessorConfigBuilder.setPartitionerConfigs(Lists.newArrayList(partitionerConfig));
+    }
+
+    // Aggregations using configured Collector
+    List<String> sortedColumns = tableConfig.getIndexingConfig().getSortedColumn();
+    CollectorConfig collectorConfig =
+        getCollectorConfig(collectorTypeStr, aggregatorConfigs, schemaColumns, sortedColumns);
+    segmentProcessorConfigBuilder.setCollectorConfig(collectorConfig);
+
+    // Segment config
+    if (numRecordsPerSegment != null) {
+      SegmentConfig segmentConfig = getSegmentConfig(numRecordsPerSegment);
+      segmentProcessorConfigBuilder.setSegmentConfig(segmentConfig);
+    }
+
+    SegmentProcessorConfig segmentProcessorConfig = segmentProcessorConfigBuilder.build();
+
+    File inputSegmentsDir = new File(workingDir, INPUT_SEGMENTS_DIR);
+    Preconditions.checkState(inputSegmentsDir.mkdirs(), "Failed to create input directory: %s for task: %s",
+        inputSegmentsDir.getAbsolutePath(), taskType);
+    for (File indexDir : originalIndexDirs) {
+      FileUtils.copyDirectoryToDirectory(indexDir, inputSegmentsDir);
+    }
+    File outputSegmentsDir = new File(workingDir, OUTPUT_SEGMENTS_DIR);
+    Preconditions.checkState(outputSegmentsDir.mkdirs(), "Failed to create output directory: %s for task: %s",
+        outputSegmentsDir.getAbsolutePath(), taskType);
+
+    SegmentProcessorFramework segmentProcessorFramework =
+        new SegmentProcessorFramework(inputSegmentsDir, segmentProcessorConfig, outputSegmentsDir);
+    try {
+      segmentProcessorFramework.processSegments();
+    } finally {
+      segmentProcessorFramework.cleanup();
+    }
+
+    LOGGER.info("Finished task: {} with configs: {}", taskType, configs);
+    List<SegmentConversionResult> results = new ArrayList<>();
+    for (File file : outputSegmentsDir.listFiles()) {
+      String outputSegmentName = file.getName();
+      results.add(new SegmentConversionResult.Builder().setFile(file).setSegmentName(outputSegmentName)
+          .setTableNameWithType(tableNameWithType).build());
+    }
+    return results;
+  }
+
+  /**
+   * Construct a {@link RecordTransformerConfig} for time column transformation
+   */
+  private RecordTransformerConfig getRecordTransformerConfigForTime(String timeColumnTransformFunction,
+      String timeColumn) {
+    Map<String, String> transformationsMap = new HashMap<>();
+    transformationsMap.put(timeColumn, timeColumnTransformFunction);
+    return new RecordTransformerConfig.Builder().setTransformFunctionsMap(transformationsMap).build();
+  }
+
+  /**
+   * Construct a {@link RecordFilterConfig} by setting a filter function on the time column, for extracting data between window start/end
+   */
+  private RecordFilterConfig getRecordFilterConfigForWindow(long windowStartMs, long windowEndMs,
+      DateTimeFieldSpec dateTimeFieldSpec, String timeColumn) {
+    String filterFunction;
+    DateTimeFormatSpec dateTimeFormatSpec = new DateTimeFormatSpec(dateTimeFieldSpec.getFormat());
+    TimeUnit timeUnit = dateTimeFormatSpec.getColumnUnit();
+    DateTimeFieldSpec.TimeFormat timeFormat = dateTimeFormatSpec.getTimeFormat();
+    if (timeUnit.equals(TimeUnit.MILLISECONDS) && timeFormat.equals(DateTimeFieldSpec.TimeFormat.EPOCH)) {
+      // If time column is in EPOCH millis, use windowStart and windowEnd directly to filter
+      filterFunction = getFilterFunctionLong(windowStartMs, windowEndMs, timeColumn);
+    } else {
+      // Convert windowStart and windowEnd to time format of the data
+      if (dateTimeFormatSpec.getTimeFormat().equals(DateTimeFieldSpec.TimeFormat.EPOCH)) {
+        long windowStart = dateTimeFormatSpec.fromMillisToFormat(windowStartMs, Long.class);
+        long windowEnd = dateTimeFormatSpec.fromMillisToFormat(windowEndMs, Long.class);
+        filterFunction = getFilterFunctionLong(windowStart, windowEnd, timeColumn);
+      } else {
+        String windowStart = dateTimeFormatSpec.fromMillisToFormat(windowStartMs, String.class);
+        String windowEnd = dateTimeFormatSpec.fromMillisToFormat(windowEndMs, String.class);
+        if (dateTimeFieldSpec.getDataType().isNumeric()) {
+          filterFunction = getFilterFunctionLong(Long.parseLong(windowStart), Long.parseLong(windowEnd), timeColumn);
+        } else {
+          filterFunction = getFilterFunctionString(windowStart, windowEnd, timeColumn);
+        }
+      }
+    }
+    return new RecordFilterConfig.Builder().setRecordFilterType(RecordFilterFactory.RecordFilterType.FILTER_FUNCTION)
+        .setFilterFunction(filterFunction).build();
+  }
+
+  /**
+   * Construct a {@link PartitionerConfig} using {@link org.apache.pinot.spi.config.table.SegmentPartitionConfig} from the table config
+   */
+  private PartitionerConfig getPartitionerConfig(Map<String, ColumnPartitionConfig> columnPartitionMap,
+      String tableNameWithType, Set<String> schemaColumns) {
+
+    Preconditions.checkState(columnPartitionMap.size() == 1,
+        "Cannot partition using more than 1 ColumnPartitionConfig for table: %s", tableNameWithType);
+    String partitionColumn = columnPartitionMap.keySet().iterator().next();
+    Preconditions.checkState(schemaColumns.contains(partitionColumn),
+        "Partition column: %s is not a physical column in the schema", partitionColumn);
+    return new PartitionerConfig.Builder().setPartitionerType(PartitionerFactory.PartitionerType.TABLE_PARTITION_CONFIG)
+        .setColumnName(partitionColumn).setColumnPartitionConfig(columnPartitionMap.get(partitionColumn)).build();
+  }
+
+  /**
+   * Construct a {@link CollectorConfig} using configured collector configs and sorted columns from table config
+   */
+  private CollectorConfig getCollectorConfig(String collectorTypeStr, Map<String, String> aggregateConfigs,
+      Set<String> schemaColumns, List<String> sortedColumns) {
+    CollectorFactory.CollectorType collectorType = collectorTypeStr == null ? CollectorFactory.CollectorType.CONCAT
+        : CollectorFactory.CollectorType.valueOf(collectorTypeStr.toUpperCase());
+
+    Map<String, ValueAggregatorFactory.ValueAggregatorType> aggregatorTypeMap = new HashMap<>();
+    for (Map.Entry<String, String> entry : aggregateConfigs.entrySet()) {
+      String column = entry.getKey();
+      Preconditions
+          .checkState(schemaColumns.contains(column), "Aggregate column: %s is not a physical column in the schema",
+              column);
+      aggregatorTypeMap.put(column, ValueAggregatorFactory.ValueAggregatorType.valueOf(entry.getValue().toUpperCase()));
+    }
+
+    if (sortedColumns != null) {
+      for (String column : sortedColumns) {

Review comment:
       I meant the case where a column is present both as a sortedColumn and also specified in `aggregateConfigs` above. Not sure if that case is handled correctly.
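       For illustration only, a minimal sketch of the kind of guard I have in mind, reusing the names already in this method (`sortedColumns`, `aggregatorTypeMap`, Guava's `Preconditions`, all already available here); not suggesting this is how the PR currently handles it, just flagging that the overlap should be dealt with explicitly:

```java
// Sketch only (assumes the existing sortedColumns and aggregatorTypeMap variables in getCollectorConfig,
// after aggregatorTypeMap has been populated): fail fast if a column is configured both as a sorted
// column and as an aggregate column, since it cannot be a rollup key and an aggregated value at once.
if (sortedColumns != null) {
  for (String column : sortedColumns) {
    Preconditions.checkState(!aggregatorTypeMap.containsKey(column),
        "Column: %s cannot be both a sorted column and an aggregate column", column);
  }
}
```

       Alternatively, dropping such a column from the sort order with a warning log could also work; either way an explicit decision would be good.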



