gargvishesh commented on code in PR #15965:
URL: https://github.com/apache/druid/pull/15965#discussion_r1502318935
##########
extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ControllerImpl.java:
##########
@@ -1715,12 +1725,95 @@ private void publishSegmentsIfNeeded(
{
    if (queryKernel.isSuccess() && MSQControllerTask.isIngestion(task.getQuerySpec())) {
      final StageId finalStageId = queryKernel.getStageId(queryDef.getFinalStageDefinition().getStageNumber());
+      queryDef.getFinalStageDefinition().getClusterBy();
      //noinspection unchecked
      @SuppressWarnings("unchecked")
      final Set<DataSegment> segments = (Set<DataSegment>) queryKernel.getResultObjectForStage(finalStageId);
+      DataSchema dataSchema = ((SegmentGeneratorFrameProcessorFactory) queryKernel.getStageDefinition(finalStageId)
+                                                                                  .getProcessorFactory()).getDataSchema();
+
+      ShardSpec shardSpec = segments.isEmpty()
+                            ? null
+                            : segments.stream()
+                                      .findFirst()
+                                      .get()
+                                      .getShardSpec();
+      List<String> partitionDimensions = Collections.emptyList();
+
+      if (shardSpec != null && (Objects.equals(shardSpec.getType(), ShardSpec.Type.SINGLE)
+                                || Objects.equals(shardSpec.getType(), ShardSpec.Type.RANGE))) {
+        partitionDimensions = ((DimensionRangeShardSpec) shardSpec).getDimensions();
+      }
+
+      Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction = compactionStateAnnotateFunction(
+          task(),
+          context.jsonMapper(),
+          dataSchema,
+          partitionDimensions
+      );
      log.info("Query [%s] publishing %d segments.", queryDef.getQueryId(), segments.size());
-      publishAllSegments(segments);
+      publishAllSegments(compactionStateAnnotateFunction.apply(segments));
+    }
+  }
+
+  public static Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction(
+      MSQControllerTask task,
+      ObjectMapper jsonMapper,
+      DataSchema dataSchema,
+      List<String> partitionDimensions
+  )
+  {
+    final boolean storeCompactionState = task.getContextValue(
+        Tasks.STORE_COMPACTION_STATE_KEY,
+        Tasks.DEFAULT_STORE_COMPACTION_STATE
+    );
+
+    if (storeCompactionState) {
+      IndexSpec indexSpec = task.getQuerySpec().getTuningConfig().getIndexSpec();
+      GranularitySpec granularitySpec = dataSchema.getGranularitySpec();
+      DimensionsSpec dimensionsSpec = dataSchema.getDimensionsSpec();
+      Map<String, Object> transformSpec = dataSchema.getTransformSpec() == null
Review Comment:
All these fields are captured when setting compaction state in the [native flow](https://github.com/apache/druid/blob/54d0e482dc560f42194c613d1e1bcd5310f281c2/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AbstractBatchIndexTask.java#L586).
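
For reference, a rough sketch of how the MSQ side could then apply those captured fields to the published segments. This is not this PR's exact code: `buildCompactionState` is a hypothetical helper standing in for bundling the specs captured above, and the variables are the ones from the hunk.

```java
// Sketch only (not this PR's code): annotate each published segment with the
// compaction state assembled from the specs captured above.
Function<Set<DataSegment>, Set<DataSegment>> annotate = segmentSet -> {
  // buildCompactionState is a hypothetical helper that would bundle partitionSpec,
  // dimensionsSpec, metricsSpec, transformSpec, indexSpec and granularitySpec.
  CompactionState compactionState = buildCompactionState(
      partitionSpec,
      dimensionsSpec,
      metricsSpec,
      transformSpec,
      indexSpec,
      granularitySpec
  );
  return segmentSet.stream()
                   .map(segment -> segment.withLastCompactionState(compactionState))
                   .collect(Collectors.toSet());
};
```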
##########
extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ControllerImpl.java:
##########
@@ -1715,12 +1725,95 @@ private void publishSegmentsIfNeeded(
{
    if (queryKernel.isSuccess() && MSQControllerTask.isIngestion(task.getQuerySpec())) {
      final StageId finalStageId = queryKernel.getStageId(queryDef.getFinalStageDefinition().getStageNumber());
+      queryDef.getFinalStageDefinition().getClusterBy();
      //noinspection unchecked
      @SuppressWarnings("unchecked")
      final Set<DataSegment> segments = (Set<DataSegment>) queryKernel.getResultObjectForStage(finalStageId);
+      DataSchema dataSchema = ((SegmentGeneratorFrameProcessorFactory) queryKernel.getStageDefinition(finalStageId)
+                                                                                  .getProcessorFactory()).getDataSchema();
+
+      ShardSpec shardSpec = segments.isEmpty()
+                            ? null
+                            : segments.stream()
+                                      .findFirst()
+                                      .get()
+                                      .getShardSpec();
+      List<String> partitionDimensions = Collections.emptyList();
+
+      if (shardSpec != null && (Objects.equals(shardSpec.getType(), ShardSpec.Type.SINGLE)
+                                || Objects.equals(shardSpec.getType(), ShardSpec.Type.RANGE))) {
+        partitionDimensions = ((DimensionRangeShardSpec) shardSpec).getDimensions();
+      }
+
+      Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction = compactionStateAnnotateFunction(
+          task(),
+          context.jsonMapper(),
+          dataSchema,
+          partitionDimensions
+      );
      log.info("Query [%s] publishing %d segments.", queryDef.getQueryId(), segments.size());
-      publishAllSegments(segments);
+      publishAllSegments(compactionStateAnnotateFunction.apply(segments));
+    }
+  }
+
+  public static Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction(
+      MSQControllerTask task,
+      ObjectMapper jsonMapper,
+      DataSchema dataSchema,
+      List<String> partitionDimensions
+  )
+  {
+    final boolean storeCompactionState = task.getContextValue(
Review Comment:
I'm just wondering whether it would be better to move this to the query context instead of the task context, so that it can be set from the web console. Any thoughts on that?
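
For illustration, reading it from the query context could look roughly like the sketch below; it assumes the native query's context is reachable via `task.getQuerySpec().getQuery().context()`.

```java
// Sketch only: resolve the flag from the query context (supplied alongside the other
// query parameters, e.g. from the web console) instead of the task context.
final boolean storeCompactionState = task.getQuerySpec()
                                         .getQuery()
                                         .context()
                                         .getBoolean(
                                             Tasks.STORE_COMPACTION_STATE_KEY,
                                             Tasks.DEFAULT_STORE_COMPACTION_STATE
                                         );
```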
##########
extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ControllerImpl.java:
##########
@@ -1715,12 +1725,95 @@ private void publishSegmentsIfNeeded(
{
    if (queryKernel.isSuccess() && MSQControllerTask.isIngestion(task.getQuerySpec())) {
      final StageId finalStageId = queryKernel.getStageId(queryDef.getFinalStageDefinition().getStageNumber());
+      queryDef.getFinalStageDefinition().getClusterBy();
      //noinspection unchecked
      @SuppressWarnings("unchecked")
      final Set<DataSegment> segments = (Set<DataSegment>) queryKernel.getResultObjectForStage(finalStageId);
+      DataSchema dataSchema = ((SegmentGeneratorFrameProcessorFactory) queryKernel.getStageDefinition(finalStageId)
+                                                                                  .getProcessorFactory()).getDataSchema();
+
+      ShardSpec shardSpec = segments.isEmpty()
+                            ? null
+                            : segments.stream()
+                                      .findFirst()
+                                      .get()
+                                      .getShardSpec();
+      List<String> partitionDimensions = Collections.emptyList();
+
+      if (shardSpec != null && (Objects.equals(shardSpec.getType(), ShardSpec.Type.SINGLE)
+                                || Objects.equals(shardSpec.getType(), ShardSpec.Type.RANGE))) {
+        partitionDimensions = ((DimensionRangeShardSpec) shardSpec).getDimensions();
+      }
+
+      Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction = compactionStateAnnotateFunction(
+          task(),
+          context.jsonMapper(),
+          dataSchema,
+          partitionDimensions
+      );
      log.info("Query [%s] publishing %d segments.", queryDef.getQueryId(), segments.size());
-      publishAllSegments(segments);
+      publishAllSegments(compactionStateAnnotateFunction.apply(segments));
+    }
+  }
+
+  public static Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction(
+      MSQControllerTask task,
+      ObjectMapper jsonMapper,
+      DataSchema dataSchema,
+      List<String> partitionDimensions
+  )
+  {
+    final boolean storeCompactionState = task.getContextValue(
+        Tasks.STORE_COMPACTION_STATE_KEY,
+        Tasks.DEFAULT_STORE_COMPACTION_STATE
+    );
+
+    if (storeCompactionState) {
+      IndexSpec indexSpec = task.getQuerySpec().getTuningConfig().getIndexSpec();
+      GranularitySpec granularitySpec = dataSchema.getGranularitySpec();
+      DimensionsSpec dimensionsSpec = dataSchema.getDimensionsSpec();
+      Map<String, Object> transformSpec = dataSchema.getTransformSpec() == null
+                                          || TransformSpec.NONE.equals(dataSchema.getTransformSpec())
+                                          ? null
+                                          : new ClientCompactionTaskTransformSpec(dataSchema.getTransformSpec()
+                                                                                            .getFilter()).asMap(
+                                              jsonMapper);
+      List<Object> metricsSpec = dataSchema.getAggregators() == null
+                                 ? null
+                                 : jsonMapper.convertValue(
+                                     dataSchema.getAggregators(),
+                                     new TypeReference<List<Object>>()
+                                     {
+                                     }
+                                 );
+
+      // Even if partition dimensions is empty, use DimensionRangePartitionsSpec to record other info
+      // such as rowsPerSegment
+
+      PartitionsSpec partitionSpec = new DimensionRangePartitionsSpec(
Review Comment:
Since MSQ only allows range partitioning, I was thinking we could safely keep it as DimensionRangePartitionsSpec -- with empty partition dimensions if there are no CLUSTERED BY columns.
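
Roughly along these lines (a sketch; the `(targetRowsPerSegment, maxRowsPerSegment, partitionDimensions, assumeGrouped)` argument order and pulling `rowsPerSegment` from the MSQ tuning config are my assumptions here):

```java
// Sketch: always record a range-based spec so rowsPerSegment is preserved, even
// when the query had no CLUSTERED BY columns (partitionDimensions may be empty).
PartitionsSpec partitionSpec = new DimensionRangePartitionsSpec(
    task.getQuerySpec().getTuningConfig().getRowsPerSegment(), // targetRowsPerSegment (assumed source)
    null,                                                      // maxRowsPerSegment
    partitionDimensions,                                       // possibly empty
    false                                                      // assumeGrouped
);
```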
##########
extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ControllerImpl.java:
##########
@@ -1715,12 +1725,95 @@ private void publishSegmentsIfNeeded(
{
    if (queryKernel.isSuccess() && MSQControllerTask.isIngestion(task.getQuerySpec())) {
      final StageId finalStageId = queryKernel.getStageId(queryDef.getFinalStageDefinition().getStageNumber());
+      queryDef.getFinalStageDefinition().getClusterBy();
      //noinspection unchecked
      @SuppressWarnings("unchecked")
      final Set<DataSegment> segments = (Set<DataSegment>) queryKernel.getResultObjectForStage(finalStageId);
+      DataSchema dataSchema = ((SegmentGeneratorFrameProcessorFactory) queryKernel.getStageDefinition(finalStageId)
+                                                                                  .getProcessorFactory()).getDataSchema();
+
+      ShardSpec shardSpec = segments.isEmpty()
+                            ? null
+                            : segments.stream()
+                                      .findFirst()
+                                      .get()
+                                      .getShardSpec();
+      List<String> partitionDimensions = Collections.emptyList();
+
+      if (shardSpec != null && (Objects.equals(shardSpec.getType(), ShardSpec.Type.SINGLE)
+                                || Objects.equals(shardSpec.getType(), ShardSpec.Type.RANGE))) {
+        partitionDimensions = ((DimensionRangeShardSpec) shardSpec).getDimensions();
+      }
+
+      Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction = compactionStateAnnotateFunction(
+          task(),
+          context.jsonMapper(),
+          dataSchema,
+          partitionDimensions
+      );
      log.info("Query [%s] publishing %d segments.", queryDef.getQueryId(), segments.size());
-      publishAllSegments(segments);
+      publishAllSegments(compactionStateAnnotateFunction.apply(segments));
+    }
+  }
+
+  public static Function<Set<DataSegment>, Set<DataSegment>> compactionStateAnnotateFunction(
+      MSQControllerTask task,
+      ObjectMapper jsonMapper,
+      DataSchema dataSchema,
+      List<String> partitionDimensions
+  )
+  {
+    final boolean storeCompactionState = task.getContextValue(
+        Tasks.STORE_COMPACTION_STATE_KEY,
+        Tasks.DEFAULT_STORE_COMPACTION_STATE
+    );
+
+    if (storeCompactionState) {
Review Comment:
That would be part of the logic for setting this flag itself. Currently, this PR doesn't include the logic to implicitly set it for REPLACE commands, so it has to be set explicitly for now.
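
For example, by passing it in the task context of the submitted ingestion (sketch only):

```java
// Sketch: explicitly opt in until REPLACE learns to set the flag implicitly.
Map<String, Object> taskContext = new HashMap<>();
taskContext.put(Tasks.STORE_COMPACTION_STATE_KEY, true); // "storeCompactionState"
```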