kgyrtkirk commented on code in PR #18144: URL: https://github.com/apache/druid/pull/18144#discussion_r2150020496
########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/std/StandardStageRunner.java: ########## @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec.std; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.common.guava.FutureUtils; +import org.apache.druid.frame.processor.BlockingQueueOutputChannelFactory; +import org.apache.druid.frame.processor.Bouncer; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.frame.processor.manager.ProcessorManager; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.msq.counters.CounterNames; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.counters.CpuCounters; +import org.apache.druid.msq.exec.ExecutionContext; +import org.apache.druid.msq.exec.FrameContext; +import org.apache.druid.msq.exec.OutputChannelMode; +import org.apache.druid.msq.exec.StageProcessor; +import org.apache.druid.msq.indexing.CountingOutputChannelFactory; +import 
org.apache.druid.msq.kernel.ShuffleSpec; +import org.apache.druid.msq.kernel.StageDefinition; +import org.apache.druid.msq.kernel.WorkOrder; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; + +/** + * Runner for {@link StageProcessor} that want to build a {@link ProcessorsAndChannels} for some shuffle-agnostic + * work, then have the shuffle work taken care of generically by {@link StandardStageRunner}. + * + * Using this class allows the {@link StageProcessor} implementation to be simpler, since it doesn't need to worry + * about how to generate channels "properly" for the shuffle. However, it comes at the cost of not being able + * to do shuffle-specific optimizations. + */ +public class StandardStageRunner<T, R> +{ + private final ExecutionContext executionContext; + private final WorkOrder workOrder; + private final CounterTracker counterTracker; + private final FrameProcessorExecutor exec; + private final String cancellationId; + private final int threadCount; + private final FrameContext frameContext; + + @MonotonicNonNull + private OutputChannelFactory workOutputChannelFactory; + @MonotonicNonNull + private ListenableFuture<R> workResultFuture; + @MonotonicNonNull + private ListenableFuture<ResultAndChannels<Object>> pipelineFuture; + + public StandardStageRunner(final ExecutionContext executionContext) + { + this.executionContext = executionContext; + this.workOrder = executionContext.workOrder(); + this.counterTracker = executionContext.counters(); + this.exec = executionContext.executor(); + this.cancellationId = executionContext.cancellationId(); Review Comment: there are a lot of localized stuff from the `executionContext` here which are only used 1/2 times...I think the field might just add complexity ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ExecutionContext.java: ########## @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec; + +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.frame.channel.BlockingQueueFrameChannel; +import org.apache.druid.frame.key.ClusterByPartitions; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.exec.std.StandardShuffleOperations; +import org.apache.druid.msq.input.InputSliceReader; +import org.apache.druid.msq.kernel.ShuffleKind; +import org.apache.druid.msq.kernel.WorkOrder; +import org.apache.druid.msq.statistics.ClusterByStatisticsSnapshot; + +import javax.annotation.Nullable; + +/** + * All the things needed for {@link StageProcessor#execute(ExecutionContext)} to run the work for a stage. + */ +public interface ExecutionContext Review Comment: I think this interface might be unnecessary: it declares 12 methods and has only a single implementation - it already seems to have suffered from some bending from that single implementation. 
might be better to just drop the interface and introduce generalization when there will be at least 2 implementors ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/ExecutionContext.java: ########## @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec; + +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.frame.channel.BlockingQueueFrameChannel; +import org.apache.druid.frame.key.ClusterByPartitions; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.exec.std.StandardShuffleOperations; +import org.apache.druid.msq.input.InputSliceReader; +import org.apache.druid.msq.kernel.ShuffleKind; +import org.apache.druid.msq.kernel.WorkOrder; +import org.apache.druid.msq.statistics.ClusterByStatisticsSnapshot; + +import javax.annotation.Nullable; + +/** + * All the things needed for {@link StageProcessor#execute(ExecutionContext)} to run the work for a stage. + */ +public interface ExecutionContext +{ + /** + * Work order to execute. 
+ */ + WorkOrder workOrder(); + + /** + * Processing thread pool to use. + */ + FrameProcessorExecutor executor(); + + /** + * Reader for {@link WorkOrder#getInputs()}. + */ + InputSliceReader inputSliceReader(); + + /** + * For {@link ShuffleKind#GLOBAL_SORT}, a future that resolves to the global {@link ClusterByPartitions} (when known). + * Used by {@link StandardShuffleOperations#globalSort(ListenableFuture, OutputChannelFactory)}. + */ + ListenableFuture<ClusterByPartitions> globalClusterByPartitions(); + + /** + * Factory for generating stage output channels. + */ + OutputChannelFactory outputChannelFactory(); + + /** + * Creates a buffered intermediate output channel, such as for spilling temporary streams to disk. For a non-buffered + * intermediate channel, use {@link BlockingQueueFrameChannel#minimal()} instead. + */ + OutputChannelFactory makeIntermediateOutputChannelFactory(String name); + + /** + * Services and objects for the functioning of various processors. + */ + FrameContext frameContext(); + + /** + * Facility for tracking query counters and metrics. + */ + CounterTracker counters(); + + /** + * Number of threads available in {@link #executor()}. + */ + int threadCount(); + + /** + * Cancellation ID that must be provided to {@link FrameProcessorExecutor} when running work. + */ + String cancellationId(); + + /** + * Callback that must be called when input is done being read. This is essential for two reasons: + * (1) If the prior stage ran with {@link OutputChannelMode#MEMORY}, this informs the controller that it can shut + * down the prior stage. + * (2) With {@link ShuffleKind#GLOBAL_SORT}, this provides statistics that are used to determine global boundaries. + * + * Typically called by {@link StandardShuffleOperations#gatherResultKeyStatisticsIfNeeded(ListenableFuture)}. + */ + void onDoneReadingInput(@Nullable ClusterByStatisticsSnapshot snapshot); + + /** + * Callback to report a nonfatal warning. 
+ */ + void onWarning(Throwable t); Review Comment: note: in the scope of an `ExecutionContext` these callback like methods look a little bit like outliers ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/RunWorkOrder.java: ########## @@ -215,23 +172,11 @@ public void startAsync() throw new ISE("Cannot start from state[%s]", state); } - final StageDefinition stageDef = workOrder.getStageDefinition(); - try { exec.registerCancellationId(cancellationId); - makeInputSliceReader(); - makeWorkOutputChannelFactory(); - makeShuffleOutputChannelFactory(); - makeAndRunWorkProcessors(); - - if (stageDef.doesShuffle()) { - makeAndRunShuffleProcessors(); - } else { - // No shuffling: work output _is_ stage output. Retain read-only versions to reduce memory footprint. - stageOutputChannelsFuture = - Futures.immediateFuture(workResultAndOutputChannels.getOutputChannels().readOnly()); - } - + initInputSliceReader(); + initGlobalSortPartitionBoundariesIfNeeded(); Review Comment: I feel like something is leaking here...not sure how to fix it - but this and the stuff it moves doesn't seem to belong here...I'll try to dig into this ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/std/StandardStageRunner.java: ########## @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec.std; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.common.guava.FutureUtils; +import org.apache.druid.frame.processor.BlockingQueueOutputChannelFactory; +import org.apache.druid.frame.processor.Bouncer; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.frame.processor.manager.ProcessorManager; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.msq.counters.CounterNames; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.counters.CpuCounters; +import org.apache.druid.msq.exec.ExecutionContext; +import org.apache.druid.msq.exec.FrameContext; +import org.apache.druid.msq.exec.OutputChannelMode; +import org.apache.druid.msq.exec.StageProcessor; +import org.apache.druid.msq.indexing.CountingOutputChannelFactory; +import org.apache.druid.msq.kernel.ShuffleSpec; +import org.apache.druid.msq.kernel.StageDefinition; +import org.apache.druid.msq.kernel.WorkOrder; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; + +/** + * Runner for {@link StageProcessor} that want to build a {@link ProcessorsAndChannels} for some shuffle-agnostic + * work, then have the shuffle work taken care of generically by {@link StandardStageRunner}. 
+ * + * Using this class allows the {@link StageProcessor} implementation to be simpler, since it doesn't need to worry + * about how to generate channels "properly" for the shuffle. However, it comes at the cost of not being able + * to do shuffle-specific optimizations. + */ +public class StandardStageRunner<T, R> +{ + private final ExecutionContext executionContext; + private final WorkOrder workOrder; + private final CounterTracker counterTracker; + private final FrameProcessorExecutor exec; + private final String cancellationId; + private final int threadCount; + private final FrameContext frameContext; + + @MonotonicNonNull + private OutputChannelFactory workOutputChannelFactory; + @MonotonicNonNull + private ListenableFuture<R> workResultFuture; + @MonotonicNonNull + private ListenableFuture<ResultAndChannels<Object>> pipelineFuture; + + public StandardStageRunner(final ExecutionContext executionContext) + { + this.executionContext = executionContext; + this.workOrder = executionContext.workOrder(); + this.counterTracker = executionContext.counters(); + this.exec = executionContext.executor(); + this.cancellationId = executionContext.cancellationId(); + this.threadCount = executionContext.threadCount(); + this.frameContext = executionContext.frameContext(); + } + + /** + * Start execution. + */ + public ListenableFuture<R> run(final ProcessorsAndChannels<T, R> processors) + { + final StageDefinition stageDef = workOrder.getStageDefinition(); + + makeAndRunWorkProcessors(processors); + + if (stageDef.doesShuffle()) { + makeAndRunShuffleProcessors(); + } + + // Return a future that resolves to the work result, but only when the final stage result and output channels are + // *also* ready (from pipelineFuture). 
+ return FutureUtils.transformAsync( + Futures.allAsList( + workResultFuture, + FutureUtils.transformAsync(pipelineFuture, ResultAndChannels::resultFuture) + ), + ignored -> workResultFuture + ); + } + + /** + * Returns the {@link OutputChannelFactory} that the processors passed to {@link #run(ProcessorsAndChannels)} + * are expected to use. + */ + public OutputChannelFactory workOutputChannelFactory() + { + if (workOutputChannelFactory != null) { + return workOutputChannelFactory; + } + + final OutputChannelFactory baseOutputChannelFactory; + + if (workOrder.getStageDefinition().doesShuffle()) { + // Writing to a consumer in the same JVM (which will be set up later on in this method). + baseOutputChannelFactory = new BlockingQueueOutputChannelFactory(frameContext.memoryParameters().getFrameSize()); + } else { + // Writing stage output. + baseOutputChannelFactory = executionContext.outputChannelFactory(); + } + + workOutputChannelFactory = new CountingOutputChannelFactory( + baseOutputChannelFactory, + counterTracker.channel(CounterNames.outputChannel()) + ); + + return workOutputChannelFactory; + } + + /** + * Executes processors using {@link #exec}. Saves the result future in {@link #workResultFuture} and saves the + * current pipeline state (result and output channels) in {@link #pipelineFuture}. + */ + private void makeAndRunWorkProcessors(final ProcessorsAndChannels<T, R> processors) + { + final ProcessorManager<T, R> processorManager = processors.getProcessorManager(); + + final int maxOutstandingProcessors; + + if (processors.getOutputChannels().getAllChannels().isEmpty()) { + // No output channels: run up to "threadCount" processors at once. + maxOutstandingProcessors = Math.max(1, threadCount); + } else { + // If there are output channels, that acts as a ceiling on the number of processors that can run at once. 
+ maxOutstandingProcessors = + Math.max(1, Math.min(threadCount, processors.getOutputChannels().getAllChannels().size())); + } + + final boolean usesProcessingBuffers = workOrder.getStageDefinition().getProcessor().usesProcessingBuffers(); + + workResultFuture = exec.runAllFully( + counterTracker.trackCpu(processorManager, CpuCounters.LABEL_MAIN), + maxOutstandingProcessors, + usesProcessingBuffers ? frameContext.processingBuffers().getBouncer() : Bouncer.unlimited(), Review Comment: a method like `executionContext.getBouncer` could have access to everything this line needs ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/RunWorkOrder.java: ########## @@ -215,23 +172,11 @@ public void startAsync() throw new ISE("Cannot start from state[%s]", state); } - final StageDefinition stageDef = workOrder.getStageDefinition(); - try { exec.registerCancellationId(cancellationId); - makeInputSliceReader(); - makeWorkOutputChannelFactory(); - makeShuffleOutputChannelFactory(); - makeAndRunWorkProcessors(); - - if (stageDef.doesShuffle()) { - makeAndRunShuffleProcessors(); - } else { - // No shuffling: work output _is_ stage output. Retain read-only versions to reduce memory footprint. - stageOutputChannelsFuture = - Futures.immediateFuture(workResultAndOutputChannels.getOutputChannels().readOnly()); - } - + initInputSliceReader(); Review Comment: I think the `ExecutionContext` is created too late; for example this `initInputSliceReader` method is called twice - here and from when that context is created. I feel like creating it earlier could enable cleaning up some of this logic ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/std/StandardStageRunner.java: ########## @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec.std; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.common.guava.FutureUtils; +import org.apache.druid.frame.processor.BlockingQueueOutputChannelFactory; +import org.apache.druid.frame.processor.Bouncer; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.frame.processor.manager.ProcessorManager; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.msq.counters.CounterNames; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.counters.CpuCounters; +import org.apache.druid.msq.exec.ExecutionContext; +import org.apache.druid.msq.exec.FrameContext; +import org.apache.druid.msq.exec.OutputChannelMode; +import org.apache.druid.msq.exec.StageProcessor; +import org.apache.druid.msq.indexing.CountingOutputChannelFactory; +import org.apache.druid.msq.kernel.ShuffleSpec; +import org.apache.druid.msq.kernel.StageDefinition; +import org.apache.druid.msq.kernel.WorkOrder; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; + +/** + * Runner for {@link StageProcessor} that want to build a {@link ProcessorsAndChannels} for some shuffle-agnostic + * work, then have the shuffle work taken care of 
generically by {@link StandardStageRunner}. + * + * Using this class allows the {@link StageProcessor} implementation to be simpler, since it doesn't need to worry + * about how to generate channels "properly" for the shuffle. However, it comes at the cost of not being able + * to do shuffle-specific optimizations. + */ +public class StandardStageRunner<T, R> +{ + private final ExecutionContext executionContext; + private final WorkOrder workOrder; + private final CounterTracker counterTracker; + private final FrameProcessorExecutor exec; + private final String cancellationId; + private final int threadCount; + private final FrameContext frameContext; + + @MonotonicNonNull + private OutputChannelFactory workOutputChannelFactory; + @MonotonicNonNull + private ListenableFuture<R> workResultFuture; + @MonotonicNonNull + private ListenableFuture<ResultAndChannels<Object>> pipelineFuture; + + public StandardStageRunner(final ExecutionContext executionContext) + { + this.executionContext = executionContext; + this.workOrder = executionContext.workOrder(); + this.counterTracker = executionContext.counters(); + this.exec = executionContext.executor(); + this.cancellationId = executionContext.cancellationId(); + this.threadCount = executionContext.threadCount(); + this.frameContext = executionContext.frameContext(); + } + + /** + * Start execution. + */ + public ListenableFuture<R> run(final ProcessorsAndChannels<T, R> processors) + { + final StageDefinition stageDef = workOrder.getStageDefinition(); + + makeAndRunWorkProcessors(processors); + + if (stageDef.doesShuffle()) { + makeAndRunShuffleProcessors(); + } + + // Return a future that resolves to the work result, but only when the final stage result and output channels are + // *also* ready (from pipelineFuture). 
+ return FutureUtils.transformAsync( + Futures.allAsList( + workResultFuture, + FutureUtils.transformAsync(pipelineFuture, ResultAndChannels::resultFuture) + ), + ignored -> workResultFuture + ); + } + + /** + * Returns the {@link OutputChannelFactory} that the processors passed to {@link #run(ProcessorsAndChannels)} + * are expected to use. + */ + public OutputChannelFactory workOutputChannelFactory() + { + if (workOutputChannelFactory != null) { + return workOutputChannelFactory; + } + + final OutputChannelFactory baseOutputChannelFactory; + + if (workOrder.getStageDefinition().doesShuffle()) { + // Writing to a consumer in the same JVM (which will be set up later on in this method). + baseOutputChannelFactory = new BlockingQueueOutputChannelFactory(frameContext.memoryParameters().getFrameSize()); + } else { + // Writing stage output. + baseOutputChannelFactory = executionContext.outputChannelFactory(); + } + + workOutputChannelFactory = new CountingOutputChannelFactory( + baseOutputChannelFactory, + counterTracker.channel(CounterNames.outputChannel()) + ); + + return workOutputChannelFactory; + } + + /** + * Executes processors using {@link #exec}. Saves the result future in {@link #workResultFuture} and saves the + * current pipeline state (result and output channels) in {@link #pipelineFuture}. + */ + private void makeAndRunWorkProcessors(final ProcessorsAndChannels<T, R> processors) + { + final ProcessorManager<T, R> processorManager = processors.getProcessorManager(); + + final int maxOutstandingProcessors; + + if (processors.getOutputChannels().getAllChannels().isEmpty()) { + // No output channels: run up to "threadCount" processors at once. + maxOutstandingProcessors = Math.max(1, threadCount); + } else { + // If there are output channels, that acts as a ceiling on the number of processors that can run at once. 
+ maxOutstandingProcessors = + Math.max(1, Math.min(threadCount, processors.getOutputChannels().getAllChannels().size())); + } + + final boolean usesProcessingBuffers = workOrder.getStageDefinition().getProcessor().usesProcessingBuffers(); + + workResultFuture = exec.runAllFully( + counterTracker.trackCpu(processorManager, CpuCounters.LABEL_MAIN), + maxOutstandingProcessors, + usesProcessingBuffers ? frameContext.processingBuffers().getBouncer() : Bouncer.unlimited(), + cancellationId + ); + + final ResultAndChannels<R> workResultAndChannels = new ResultAndChannels<>( + workResultFuture, + processors.getOutputChannels() + ); + + //noinspection unchecked + pipelineFuture = Futures.immediateFuture((ResultAndChannels<Object>) workResultAndChannels); + } + + /** + * Executes the shuffle pipeline and sets the result future in {@link #pipelineFuture}. + */ + private void makeAndRunShuffleProcessors() + { + final ShuffleSpec shuffleSpec = workOrder.getStageDefinition().getShuffleSpec(); + final StandardShuffleOperations stageOperations = new StandardShuffleOperations(executionContext); + + pipelineFuture = stageOperations.gatherResultKeyStatisticsIfNeeded(pipelineFuture); + + final OutputChannelFactory stageOutputChannelFactory = new CountingOutputChannelFactory( + executionContext.outputChannelFactory(), + counterTracker.channel(CounterNames.shuffleChannel()) + ); + + switch (shuffleSpec.kind()) { + case MIX: + pipelineFuture = stageOperations.mix(pipelineFuture, stageOutputChannelFactory); + break; + + case HASH: + pipelineFuture = stageOperations.hashPartition( + pipelineFuture, + stageOutputChannelFactory, + executionContext.workOrder().getOutputChannelMode() != OutputChannelMode.MEMORY + ); + break; + + case HASH_LOCAL_SORT: + final OutputChannelFactory hashOutputChannelFactory; + final boolean hashOutputBuffered; + + if (shuffleSpec.partitionCount() == 1) { + // Single partition; no need to write temporary files. 
+ hashOutputChannelFactory = + new BlockingQueueOutputChannelFactory(frameContext.memoryParameters().getFrameSize()); + hashOutputBuffered = false; + } else { + // Multi-partition; write temporary files and then sort each one file-by-file. + hashOutputChannelFactory = executionContext.makeIntermediateOutputChannelFactory("hash-parts"); + hashOutputBuffered = true; Review Comment: note: couldn't the `hashOutputChannelFactory` tell the `hashPartition()` somehow about this fact? the check on `1` and the other `!= MEMORY` is just odd ########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/std/StandardStageProcessor.java: ########## @@ -78,23 +83,31 @@ ProcessorsAndChannels<T, R> makeProcessors( boolean removeNullBytes ) throws IOException; - /** - * Whether processors from this factory use {@link org.apache.druid.msq.exec.ProcessingBuffers}. - */ - boolean usesProcessingBuffers(); + @Override + public ListenableFuture<R> execute(ExecutionContext context) + { + try { + final StandardStageRunner<T, R> stageRunner = new StandardStageRunner<>(context); - @Nullable - TypeReference<R> getResultTypeReference(); + @SuppressWarnings("unchecked") + final ProcessorsAndChannels<T, R> processors = makeProcessors( + context.workOrder().getStageDefinition(), + context.workOrder().getWorkerNumber(), + context.workOrder().getInputs(), + context.inputSliceReader(), + (ExtraInfoType) context.workOrder().getExtraInfo(), + stageRunner.workOutputChannelFactory(), + context.frameContext(), + context.threadCount(), + context.counters(), + context::onWarning, + MultiStageQueryContext.removeNullBytes(context.workOrder().getWorkerContext()) Review Comment: note: around 10 arguments are constructed one way or another from the `context` argument.... I think it would be more straightforward to just pass the `ExecutionContext` and let the other end decide what it needs... 
########## extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/exec/std/StandardStageRunner.java: ########## @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.msq.exec.std; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.common.guava.FutureUtils; +import org.apache.druid.frame.processor.BlockingQueueOutputChannelFactory; +import org.apache.druid.frame.processor.Bouncer; +import org.apache.druid.frame.processor.FrameProcessorExecutor; +import org.apache.druid.frame.processor.OutputChannelFactory; +import org.apache.druid.frame.processor.manager.ProcessorManager; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.msq.counters.CounterNames; +import org.apache.druid.msq.counters.CounterTracker; +import org.apache.druid.msq.counters.CpuCounters; +import org.apache.druid.msq.exec.ExecutionContext; +import org.apache.druid.msq.exec.FrameContext; +import org.apache.druid.msq.exec.OutputChannelMode; +import org.apache.druid.msq.exec.StageProcessor; +import org.apache.druid.msq.indexing.CountingOutputChannelFactory; +import 
org.apache.druid.msq.kernel.ShuffleSpec; +import org.apache.druid.msq.kernel.StageDefinition; +import org.apache.druid.msq.kernel.WorkOrder; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; + +/** + * Runner for {@link StageProcessor} that want to build a {@link ProcessorsAndChannels} for some shuffle-agnostic + * work, then have the shuffle work taken care of generically by {@link StandardStageRunner}. + * + * Using this class allows the {@link StageProcessor} implementation to be simpler, since it doesn't need to worry + * about how to generate channels "properly" for the shuffle. However, it comes at the cost of not being able + * to do shuffle-specific optimizations. + */ +public class StandardStageRunner<T, R> +{ + private final ExecutionContext executionContext; + private final WorkOrder workOrder; + private final CounterTracker counterTracker; + private final FrameProcessorExecutor exec; + private final String cancellationId; + private final int threadCount; + private final FrameContext frameContext; + + @MonotonicNonNull + private OutputChannelFactory workOutputChannelFactory; + @MonotonicNonNull + private ListenableFuture<R> workResultFuture; + @MonotonicNonNull + private ListenableFuture<ResultAndChannels<Object>> pipelineFuture; + + public StandardStageRunner(final ExecutionContext executionContext) + { + this.executionContext = executionContext; + this.workOrder = executionContext.workOrder(); + this.counterTracker = executionContext.counters(); + this.exec = executionContext.executor(); + this.cancellationId = executionContext.cancellationId(); + this.threadCount = executionContext.threadCount(); + this.frameContext = executionContext.frameContext(); + } + + /** + * Start execution. 
+ */ + public ListenableFuture<R> run(final ProcessorsAndChannels<T, R> processors) + { + final StageDefinition stageDef = workOrder.getStageDefinition(); + + makeAndRunWorkProcessors(processors); + + if (stageDef.doesShuffle()) { + makeAndRunShuffleProcessors(); + } + + // Return a future that resolves to the work result, but only when the final stage result and output channels are + // *also* ready (from pipelineFuture). + return FutureUtils.transformAsync( + Futures.allAsList( + workResultFuture, + FutureUtils.transformAsync(pipelineFuture, ResultAndChannels::resultFuture) + ), + ignored -> workResultFuture + ); + } + + /** + * Returns the {@link OutputChannelFactory} that the processors passed to {@link #run(ProcessorsAndChannels)} + * are expected to use. + */ + public OutputChannelFactory workOutputChannelFactory() + { + if (workOutputChannelFactory != null) { + return workOutputChannelFactory; + } + + final OutputChannelFactory baseOutputChannelFactory; + + if (workOrder.getStageDefinition().doesShuffle()) { + // Writing to a consumer in the same JVM (which will be set up later on in this method). + baseOutputChannelFactory = new BlockingQueueOutputChannelFactory(frameContext.memoryParameters().getFrameSize()); + } else { + // Writing stage output. + baseOutputChannelFactory = executionContext.outputChannelFactory(); + } + + workOutputChannelFactory = new CountingOutputChannelFactory( + baseOutputChannelFactory, + counterTracker.channel(CounterNames.outputChannel()) + ); + + return workOutputChannelFactory; + } + + /** + * Executes processors using {@link #exec}. Saves the result future in {@link #workResultFuture} and saves the + * current pipeline state (result and output channels) in {@link #pipelineFuture}. 
+ */ + private void makeAndRunWorkProcessors(final ProcessorsAndChannels<T, R> processors) + { + final ProcessorManager<T, R> processorManager = processors.getProcessorManager(); + + final int maxOutstandingProcessors; + + if (processors.getOutputChannels().getAllChannels().isEmpty()) { + // No output channels: run up to "threadCount" processors at once. + maxOutstandingProcessors = Math.max(1, threadCount); + } else { + // If there are output channels, that acts as a ceiling on the number of processors that can run at once. + maxOutstandingProcessors = + Math.max(1, Math.min(threadCount, processors.getOutputChannels().getAllChannels().size())); + } + + final boolean usesProcessingBuffers = workOrder.getStageDefinition().getProcessor().usesProcessingBuffers(); + + workResultFuture = exec.runAllFully( + counterTracker.trackCpu(processorManager, CpuCounters.LABEL_MAIN), + maxOutstandingProcessors, Review Comment: a method like `executionContext.getMaxProcessingThreads` could be useful here -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
