bkietz commented on a change in pull request #10664:
URL: https://github.com/apache/arrow/pull/10664#discussion_r666200686
##########
File path: cpp/src/arrow/compute/exec/exec_plan.cc
##########
@@ -220,58 +240,61 @@ struct SourceNode : ExecNode {
   const char* kind_name() override { return "SourceNode"; }
-  static void NoInputs() { DCHECK(false) << "no inputs; this should never be called"; }
-  void InputReceived(ExecNode*, int, ExecBatch) override { NoInputs(); }
-  void ErrorReceived(ExecNode*, Status) override { NoInputs(); }
-  void InputFinished(ExecNode*, int) override { NoInputs(); }
+  [[noreturn]] static void NoInputs() {
+    DCHECK(false) << "no inputs; this should never be called";
+    std::abort();
+  }
+  [[noreturn]] void InputReceived(ExecNode*, int, ExecBatch) override { NoInputs(); }
+  [[noreturn]] void ErrorReceived(ExecNode*, Status) override { NoInputs(); }
+  [[noreturn]] void InputFinished(ExecNode*, int) override { NoInputs(); }
   Status StartProducing() override {
-    if (finished_) {
-      return Status::Invalid("Restarted SourceNode '", label(), "'");
+    DCHECK(!stop_requested_) << "Restarted SourceNode";
+
+    CallbackOptions options;
+    if (auto executor = plan()->exec_context()->executor()) {
+      // These options will transfer execution to the desired Executor if necessary.
+      // This can happen for in-memory scans where batches didn't require
+      // any CPU work to decode. Otherwise, parsing etc. should have already
+      // placed us on the desired Executor and no queues will be pushed to.
+      options.executor = executor;
+      options.should_schedule = ShouldSchedule::IfDifferentExecutor;
Review comment:
       It would work, but when a batch is already on the desired executor (because a
       Fragment has already seeded parallelism) there is no need to reschedule, and doing
       so would potentially lose thread locality.
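
       For context, a minimal, self-contained sketch of the behaviour being discussed:
       under a policy like ShouldSchedule::IfDifferentExecutor, a callback is only
       transferred onto the executor when the calling thread is not already one of its
       threads; otherwise it runs inline and keeps thread locality. The Executor, Spawn,
       OnThisExecutor, and Dispatch names below are illustrative stand-ins, not Arrow's
       actual classes and not the code added in this PR.

       #include <functional>
       #include <iostream>
       #include <thread>
       #include <utility>

       // Illustrative stand-ins only; these are not the real Arrow types.
       struct Executor {
         // Stand-in for an "is the calling thread already owned by this executor?" check.
         bool OnThisExecutor() const { return on_executor_thread; }

         // Real executors queue the task onto a worker pool; here we run it on a fresh
         // thread and mark that thread as belonging to the executor.
         void Spawn(std::function<void()> task) {
           std::thread([task = std::move(task)] {
             on_executor_thread = true;
             task();
             on_executor_thread = false;
           }).join();
         }

         static thread_local bool on_executor_thread;
       };
       thread_local bool Executor::on_executor_thread = false;

       enum class ShouldSchedule { Always, IfDifferentExecutor };

       // Run `callback` either inline or via the executor, depending on the policy.
       void Dispatch(Executor* executor, ShouldSchedule policy,
                     std::function<void()> callback) {
         const bool transfer =
             policy == ShouldSchedule::Always ||
             (policy == ShouldSchedule::IfDifferentExecutor && !executor->OnThisExecutor());
         if (transfer) {
           executor->Spawn(std::move(callback));  // hop onto the executor's threads
         } else {
           callback();  // already on the desired executor: stay put, keep thread locality
         }
       }

       int main() {
         Executor exec;

         // Caller is not on the executor, so the callback is transferred.
         Dispatch(&exec, ShouldSchedule::IfDifferentExecutor,
                  [] { std::cout << "transferred to executor\n"; });

         // A producer that already seeded parallelism: the callback runs inline
         // instead of being re-queued.
         exec.Spawn([&exec] {
           Dispatch(&exec, ShouldSchedule::IfDifferentExecutor,
                    [] { std::cout << "ran inline on the executor\n"; });
         });
       }

       With ShouldSchedule::Always the second Dispatch call would be re-queued even
       though it is already running on the executor's threads, which is the thread
       locality cost described above.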