vibhatha commented on a change in pull request #12033:
URL: https://github.com/apache/arrow/pull/12033#discussion_r778494297
##########
File path: cpp/examples/arrow/exec_plan_examples.cc
##########
@@ -0,0 +1,1122 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <iostream>
#include <memory>
#include <utility>

#include "arrow/compute/api.h"
#include "arrow/compute/api_scalar.h"
#include "arrow/compute/api_vector.h"
#include "arrow/compute/cast.h"
#include "arrow/compute/exec/exec_plan.h"
#include "arrow/compute/exec/ir_consumer.h"
#include "arrow/compute/exec/test_util.h"

#include <arrow/dataset/dataset.h>
#include <arrow/dataset/file_parquet.h>
#include "arrow/dataset/dataset_writer.h"
#include "arrow/dataset/file_base.h"
#include "arrow/dataset/plan.h"
#include "arrow/dataset/scanner.h"

#include "arrow/io/interfaces.h"
#include "arrow/io/memory.h"
#include "arrow/io/slow.h"
#include "arrow/io/transform.h"

#include <arrow/result.h>
#include <arrow/status.h>
#include <arrow/table.h>

#include <arrow/ipc/api.h>
#include "arrow/ipc/json_simple.h"  // for arrow::ipc::internal::json::ArrayFromJSON

#include <arrow/util/future.h>
#include "arrow/util/range.h"
#include "arrow/util/thread_pool.h"
#include "arrow/util/vector.h"

// Demonstrates various operators in the Arrow Streaming Execution Engine.

#define ABORT_ON_FAILURE(expr)                     \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      abort();                                     \
    }                                              \
  } while (0)

#define CHECK_AND_RETURN(expr)                     \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      return EXIT_FAILURE;                         \
    } else {                                       \
      return EXIT_SUCCESS;                         \
    }                                              \
  } while (0)

#define CHECK_AND_CONTINUE(expr)                   \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      return EXIT_FAILURE;                         \
    }                                              \
  } while (0)

#define SEP_STR "******"

#define PRINT_BLOCK(msg)                                                       \
  do {                                                                         \
    std::cout << std::endl;                                                    \
    std::cout << "\t" << SEP_STR << " " << msg << " " << SEP_STR << std::endl; \
    std::cout << std::endl;                                                    \
  } while (0)

#define PRINT_LINE(msg) std::cout << msg << std::endl

namespace cp = ::arrow::compute;

std::shared_ptr<arrow::Array> GetArrayFromJSON(
    const std::shared_ptr<arrow::DataType>& type, arrow::util::string_view json) {
  std::shared_ptr<arrow::Array> out;
  ABORT_ON_FAILURE(arrow::ipc::internal::json::ArrayFromJSON(type, json, &out));
  return out;
}

std::shared_ptr<arrow::RecordBatch> GetRecordBatchFromJSON(
    const std::shared_ptr<arrow::Schema>& schema, arrow::util::string_view json) {
  // Parse as a StructArray
  auto struct_type = arrow::struct_(schema->fields());
  std::shared_ptr<arrow::Array> struct_array = GetArrayFromJSON(struct_type, json);

  // Convert the StructArray to a RecordBatch
  return *arrow::RecordBatch::FromStructArray(struct_array);
}
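// A minimal usage sketch for the JSON helpers above; the schema and JSON
// payload here are illustrative placeholders, not used elsewhere in the file.
std::shared_ptr<arrow::RecordBatch> MakeSampleBatch() {
  // two rows with an int32 column "x" and a boolean column "y"
  auto schema = arrow::schema(
      {arrow::field("x", arrow::int32()), arrow::field("y", arrow::boolean())});
  return GetRecordBatchFromJSON(schema, R"([{"x": 1, "y": true},
                                            {"x": 2, "y": false}])");
}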
std::shared_ptr<arrow::Table> GetTableFromJSON(
    const std::shared_ptr<arrow::Schema>& schema, const std::vector<std::string>& json) {
  std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
  for (const std::string& batch_json : json) {
    batches.push_back(GetRecordBatchFromJSON(schema, batch_json));
  }
  return *arrow::Table::FromRecordBatches(schema, std::move(batches));
}

std::shared_ptr<arrow::Table> CreateTable() {
  auto schema =
      arrow::schema({arrow::field("a", arrow::int64()), arrow::field("b", arrow::int64()),
                     arrow::field("c", arrow::int64())});
  std::shared_ptr<arrow::Array> array_a;
  std::shared_ptr<arrow::Array> array_b;
  std::shared_ptr<arrow::Array> array_c;
  arrow::NumericBuilder<arrow::Int64Type> builder;
  ABORT_ON_FAILURE(builder.AppendValues({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
  ABORT_ON_FAILURE(builder.Finish(&array_a));
  builder.Reset();
  ABORT_ON_FAILURE(builder.AppendValues({9, 8, 7, 6, 5, 4, 3, 2, 1, 0}));
  ABORT_ON_FAILURE(builder.Finish(&array_b));
  builder.Reset();
  ABORT_ON_FAILURE(builder.AppendValues({1, 2, 1, 2, 1, 2, 1, 2, 1, 2}));
  ABORT_ON_FAILURE(builder.Finish(&array_c));
  return arrow::Table::Make(schema, {array_a, array_b, array_c});
}

std::shared_ptr<arrow::dataset::Dataset> CreateDataset() {
  return std::make_shared<arrow::dataset::InMemoryDataset>(GetTableFromJSON(
      arrow::schema(
          {arrow::field("a", arrow::int32()), arrow::field("b", arrow::boolean())}),
      {
          R"([{"a": 1, "b": null},
              {"a": 2, "b": true}])",
          R"([{"a": null, "b": true},
              {"a": 3, "b": false}])",
          R"([{"a": null, "b": true},
              {"a": 4, "b": false}])",
          R"([{"a": 5, "b": null},
              {"a": 6, "b": false},
              {"a": 7, "b": false},
              {"a": 8, "b": true}])",
      }));
}

std::shared_ptr<arrow::Table> CreateSimpleTable() {
  auto schema = arrow::schema(
      {arrow::field("a", arrow::int32()), arrow::field("b", arrow::boolean())});
  std::shared_ptr<arrow::Array> array_a;
  std::shared_ptr<arrow::Array> array_b;
  arrow::NumericBuilder<arrow::Int32Type> builder;
  arrow::BooleanBuilder b_builder;
  ABORT_ON_FAILURE(builder.AppendValues({1, 2, 3, 4, 5, 6, 7}));
  ABORT_ON_FAILURE(builder.Finish(&array_a));

  std::vector<bool> bool_vec{false, true, false, true, false, true, false};
  ABORT_ON_FAILURE(b_builder.AppendValues(bool_vec));
  ABORT_ON_FAILURE(b_builder.Finish(&array_b));
  return arrow::Table::Make(schema, {array_a, array_b});
}
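// A minimal sketch: any of the tables built above can be wrapped in an
// InMemoryDataset (as CreateDataset() does) and scanned by an ExecPlan.
// CreateSimpleDataset is an assumed helper, not used by the examples below.
std::shared_ptr<arrow::dataset::Dataset> CreateSimpleDataset() {
  return std::make_shared<arrow::dataset::InMemoryDataset>(CreateSimpleTable());
}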
arrow::Status exec_plan_end_to_end_sample() {
  cp::ExecContext exec_context(arrow::default_memory_pool(),
                               ::arrow::internal::GetCpuThreadPool());

  // ensure arrow::dataset node factories are in the registry
  arrow::dataset::internal::Initialize();

  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<cp::ExecPlan> plan,
                        cp::ExecPlan::Make(&exec_context));

  std::shared_ptr<arrow::dataset::Dataset> dataset = CreateDataset();

  auto options = std::make_shared<arrow::dataset::ScanOptions>();
  // sync scanning is not supported by ScanNode
  options->use_async = true;
  // specify the filter
  cp::Expression b_is_true = cp::field_ref("b");
  options->filter = b_is_true;
  // for now, specify the projection as the full project expression (eventually this
  // can just be a list of materialized field names)
  cp::Expression a_times_2 = cp::call("multiply", {cp::field_ref("a"), cp::literal(2)});
  options->projection =
      cp::call("make_struct", {a_times_2}, cp::MakeStructOptions{{"a * 2"}});

  // construct the scan node
  cp::ExecNode* scan;
  auto scan_node_options = arrow::dataset::ScanNodeOptions{dataset, options};
  ARROW_ASSIGN_OR_RAISE(scan,
                        cp::MakeExecNode("scan", plan.get(), {}, scan_node_options));

  // pipe the scan node into a filter node
  cp::ExecNode* filter;
  ARROW_ASSIGN_OR_RAISE(filter, cp::MakeExecNode("filter", plan.get(), {scan},
                                                 cp::FilterNodeOptions{b_is_true}));

  // pipe the filter node into a project node
  cp::ExecNode* project;
  ARROW_ASSIGN_OR_RAISE(project,
                        cp::MakeExecNode("augmented_project", plan.get(), {filter},
                                         cp::ProjectNodeOptions{{a_times_2}}));

  // finally, pipe the project node into a sink node
  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> sink_gen;
  ARROW_ASSIGN_OR_RAISE(cp::ExecNode * sink,
                        cp::MakeExecNode("sink", plan.get(), {project},
                                         cp::SinkNodeOptions{&sink_gen}));
  ABORT_ON_FAILURE(sink->Validate());

  // translate sink_gen (async) to sink_reader (sync)
  std::shared_ptr<arrow::RecordBatchReader> sink_reader =
      cp::MakeGeneratorReader(arrow::schema({arrow::field("a * 2", arrow::int32())}),
                              std::move(sink_gen), exec_context.memory_pool());

  // validate the plan
  ABORT_ON_FAILURE(plan->Validate());
  PRINT_LINE("ExecPlan created: " << plan->ToString());
  // start the ExecPlan
  ABORT_ON_FAILURE(plan->StartProducing());

  // collect sink_reader into a Table
  std::shared_ptr<arrow::Table> response_table;
  ARROW_ASSIGN_OR_RAISE(response_table,
                        arrow::Table::FromRecordBatchReader(sink_reader.get()));

  std::cout << "Results : " << response_table->ToString() << std::endl;

  // stop producing
  plan->StopProducing();
  // wait for the plan to be marked finished
  plan->finished().Wait();

  return arrow::Status::OK();
}

cp::Expression Materialize(std::vector<std::string> names,
                           bool include_aug_fields = false) {
  if (include_aug_fields) {
    for (auto aug_name : {"__fragment_index", "__batch_index", "__last_in_fragment"}) {
      names.emplace_back(aug_name);
    }
  }

  std::vector<cp::Expression> exprs;
  for (const auto& name : names) {
    exprs.push_back(cp::field_ref(name));
  }

  return cp::project(exprs, names);
}

arrow::Status consume(
    std::shared_ptr<arrow::Schema> schema,
    std::function<arrow::Future<arrow::util::optional<cp::ExecBatch>>()>* sink_gen) {
  auto iterator = arrow::MakeGeneratorIterator(*sink_gen);
  while (true) {
    ARROW_ASSIGN_OR_RAISE(auto exec_batch, iterator.Next());
    if (!exec_batch.has_value()) {
      break;
    }
    ARROW_ASSIGN_OR_RAISE(auto record_batch, exec_batch->ToRecordBatch(schema));
    std::cout << record_batch->ToString() << '\n';
  }
  return arrow::Status::OK();
}
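// A minimal sketch of how Materialize() is typically used: to populate
// ScanOptions::projection, optionally including the augmented fields
// (__fragment_index, __batch_index, __last_in_fragment) emitted by the
// scanner. The column names "a" and "b" are assumptions borrowed from
// CreateDataset(); this helper is not exercised by the examples below.
std::shared_ptr<arrow::dataset::ScanOptions> MakeScanOptionsWithAugFields() {
  auto options = std::make_shared<arrow::dataset::ScanOptions>();
  // sync scanning is not supported by ScanNode
  options->use_async = true;
  options->projection = Materialize({"a", "b"}, /*include_aug_fields=*/true);
  return options;
}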
arrow::Status scan_sink_node_example() {
  cp::ExecContext exec_context(arrow::default_memory_pool(),
                               ::arrow::internal::GetCpuThreadPool());

  // ensure arrow::dataset node factories are in the registry
  arrow::dataset::internal::Initialize();

  // create the execution plan
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<cp::ExecPlan> plan,
                        cp::ExecPlan::Make(&exec_context));

  std::shared_ptr<arrow::dataset::Dataset> dataset = CreateDataset();

  auto options = std::make_shared<arrow::dataset::ScanOptions>();
  // sync scanning is not supported by ScanNode
  options->use_async = true;
  options->projection = Materialize({});  // create an empty projection

  // construct the scan node
  cp::ExecNode* scan;
  auto scan_node_options = arrow::dataset::ScanNodeOptions{dataset, options};
  ARROW_ASSIGN_OR_RAISE(scan,
                        cp::MakeExecNode("scan", plan.get(), {}, scan_node_options));

  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> sink_gen;

  cp::ExecNode* sink;
  ARROW_ASSIGN_OR_RAISE(sink, cp::MakeExecNode("sink", plan.get(), {scan},
                                               cp::SinkNodeOptions{&sink_gen}));

  // translate sink_gen (async) to sink_reader (sync)
  std::shared_ptr<arrow::RecordBatchReader> sink_reader = cp::MakeGeneratorReader(
      dataset->schema(), std::move(sink_gen), exec_context.memory_pool());

  // validate the ExecPlan
  ABORT_ON_FAILURE(plan->Validate());
  PRINT_LINE("ExecPlan created: " << plan->ToString());
  // start the ExecPlan
  ABORT_ON_FAILURE(plan->StartProducing());

  // collect sink_reader into a Table
  std::shared_ptr<arrow::Table> response_table;
  ARROW_ASSIGN_OR_RAISE(response_table,
                        arrow::Table::FromRecordBatchReader(sink_reader.get()));

  PRINT_LINE("Results : " << response_table->ToString());

  // stop producing
  plan->StopProducing();
  // wait for the plan to be marked finished
  plan->finished().Wait();

  return arrow::Status::OK();
}

cp::ExecBatch GetExecBatchFromJSON(const std::vector<arrow::ValueDescr>& descrs,
                                   arrow::util::string_view json) {
  auto fields = ::arrow::internal::MapVector(
      [](const arrow::ValueDescr& descr) { return arrow::field("", descr.type); },
      descrs);

  cp::ExecBatch batch{*GetRecordBatchFromJSON(arrow::schema(std::move(fields)), json)};

  auto value_it = batch.values.begin();
  for (const auto& descr : descrs) {
    if (descr.shape == arrow::ValueDescr::SCALAR) {
      if (batch.length == 0) {
        *value_it = arrow::MakeNullScalar(value_it->type());
      } else {
        *value_it = value_it->make_array()->GetScalar(0).ValueOrDie();
      }
    }
    ++value_it;
  }

  return batch;
}

struct BatchesWithSchema {
  std::vector<cp::ExecBatch> batches;
  std::shared_ptr<arrow::Schema> schema;

  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> gen(bool parallel) const {

Review comment:

> Can we add a placeholder section and link to that perhaps?
>
> On Tue, Jan 4, 2022, at 11:48, Vibhatha Lakmal Abeykoon wrote:
>
> > In docs/source/cpp/streaming_execution.rst ([#12033 (comment)](https://github.com/apache/arrow/pull/12033#discussion_r778232832)):
> >
> > +  // data schema
> > +  auto schema = arrow::schema({...})
> > +
> > +  // source node options
> > +  auto source_node_options = arrow::compute::SourceNodeOptions{schema, gen};
> > +
> > +  // create a source node
> > +  ARROW_ASSIGN_OR_RAISE(arrow::compute::ExecNode * source,
> > +                        arrow::compute::MakeExecNode("source", plan.get(), {},
> > +                                                     source_node_options));
> > +
> > +``FilterNode``
> > +--------------
> > +
> > +:class:`FilterNode`, as the name suggests, provides a container to define data filtering criteria.
> > +Filters can be written using :class:`arrow::compute::Expression`. For instance, if the row values
> >
> > compute.rst contains some of the function definitions, but not detailed examples of how to use expressions.

@lidavidm should we create a new RST or just a sub-section within the same document?
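For instance, the new section could carry a short self-contained snippet along these lines (a rough sketch only; the column name and threshold are placeholders):

    // build a predicate: keep rows where "a" is greater than 3
    cp::Expression filter_expr = cp::greater(cp::field_ref("a"), cp::literal(3));

    // attach it to a filter node fed by some upstream node `source`
    ARROW_ASSIGN_OR_RAISE(cp::ExecNode * filter,
                          cp::MakeExecNode("filter", plan.get(), {source},
                                           cp::FilterNodeOptions{filter_expr}));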
