benibus commented on code in PR #33732:
URL: https://github.com/apache/arrow/pull/33732#discussion_r1101774393


##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
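+// Inspection result carrying the discovered column names along with the already-opened
+// input stream and its size, so scanning can resume without reopening the source.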
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
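+// FragmentScanner backed by a json::StreamingReader. Batches are expected to be
+// requested sequentially (see the DCHECK in ScanBatch).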
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field(name, std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = json::UnexpectedFieldBehavior::Ignore;
+
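+    // Estimate the number of output batches from the stream size, assuming roughly one
+    // batch per decoded block of `block_size` bytes.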
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
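+// Infers a struct type by parsing the leading whole JSON blocks in `data`, discarding
+// any trailing partial object left over by the chunker.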
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();
+
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, pool));
+  return ::arrow::schema(type->fields());
+}
+
+// Converts a FieldPath to a FieldRef consisting exclusively of field names.
+//
+// The resulting FieldRef can be used to look up the corresponding field in any schema
+// regardless of missing/unordered fields. The input path is assumed to be valid for
+// the given schema.
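+//
+// For example, given a schema {"a": struct<"b": int32>}, FieldPath({0, 0}) maps to a
+// name-based ref equivalent to FieldRef("a", "b").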
+FieldRef ToUniversalRef(const FieldPath& path, const Schema& schema) {
+  std::vector<FieldRef> refs;
+  refs.reserve(path.indices().size());
+
+  const FieldVector* fields = &schema.fields();
+  for (auto it = path.begin(); it != path.end(); ++it) {
+    DCHECK_LT(*it, static_cast<int>(fields->size()));
+    const auto& child_field = *(*fields)[*it];
+    refs.push_back(FieldRef(child_field.name()));
+    if (it + 1 != path.end()) {
+      auto&& child_type = checked_cast<const StructType&>(*child_field.type());
+      fields = &child_type.fields();
+    }
+  }
+
+  return refs.empty()       ? FieldRef()
+         : refs.size() == 1 ? refs[0]
+                            : FieldRef(std::move(refs));
+}
+
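+// Returns the index of the top-level field in `schema` that `ref` refers to, recursing
+// into the first child of nested refs.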
+int TopLevelIndex(const FieldRef& ref, const Schema& schema) {
+  if (const auto* name = ref.name()) {
+    return schema.GetFieldIndex(*name);
+  } else if (const auto* path = ref.field_path()) {
+    DCHECK(!path->empty());
+    return (*path)[0];
+  }
+  const auto* nested_refs = ref.nested_refs();
+  DCHECK(nested_refs && !nested_refs->empty());
+  return TopLevelIndex((*nested_refs)[0], schema);
+}
+
+// Make a new schema consisting only of the top-level fields in the dataset schema
+// that:
+//  (a) Have children that require materialization
+//  (b) Have children present in `physical_schema`
+//
+// The resulting schema can be used in reader instantiation to ignore unused fields.
+// Note that `physical_schema` is only of structural importance and its data types are
+// ignored when constructing the final schema.
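+//
+// For example, if the dataset schema is {"a", "b", "c"} but only "a" is materialized
+// and present in `physical_schema`, the result is a schema containing just "a".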
+Result<std::shared_ptr<Schema>> GetPartialSchema(const ScanOptions& scan_options,
+                                                 const Schema& physical_schema) {
+  auto dataset_schema = scan_options.dataset_schema;
+  DCHECK_NE(dataset_schema, nullptr);
+  const auto max_num_fields = static_cast<size_t>(dataset_schema->num_fields());
+
+  std::vector<bool> selected(max_num_fields, false);
+  std::vector<int> toplevel_indices;
+  toplevel_indices.reserve(max_num_fields);
+
+  for (const auto& ref : scan_options.MaterializedFields()) {
+    auto index = TopLevelIndex(ref, *dataset_schema);
+    DCHECK_GE(index, 0);
+    if (selected[index]) continue;
+
+    // Determine if the field exists in the physical schema before selecting it
+    bool found;
+    if (!ref.IsNested()) {
+      const auto& name = dataset_schema->field(index)->name();
+      found = physical_schema.GetFieldIndex(name) != -1;
+    } else {
+      // Check if the nested field is present in the physical schema. If so, we load
+      // its entire top-level field.
+      ARROW_ASSIGN_OR_RAISE(auto path, ref.FindOne(*dataset_schema));
+      auto universal_ref = ToUniversalRef(path, *dataset_schema);
+      ARROW_ASSIGN_OR_RAISE(auto match, universal_ref.FindOneOrNone(physical_schema));
+      found = !match.empty();
+    }
+
+    if (!found) continue;
+
+    toplevel_indices.push_back(index);
+    selected[index] = true;
+    // If all fields in the dataset schema require materialization, return early
+    if (toplevel_indices.size() == max_num_fields) {
+      return dataset_schema;
+    }
+  }
+
+  FieldVector fields;
+  fields.reserve(toplevel_indices.size());
+  std::sort(toplevel_indices.begin(), toplevel_indices.end());
+  for (auto index : toplevel_indices) {
+    fields.push_back(dataset_schema->field(index));
+  }
+
+  return ::arrow::schema(std::move(fields));
+}
+
+Result<std::shared_ptr<JsonFragmentScanOptions>> GetJsonFormatOptions(
+    const JsonFileFormat& format, const ScanOptions* scan_options) {
+  return GetFragmentScanOptions<JsonFragmentScanOptions>(
+      kJsonTypeName, scan_options, format.default_fragment_scan_options);
+}
+
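+// Opens a StreamingReader over `source` with type inference forced, so the resulting
+// schema reflects the file contents rather than any user-provided schema.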
+Result<Future<ReaderPtr>> DoOpenReader(
+    const FileSource& source, const JsonFileFormat& format,
+    const std::shared_ptr<ScanOptions>& scan_options = nullptr) {
+  ARROW_ASSIGN_OR_RAISE(auto json_options,
+                        GetJsonFormatOptions(format, scan_options.get()));
+
+  struct State {
+    State(const JsonFragmentScanOptions& json_options,
+          const std::shared_ptr<ScanOptions>& scan_options)
+        : parse_options(json_options.parse_options),
+          read_options(json_options.read_options),
+          scan_options(scan_options) {
+      // We selectively ignore some user options, primarily those that will influence
+      // the output schema.
+      parse_options.explicit_schema = nullptr;
+      parse_options.unexpected_field_behavior =
+          json::UnexpectedFieldBehavior::InferType;
+      // We don't wish to contend with the scanner's CPU threads if multiple fragments
+      // are being scanned in parallel, so fragment-level parallelism gets
+      // conditionally disabled.
+      if (this->scan_options && this->scan_options->use_threads) {
+        read_options.use_threads = false;
+      }

Review Comment:
   There was a similar comment in `file_csv.cc` that I based this on (since the CSV reader has similar async properties). Admittedly, I wasn't completely on board with the reasoning myself.
   
   Otherwise, I don't think there's any real risk of nested parallelism.


