westonpace commented on code in PR #33732:
URL: https://github.com/apache/arrow/pull/33732#discussion_r1100408828


##########
cpp/src/arrow/dataset/file_json_test.cc:
##########
@@ -0,0 +1,213 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include "arrow/dataset/plan.h"
+#include "arrow/dataset/test_util_internal.h"
+#include "arrow/filesystem/mockfs.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/rapidjson_defs.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/util.h"
+
+#include "rapidjson/writer.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace dataset {
+
+namespace rj = arrow::rapidjson;
+
+#define CASE(TYPE_CLASS)                            \
+  case TYPE_CLASS##Type::type_id: {                 \
+    const TYPE_CLASS##Type* concrete_ptr = nullptr; \
+    return visitor->Visit(concrete_ptr);            \
+  }
+
+template <typename VISITOR>
+static Status VisitWriteableTypeId(Type::type id, VISITOR* visitor) {
+  switch (id) {
+    ARROW_GENERATE_FOR_ALL_NUMERIC_TYPES(CASE)
+    CASE(Boolean)
+    CASE(Struct)
+    default:
+      return Status::NotImplemented("TypeId: ", id);
+  }
+}
+
+#undef CASE
+
+// There's currently no proper API for writing JSON files, which is reflected 
in the JSON
+// dataset API as well. However, this ad-hoc implementation is good enough for 
the shared
+// format test fixtures
+struct WriteVisitor {
+  static Status OK(bool ok) { return ok ? Status::OK() : Status::Invalid(""); }

Review Comment:
   ```suggestion
     static Status OK(bool ok) { return ok ? Status::OK() : 
Status::Invalid("Unexpected false return from JSON writer"); }
   ```
   Probably a long shot (or maybe impossible) that we ever trigger this, but having the 
text here will help future readers understand the purpose of this method.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, 
block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> 
Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, 
block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { 
return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, 
MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();
+
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& 
parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, 
pool));
+  return ::arrow::schema(type->fields());
+}
+
+// Converts a FieldPath to a FieldRef consisting exclusively of field names.
+//
+// The resulting FieldRef can be used to lookup the corresponding field in any 
schema
+// regardless of missing/unordered fields. The input path is assumed to be 
valid for the
+// given schema.
+FieldRef ToUniversalRef(const FieldPath& path, const Schema& schema) {
+  std::vector<FieldRef> refs;
+  refs.reserve(path.indices().size());
+
+  const FieldVector* fields = &schema.fields();
+  for (auto it = path.begin(); it != path.end(); ++it) {
+    DCHECK_LT(*it, static_cast<int>(fields->size()));
+    const auto& child_field = *(*fields)[*it];
+    refs.push_back(FieldRef(child_field.name()));
+    if (it + 1 != path.end()) {
+      auto&& child_type = checked_cast<const StructType&>(*child_field.type());
+      fields = &child_type.fields();
+    }
+  }
+
+  return refs.empty()       ? FieldRef()
+         : refs.size() == 1 ? refs[0]
+                            : FieldRef(std::move(refs));
+}
+
+int TopLevelIndex(const FieldRef& ref, const Schema& schema) {
+  if (const auto* name = ref.name()) {
+    return schema.GetFieldIndex(*name);
+  } else if (const auto* path = ref.field_path()) {
+    DCHECK(!path->empty());
+    return (*path)[0];
+  }
+  const auto* nested_refs = ref.nested_refs();
+  DCHECK(nested_refs && !nested_refs->empty());
+  return TopLevelIndex((*nested_refs)[0], schema);
+}
+
+// Make a new schema consisting only of the top-level fields in the dataset 
schema that:
+//  (a) Have children that require materialization
+//  (b) Have children present in `physical_schema`
+//
+// The resulting schema can be used in reader instantiation to ignore unused 
fields. Note
+// that `physical_schema` is only of structural importance and its data types 
are ignored
+// when constructing the final schema.
+Result<std::shared_ptr<Schema>> GetPartialSchema(const ScanOptions& 
scan_options,
+                                                 const Schema& 
physical_schema) {

Review Comment:
   This "shouldn't" be needed in the new scanner.  I don't remember whether it's 
needed in the old path or not.  If it is, then we need to keep it, unfortunately. 
 We can re-examine this later when adding support for the new path.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, 
block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> 
Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, 
block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { 
return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, 
MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();

Review Comment:
   This is just an observation, but it seems like this is something file formats 
probably shouldn't have to worry about.  I think it would be better to normalize the 
memory pool in the scanner.
   
   That being said, I'm not surprised if we don't do that today.  Maybe a potential 
follow-up PR.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, 
block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> 
Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, 
block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { 
return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, 
MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();
+
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& 
parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, 
pool));
+  return ::arrow::schema(type->fields());

Review Comment:
   Any particular need for `::arrow::`?



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;

Review Comment:
   I get why `explicit_schema` would be ignored, but why is this one ignored as well?



##########
cpp/src/arrow/dataset/file_json_test.cc:
##########
@@ -0,0 +1,213 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include "arrow/dataset/plan.h"
+#include "arrow/dataset/test_util_internal.h"
+#include "arrow/filesystem/mockfs.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/rapidjson_defs.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/util.h"
+
+#include "rapidjson/writer.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace dataset {
+
+namespace rj = arrow::rapidjson;
+
+#define CASE(TYPE_CLASS)                            \
+  case TYPE_CLASS##Type::type_id: {                 \
+    const TYPE_CLASS##Type* concrete_ptr = nullptr; \
+    return visitor->Visit(concrete_ptr);            \
+  }
+
+template <typename VISITOR>
+static Status VisitWriteableTypeId(Type::type id, VISITOR* visitor) {
+  switch (id) {
+    ARROW_GENERATE_FOR_ALL_NUMERIC_TYPES(CASE)
+    CASE(Boolean)
+    CASE(Struct)
+    default:
+      return Status::NotImplemented("TypeId: ", id);
+  }
+}
+
+#undef CASE
+
+// There's currently no proper API for writing JSON files, which is reflected 
in the JSON
+// dataset API as well. However, this ad-hoc implementation is good enough for 
the shared
+// format test fixtures
+struct WriteVisitor {
+  static Status OK(bool ok) { return ok ? Status::OK() : Status::Invalid(""); }
+
+  template <typename T>
+  enable_if_physical_signed_integer<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Int64(scalar.value));
+  }
+
+  template <typename T>
+  enable_if_physical_unsigned_integer<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Uint64(scalar.value));
+  }
+
+  template <typename T>
+  enable_if_physical_floating_point<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Double(scalar.value));
+  }
+
+  Status Visit(const BooleanType*) {
+    const auto& scalar = checked_cast<const BooleanScalar&>(scalar_);
+    return OK(writer_.Bool(scalar.value));
+  }
+
+  Status Visit(const StructType*) {
+    const auto& scalar = checked_cast<const StructScalar&>(scalar_);
+    const auto& type = checked_cast<const StructType&>(*scalar.type);
+    DCHECK_EQ(type.num_fields(), static_cast<int>(scalar.value.size()));
+
+    RETURN_NOT_OK(OK(writer_.StartObject()));
+
+    for (int i = 0; i < type.num_fields(); ++i) {
+      const auto& name = type.field(i)->name();
+      RETURN_NOT_OK(
+          OK(writer_.Key(name.data(), 
static_cast<rj::SizeType>(name.length()))));
+
+      const auto& child = *scalar.value[i];
+      if (!child.is_valid) {
+        RETURN_NOT_OK(OK(writer_.Null()));
+        continue;
+      }
+
+      WriteVisitor visitor{writer_, child};
+      RETURN_NOT_OK(VisitWriteableTypeId(child.type->id(), &visitor));
+    }
+
+    RETURN_NOT_OK(OK(writer_.EndObject(type.num_fields())));
+    return Status::OK();
+  }
+
+  rj::Writer<rj::StringBuffer>& writer_;
+  const Scalar& scalar_;
+};
+
+Result<std::string> WriteJson(const StructScalar& scalar) {
+  rj::StringBuffer sink;
+  rj::Writer<rj::StringBuffer> writer(sink);
+  WriteVisitor visitor{writer, scalar};
+  RETURN_NOT_OK(VisitWriteableTypeId(Type::STRUCT, &visitor));
+  return sink.GetString();
+}
+
+class JsonFormatHelper {
+ public:
+  using FormatType = JsonFileFormat;
+
+  static Result<std::shared_ptr<Buffer>> Write(RecordBatchReader* reader) {
+    ARROW_ASSIGN_OR_RAISE(auto scalars, ToScalars(reader));
+    std::string out;
+    for (const auto& scalar : scalars) {
+      ARROW_ASSIGN_OR_RAISE(auto json, WriteJson(*scalar));
+      out += json + "\n";
+    }
+    return Buffer::FromString(std::move(out));

Review Comment:
   Could we use a `stringstream` here?  I think you can even wrap it with 
`rj::OStreamWrapper` to avoid needing to create a `rj::StringBuffer` for every 
row.  Either way, we should avoid the `out +=`, which will have quadratic 
complexity.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+// Inspection result for a JSON fragment: the discovered top-level column
+// names plus the open stream (and its byte size) used to continue reading.
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  // Stream positioned for reading the fragment's data.
+  std::shared_ptr<io::InputStream> stream;
+  // Fragment size in bytes; used to estimate the number of batches.
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, 
block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> 
Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, 
block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { 
return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+// Parses `data` as JSON and returns the struct type inferred from the parsed
+// rows. A null `pool` falls back to the default memory pool.
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, 
MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();
+
+  // Split off the complete JSON rows; any trailing partial row ends up in
+  // `partial` and is not parsed here.
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  // Parse the complete rows into an array and take its (struct) type.
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+// Like ParseToStructType, but exposes the inferred struct type's fields as a
+// schema.
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& 
parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, 
pool));
+  return ::arrow::schema(type->fields());
+}
+
+// Converts a FieldPath to a FieldRef consisting exclusively of field names.
+//
+// The resulting FieldRef can be used to lookup the corresponding field in any 
schema
+// regardless of missing/unordered fields. The input path is assumed to be 
valid for the
+// given schema.
+FieldRef ToUniversalRef(const FieldPath& path, const Schema& schema) {
+  std::vector<FieldRef> refs;
+  refs.reserve(path.indices().size());
+
+  const FieldVector* fields = &schema.fields();
+  for (auto it = path.begin(); it != path.end(); ++it) {
+    DCHECK_LT(*it, static_cast<int>(fields->size()));
+    const auto& child_field = *(*fields)[*it];
+    refs.push_back(FieldRef(child_field.name()));
+    if (it + 1 != path.end()) {
+      auto&& child_type = checked_cast<const StructType&>(*child_field.type());
+      fields = &child_type.fields();
+    }
+  }
+
+  return refs.empty()       ? FieldRef()
+         : refs.size() == 1 ? refs[0]
+                            : FieldRef(std::move(refs));
+}
+
+// Returns the index of the top-level field of `schema` that `ref` refers to.
+int TopLevelIndex(const FieldRef& ref, const Schema& schema) {
+  if (const auto* name = ref.name()) {
+    return schema.GetFieldIndex(*name);
+  } else if (const auto* path = ref.field_path()) {
+    DCHECK(!path->empty());
+    return (*path)[0];
+  }
+  // Nested ref: the top-level field is determined by its first component.
+  const auto* nested_refs = ref.nested_refs();
+  DCHECK(nested_refs && !nested_refs->empty());
+  return TopLevelIndex((*nested_refs)[0], schema);
+}
+
+// Make a new schema consisting only of the top-level fields in the dataset 
schema that:
+//  (a) Have children that require materialization
+//  (b) Have children present in `physical_schema`
+//
+// The resulting schema can be used in reader instantiation to ignore unused 
fields. Note
+// that `physical_schema` is only of structural importance and its data types 
are ignored
+// when constructing the final schema.
+Result<std::shared_ptr<Schema>> GetPartialSchema(const ScanOptions& 
scan_options,
+                                                 const Schema& 
physical_schema) {
+  auto dataset_schema = scan_options.dataset_schema;
+  DCHECK_NE(dataset_schema, nullptr);
+  const auto max_num_fields = 
static_cast<size_t>(dataset_schema->num_fields());
+
+  // selected[i] tracks whether top-level field i was already chosen, so each
+  // field is added at most once.
+  std::vector<bool> selected(max_num_fields, false);
+  std::vector<int> toplevel_indices;
+  toplevel_indices.reserve(max_num_fields);
+
+  for (const auto& ref : scan_options.MaterializedFields()) {
+    auto index = TopLevelIndex(ref, *dataset_schema);
+    DCHECK_GE(index, 0);
+    if (selected[index]) continue;
+
+    // Determine if the field exists in the physical schema before selecting it
+    bool found;
+    if (!ref.IsNested()) {
+      const auto& name = dataset_schema->field(index)->name();
+      found = physical_schema.GetFieldIndex(name) != -1;
+    } else {
+      // Check if the nested field is present in the physical schema. If so, 
we load its
+      // entire top-level field
+      ARROW_ASSIGN_OR_RAISE(auto path, ref.FindOne(*dataset_schema));
+      auto universal_ref = ToUniversalRef(path, *dataset_schema);
+      ARROW_ASSIGN_OR_RAISE(auto match, 
universal_ref.FindOneOrNone(physical_schema));
+      found = !match.empty();
+    }
+
+    if (!found) continue;
+
+    toplevel_indices.push_back(index);
+    selected[index] = true;
+    // All fields in the dataset schema require materialization, so return 
early
+    if (toplevel_indices.size() == max_num_fields) {
+      return dataset_schema;
+    }
+  }
+
+  // Assemble the selected fields in dataset-schema order.
+  FieldVector fields;
+  fields.reserve(toplevel_indices.size());
+  std::sort(toplevel_indices.begin(), toplevel_indices.end());
+  for (auto index : toplevel_indices) {
+    fields.push_back(dataset_schema->field(index));
+  }
+
+  return ::arrow::schema(std::move(fields));
+}
+
+// Resolves the JSON-specific scan options, falling back to the format's
+// default fragment scan options when the scan provides none.
+Result<std::shared_ptr<JsonFragmentScanOptions>> GetJsonFormatOptions(
+    const JsonFileFormat& format, const ScanOptions* scan_options) {
+  return GetFragmentScanOptions<JsonFragmentScanOptions>(
+      kJsonTypeName, scan_options, format.default_fragment_scan_options);
+}
+
+Result<Future<ReaderPtr>> DoOpenReader(
+    const FileSource& source, const JsonFileFormat& format,
+    const std::shared_ptr<ScanOptions>& scan_options = nullptr) {
+  ARROW_ASSIGN_OR_RAISE(auto json_options,
+                        GetJsonFormatOptions(format, scan_options.get()));
+
+  struct State {
+    State(const JsonFragmentScanOptions& json_options,
+          const std::shared_ptr<ScanOptions>& scan_options)
+        : parse_options(json_options.parse_options),
+          read_options(json_options.read_options),
+          scan_options(scan_options) {
+      // We selectively ignore some user options, primarily those that will 
influence the
+      // output schema.
+      parse_options.explicit_schema = nullptr;
+      parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::InferType;
+      // We don't wish to contend with the scanner's CPU threads if multiple 
fragments are
+      // being scanned in parallel, so fragment-level parallelism gets 
conditionally
+      // disabled.
+      if (this->scan_options && this->scan_options->use_threads) {
+        read_options.use_threads = false;
+      }

Review Comment:
   We've done this in the past to avoid nested deadlock.  That shouldn't be a 
problem if we use the async paths (though I don't know what the async path for 
the JSON reader looks like).
   
   If there are only 1 or 2 files and there is a potential to speed up JSON 
decoding with multiple threads then that seems like something we'd want to do.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;

Review Comment:
   Can we stick with `num_bytes` here?  When the name is `size` or `length` I 
find it hard to know if we're talking about the number of rows or the total 
number of bytes.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+// Inspection result for a JSON fragment: the discovered top-level column
+// names plus the open stream (and its byte size) used to continue reading.
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  // Stream positioned for reading the fragment's data.
+  std::shared_ptr<io::InputStream> stream;
+  // Fragment size in bytes; used to estimate the number of batches.
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), 
num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& 
inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = 
json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, 
block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> 
Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, 
block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { 
return e; });

Review Comment:
   ```suggestion
           });
   ```
   This is the default error callback and can be omitted.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to