westonpace commented on code in PR #33732:
URL: https://github.com/apache/arrow/pull/33732#discussion_r1103046875


##########
cpp/src/arrow/dataset/CMakeLists.txt:
##########
@@ -46,6 +46,10 @@ if(ARROW_CSV)
   set(ARROW_DATASET_SRCS ${ARROW_DATASET_SRCS} file_csv.cc)
 endif()
 
+if(ARROW_JSON)

Review Comment:
   Is there already a separate GitHub issue for adding Python bindings? If not, let's make sure we add one.



##########
cpp/src/arrow/dataset/file_json.h:
##########
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/dataset/dataset.h"
+#include "arrow/dataset/file_base.h"
+#include "arrow/dataset/type_fwd.h"
+#include "arrow/dataset/visibility.h"
+#include "arrow/ipc/type_fwd.h"
+#include "arrow/json/options.h"
+#include "arrow/status.h"
+#include "arrow/util/compression.h"

Review Comment:
   ```suggestion
   #include <memory>
   #include <optional>
   #include <string>
   
   #include "arrow/dataset/dataset.h"
   #include "arrow/dataset/file_base.h"
   #include "arrow/dataset/type_fwd.h"
   #include "arrow/dataset/visibility.h"
   #include "arrow/ipc/type_fwd.h"
   #include "arrow/json/options.h"
   #include "arrow/result.h"
   #include "arrow/status.h"
   #include "arrow/util/future.h"
   #include "arrow/util/macros.h"
   ```



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,405 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>

Review Comment:
   ```suggestion
   #include <algorithm>
   #include <unordered_set>
   #include <vector>
   ```
   `<algorithm>` is needed for `std::sort`.



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,405 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t num_bytes)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        num_bytes(num_bytes) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t num_bytes;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)), block_size_(block_size), num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches =
+        static_cast<int>(bit_util::CeilDiv(inspected.num_bytes, block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then([num_batches, block_size](const ReaderPtr& reader)
+                           -> Result<std::shared_ptr<FragmentScanner>> {
+      return std::make_shared<JsonFragmentScanner>(reader, num_batches, block_size);
+    });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, MemoryPool* pool) {
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, pool));
+  return schema(type->fields());
+}
+
+// Converts a FieldPath to a FieldRef consisting exclusively of field names.
+//
+// The resulting FieldRef can be used to look up the corresponding field in any schema
+// regardless of missing/unordered fields. The input path is assumed to be valid for the
+// given schema.
+FieldRef ToUniversalRef(const FieldPath& path, const Schema& schema) {
+  std::vector<FieldRef> refs;
+  refs.reserve(path.indices().size());
+
+  const FieldVector* fields = &schema.fields();
+  for (auto it = path.begin(); it != path.end(); ++it) {
+    DCHECK_LT(*it, static_cast<int>(fields->size()));
+    const auto& child_field = *(*fields)[*it];
+    refs.push_back(FieldRef(child_field.name()));

Review Comment:
   ```suggestion
       refs.emplace_back(child_field.name());
   ```
   
   Minor nit: `emplace_back` constructs the `FieldRef` in place, rather than building a temporary and moving it in.
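   
   A minimal illustration of the difference (hypothetical `name` variable):
   
   ```cpp
   std::vector<FieldRef> refs;
   refs.push_back(FieldRef(name));  // builds a temporary FieldRef, then moves it into the vector
   refs.emplace_back(name);         // forwards `name`, constructing the FieldRef directly in place
   ```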



##########
cpp/src/arrow/dataset/file_json.cc:
##########
@@ -0,0 +1,405 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"

Review Comment:
   ```suggestion
   #include "arrow/compute/exec.h"
   #include "arrow/compute/exec/expression.h"
   #include "arrow/dataset/dataset_internal.h"
   #include "arrow/dataset/scanner.h"
   #include "arrow/dataset/type_fwd.h"
   #include "arrow/io/buffered.h"
   #include "arrow/io/interfaces.h"
   #include "arrow/io/type_fwd.h"
   #include "arrow/json/chunker.h"
   #include "arrow/json/parser.h"
   #include "arrow/json/reader.h"
   #include "arrow/record_batch.h"
   #include "arrow/type.h"
   #include "arrow/util/async_generator.h"
   #include "arrow/util/bit_util.h"
   #include "arrow/util/checked_cast.h"
   #include "arrow/util/delimiting.h"
   #include "arrow/util/logging.h"
   #include "arrow/util/thread_pool.h"
   ```
   
   I'm not sure how practical IWYU is when we don't have the tool wired up :laughing:, so feel free to treat this as a minor nit.
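   
   If we ever do wire the tool up, IWYU's source pragmas can pin an include it would otherwise flag; a hypothetical one-liner (not something this PR needs):
   
   ```cpp
   #include "arrow/dataset/type_fwd.h"  // IWYU pragma: keep
   ```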



##########
cpp/src/arrow/dataset/file_json_test.cc:
##########
@@ -0,0 +1,216 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include "arrow/dataset/plan.h"
+#include "arrow/dataset/test_util_internal.h"
+#include "arrow/filesystem/mockfs.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/rapidjson_defs.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/util.h"
+
+#include "rapidjson/ostreamwrapper.h"
+#include "rapidjson/writer.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+
+namespace dataset {
+
+namespace rj = arrow::rapidjson;
+
+#define CASE(TYPE_CLASS)                            \
+  case TYPE_CLASS##Type::type_id: {                 \
+    const TYPE_CLASS##Type* concrete_ptr = nullptr; \
+    return visitor->Visit(concrete_ptr);            \
+  }
+
+template <typename VISITOR>
+static Status VisitWriteableTypeId(Type::type id, VISITOR* visitor) {
+  switch (id) {
+    ARROW_GENERATE_FOR_ALL_NUMERIC_TYPES(CASE)
+    CASE(Boolean)
+    CASE(Struct)
+    default:
+      return Status::NotImplemented("TypeId: ", id);
+  }
+}
+
+#undef CASE
+
+// There's currently no proper API for writing JSON files, which is reflected in the JSON
+// dataset API as well. However, this ad-hoc implementation is good enough for the shared
+// format test fixtures.
+struct WriteVisitor {
+  static Status OK(bool ok) {
+    return ok ? Status::OK()
+              : Status::Invalid("Unexpected false return from JSON writer");
+  }
+
+  template <typename T>
+  enable_if_physical_signed_integer<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Int64(scalar.value));
+  }
+
+  template <typename T>
+  enable_if_physical_unsigned_integer<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Uint64(scalar.value));
+  }
+
+  template <typename T>
+  enable_if_physical_floating_point<T, Status> Visit(const T*) {
+    const auto& scalar = checked_cast<const NumericScalar<T>&>(scalar_);
+    return OK(writer_.Double(scalar.value));
+  }
+
+  Status Visit(const BooleanType*) {
+    const auto& scalar = checked_cast<const BooleanScalar&>(scalar_);
+    return OK(writer_.Bool(scalar.value));
+  }
+
+  Status Visit(const StructType*) {
+    const auto& scalar = checked_cast<const StructScalar&>(scalar_);
+    const auto& type = checked_cast<const StructType&>(*scalar.type);
+    DCHECK_EQ(type.num_fields(), static_cast<int>(scalar.value.size()));
+
+    RETURN_NOT_OK(OK(writer_.StartObject()));
+
+    for (int i = 0; i < type.num_fields(); ++i) {
+      const auto& name = type.field(i)->name();
+      RETURN_NOT_OK(
+          OK(writer_.Key(name.data(), static_cast<rj::SizeType>(name.length()))));
+
+      const auto& child = *scalar.value[i];
+      if (!child.is_valid) {
+        RETURN_NOT_OK(OK(writer_.Null()));
+        continue;
+      }
+
+      WriteVisitor visitor{writer_, child};
+      RETURN_NOT_OK(VisitWriteableTypeId(child.type->id(), &visitor));
+    }
+
+    RETURN_NOT_OK(OK(writer_.EndObject(type.num_fields())));
+    return Status::OK();
+  }
+
+  rj::Writer<rj::OStreamWrapper>& writer_;
+  const Scalar& scalar_;
+};
+
+Status WriteJson(const StructScalar& scalar, rj::OStreamWrapper* sink) {
+  rj::Writer<rj::OStreamWrapper> writer(*sink);
+  WriteVisitor visitor{writer, scalar};
+  return VisitWriteableTypeId(Type::STRUCT, &visitor);
+}
+
+class JsonFormatHelper {
+ public:
+  using FormatType = JsonFileFormat;
+
+  static Result<std::shared_ptr<Buffer>> Write(RecordBatchReader* reader) {
+    ARROW_ASSIGN_OR_RAISE(auto scalars, ToScalars(reader));
+    std::stringstream ss;
+    rj::OStreamWrapper sink(ss);
+    for (const auto& scalar : scalars) {
+      RETURN_NOT_OK(WriteJson(*scalar, &sink));
+      ss << "\n";
+    }
+    return Buffer::FromString(ss.str());
+  }
+
+  static std::shared_ptr<FormatType> MakeFormat() {
+    return std::make_shared<FormatType>();
+  }
+
+ private:
+  static Result<std::vector<std::shared_ptr<StructScalar>>> ToScalars(
+      RecordBatchReader* reader) {
+    std::vector<std::shared_ptr<StructScalar>> scalars;
+    for (auto maybe_batch : *reader) {
+      ARROW_ASSIGN_OR_RAISE(auto batch, maybe_batch);
+      ARROW_ASSIGN_OR_RAISE(auto array, batch->ToStructArray());
+      for (int i = 0; i < array->length(); ++i) {
+        ARROW_ASSIGN_OR_RAISE(auto scalar, array->GetScalar(i));
+        scalars.push_back(checked_pointer_cast<StructScalar>(std::move(scalar)));
+      }
+    }
+    return scalars;
+  }
+};
+
+class TestJsonFormat
+    : public FileFormatFixtureMixin<JsonFormatHelper, json::kMaxParserNumRows> {};
+
+class TestJsonFormatScan : public FileFormatScanMixin<JsonFormatHelper> {};
+
+class TestJsonFormatScanNode : public FileFormatScanNodeMixin<JsonFormatHelper> {
+  void SetUp() override { internal::Initialize(); }
+
+  const FragmentScanOptions* GetFormatOptions() override { return &json_options_; }
+
+ protected:
+  JsonFragmentScanOptions json_options_;
+};
+
+TEST_F(TestJsonFormat, Equals) {
+  JsonFileFormat format;
+  ASSERT_TRUE(format.Equals(JsonFileFormat()));
+  ASSERT_FALSE(format.Equals(DummyFileFormat()));
+}
+
+TEST_F(TestJsonFormat, IsSupported) { TestIsSupported(); }
+TEST_F(TestJsonFormat, Inspect) { TestInspect(); }
+TEST_F(TestJsonFormat, FragmentEquals) { TestFragmentEquals(); }
+TEST_F(TestJsonFormat, InspectFailureWithRelevantError) {
+  TestInspectFailureWithRelevantError(StatusCode::Invalid, "JSON");
+}
+TEST_F(TestJsonFormat, CountRows) { TestCountRows(); }
+
+TEST_P(TestJsonFormatScan, Scan) { TestScan(); }
+TEST_P(TestJsonFormatScan, ScanBatchSize) { TestScanBatchSize(); }
+TEST_P(TestJsonFormatScan, ScanProjected) { TestScanProjected(); }
+TEST_P(TestJsonFormatScan, ScanWithDuplicateColumnError) {
+  TestScanWithDuplicateColumnError();
+}
+TEST_P(TestJsonFormatScan, ScanWithVirtualColumn) { TestScanWithVirtualColumn(); }
+TEST_P(TestJsonFormatScan, ScanWithPushdownNulls) { TestScanWithPushdownNulls(); }
+TEST_P(TestJsonFormatScan, ScanProjectedMissingCols) { TestScanProjectedMissingCols(); }
+TEST_P(TestJsonFormatScan, ScanProjectedNested) { TestScanProjectedNested(); }
+
+INSTANTIATE_TEST_SUITE_P(TestJsonScan, TestJsonFormatScan,
+                         ::testing::ValuesIn(TestFormatParams::Values()),
+                         TestFormatParams::ToTestNameString);
+
+TEST_P(TestJsonFormatScanNode, Scan) { TestScan(); }
+TEST_P(TestJsonFormatScanNode, ScanMissingFilterField) { TestScanMissingFilterField(); }
+TEST_P(TestJsonFormatScanNode, ScanProjected) { TestScanProjected(); }
+TEST_P(TestJsonFormatScanNode, ScanProjectedMissingColumns) {
+  TestScanProjectedMissingCols();
+}
+

Review Comment:
   Just scanning through the CSV tests, I wonder if there are a few JSON-specific tests that might be nice to have:
   
    * Confirm that alternate read/parse options (e.g. an alternate block size, or newlines allowed in values) get passed in correctly (a rough sketch follows below).
    * Does the JSON reader handle the presence of a Unicode BOM well? Maybe not as big an issue here as for the CSV reader, where we have to handle it explicitly.
   
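   A minimal sketch of the first bullet, reusing the `TestJsonFormatScanNode` fixture above (the option fields come from `json::ReadOptions`/`json::ParseOptions`; whether the shared `TestScan()` helper exercises them end to end is an assumption):
   
   ```cpp
   // Hypothetical JSON-specific test: shrink the block size so a multi-row
   // file must be chunked into several parsed blocks, and allow newlines
   // inside values so pretty-printed objects survive chunking.
   TEST_P(TestJsonFormatScanNode, ScanWithCustomReadOptions) {
     json_options_.read_options.block_size = 1 << 10;        // 1 KiB blocks
     json_options_.parse_options.newlines_in_values = true;
     TestScan();  // the shared scan should now run through many small blocks
   }
   ```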


