benibus commented on code in PR #33732: URL: https://github.com/apache/arrow/pull/33732#discussion_r1101804612
########## cpp/src/arrow/dataset/file_json.cc: ##########
@@ -0,0 +1,414 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/dataset/file_json.h"
+
+#include <unordered_set>
+
+#include "arrow/dataset/dataset_internal.h"
+#include "arrow/io/buffered.h"
+#include "arrow/json/chunker.h"
+#include "arrow/json/parser.h"
+#include "arrow/json/reader.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+using internal::Executor;
+
+namespace dataset {
+
+namespace {
+
+using ReaderPtr = std::shared_ptr<json::StreamingReader>;
+
+struct JsonInspectedFragment : public InspectedFragment {
+  JsonInspectedFragment() : InspectedFragment({}) {}
+  JsonInspectedFragment(std::vector<std::string> column_names,
+                        std::shared_ptr<io::InputStream> stream, int64_t size)
+      : InspectedFragment(std::move(column_names)),
+        stream(std::move(stream)),
+        size(size) {}
+  std::shared_ptr<io::InputStream> stream;
+  int64_t size;
+};
+
+class JsonFragmentScanner : public FragmentScanner {
+ public:
+  JsonFragmentScanner(ReaderPtr reader, int num_batches, int64_t block_size)
+      : reader_(std::move(reader)),
+        block_size_(block_size),
+        num_batches_(num_batches) {}
+
+  int NumBatches() override { return num_batches_; }
+
+  Future<std::shared_ptr<RecordBatch>> ScanBatch(int index) override {
+    DCHECK_EQ(num_scanned_++, index);
+    return reader_->ReadNextAsync();
+  }
+
+  int64_t EstimatedDataBytes(int) override { return block_size_; }
+
+  static Result<std::shared_ptr<Schema>> GetSchema(
+      const FragmentScanRequest& scan_request, const JsonInspectedFragment& inspected) {
+    FieldVector fields;
+    fields.reserve(inspected.column_names.size());
+    std::unordered_set<int> indices;
+    indices.reserve(inspected.column_names.size());
+
+    for (const auto& scan_column : scan_request.columns) {
+      const auto index = scan_column.path[0];
+
+      if (!indices.emplace(index).second) continue;
+
+      const auto& name = inspected.column_names.at(index);
+      auto type = scan_column.requested_type->GetSharedPtr();
+      fields.push_back(field((name), std::move(type)));
+    }
+
+    return ::arrow::schema(std::move(fields));
+  }
+
+  static Future<std::shared_ptr<FragmentScanner>> Make(
+      const FragmentScanRequest& scan_request,
+      const JsonFragmentScanOptions& format_options,
+      const JsonInspectedFragment& inspected, Executor* cpu_executor) {
+    auto parse_options = format_options.parse_options;
+    ARROW_ASSIGN_OR_RAISE(parse_options.explicit_schema,
+                          GetSchema(scan_request, inspected));
+    parse_options.unexpected_field_behavior = json::UnexpectedFieldBehavior::Ignore;
+
+    int64_t block_size = format_options.read_options.block_size;
+    auto num_batches = static_cast<int>(bit_util::CeilDiv(inspected.size, block_size));
+
+    auto future = json::StreamingReader::MakeAsync(
+        inspected.stream, format_options.read_options, parse_options,
+        io::default_io_context(), cpu_executor);
+    return future.Then(
+        [num_batches, block_size](
+            const ReaderPtr& reader) -> Result<std::shared_ptr<FragmentScanner>> {
+          return std::make_shared<JsonFragmentScanner>(reader, num_batches, block_size);
+        },
+        [](const Status& e) -> Result<std::shared_ptr<FragmentScanner>> { return e; });
+  }
+
+ private:
+  ReaderPtr reader_;
+  int64_t block_size_;
+  int num_batches_;
+  int num_scanned_ = 0;
+};
+
+Result<std::shared_ptr<StructType>> ParseToStructType(
+    std::string_view data, const json::ParseOptions& parse_options, MemoryPool* pool) {
+  if (!pool) pool = default_memory_pool();
+
+  auto full_buffer = std::make_shared<Buffer>(data);
+  std::shared_ptr<Buffer> buffer, partial;
+  auto chunker = json::MakeChunker(parse_options);
+  RETURN_NOT_OK(chunker->Process(full_buffer, &buffer, &partial));
+
+  std::unique_ptr<json::BlockParser> parser;
+  RETURN_NOT_OK(json::BlockParser::Make(pool, parse_options, &parser));
+  RETURN_NOT_OK(parser->Parse(buffer));
+  std::shared_ptr<Array> parsed;
+  RETURN_NOT_OK(parser->Finish(&parsed));
+
+  return checked_pointer_cast<StructType>(parsed->type());
+}
+
+Result<std::shared_ptr<Schema>> ParseToSchema(std::string_view data,
+                                              const json::ParseOptions& parse_options,
+                                              MemoryPool* pool) {
+  ARROW_ASSIGN_OR_RAISE(auto type, ParseToStructType(data, parse_options, pool));
+  return ::arrow::schema(type->fields());
+}
+
+// Converts a FieldPath to a FieldRef consisting exclusively of field names.
+//
+// The resulting FieldRef can be used to lookup the corresponding field in any schema
+// regardless of missing/unordered fields. The input path is assumed to be valid for the
+// given schema.
+FieldRef ToUniversalRef(const FieldPath& path, const Schema& schema) {
+  std::vector<FieldRef> refs;
+  refs.reserve(path.indices().size());
+
+  const FieldVector* fields = &schema.fields();
+  for (auto it = path.begin(); it != path.end(); ++it) {
+    DCHECK_LT(*it, static_cast<int>(fields->size()));
+    const auto& child_field = *(*fields)[*it];
+    refs.push_back(FieldRef(child_field.name()));
+    if (it + 1 != path.end()) {
+      auto&& child_type = checked_cast<const StructType&>(*child_field.type());
+      fields = &child_type.fields();
+    }
+  }
+
+  return refs.empty() ? FieldRef()
+         : refs.size() == 1 ? refs[0]
+                            : FieldRef(std::move(refs));
+}
+
+int TopLevelIndex(const FieldRef& ref, const Schema& schema) {
+  if (const auto* name = ref.name()) {
+    return schema.GetFieldIndex(*name);
+  } else if (const auto* path = ref.field_path()) {
+    DCHECK(!path->empty());
+    return (*path)[0];
+  }
+  const auto* nested_refs = ref.nested_refs();
+  DCHECK(nested_refs && !nested_refs->empty());
+  return TopLevelIndex((*nested_refs)[0], schema);
+}
+
+// Make a new schema consisting only of the top-level fields in the dataset schema that:
+// (a) Have children that require materialization
+// (b) Have children present in `physical_schema`
+//
+// The resulting schema can be used in reader instantiation to ignore unused fields. Note
+// that `physical_schema` is only of structural importance and its data types are ignored
+// when constructing the final schema.
+Result<std::shared_ptr<Schema>> GetPartialSchema(const ScanOptions& scan_options,
+                                                 const Schema& physical_schema) {

Review Comment:
   Yeah, from what I can tell, the new scanner does a lot of the legwork here (as far as top-level fields, at least). For the old path, this was what was required to get the common format tests to pass.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use
the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
