zanmato1984 commented on code in PR #44217: URL: https://github.com/apache/arrow/pull/44217#discussion_r1857863292
########## cpp/src/arrow/compute/kernels/chunked_internal.cc: ########## @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/compute/kernels/chunked_internal.h" + +#include <algorithm> + +#include "arrow/record_batch.h" +#include "arrow/util/logging.h" + +namespace arrow::compute::internal { + +std::vector<const Array*> GetArrayPointers(const ArrayVector& arrays) { + std::vector<const Array*> pointers(arrays.size()); + std::transform(arrays.begin(), arrays.end(), pointers.begin(), + [&](const std::shared_ptr<Array>& array) { return array.get(); }); + return pointers; +} + +std::vector<int64_t> ChunkedIndexMapper::GetChunkLengths( + util::span<const Array* const> chunks) { + std::vector<int64_t> chunk_lengths(chunks.size()); + for (int64_t i = 0; i < static_cast<int64_t>(chunks.size()); ++i) { + chunk_lengths[i] = chunks[i]->length(); + } + return chunk_lengths; +} + +std::vector<int64_t> ChunkedIndexMapper::GetChunkLengths( + const RecordBatchVector& chunks) { + std::vector<int64_t> chunk_lengths(chunks.size()); + for (int64_t i = 0; i < static_cast<int64_t>(chunks.size()); ++i) { + chunk_lengths[i] = chunks[i]->num_rows(); + } + return chunk_lengths; +} + 
+Result<std::pair<CompressedChunkLocation*, CompressedChunkLocation*>> +ChunkedIndexMapper::LogicalToPhysical() { + // Check that indices would fall in bounds for CompressedChunkLocation + if (ARROW_PREDICT_FALSE(chunk_lengths_.size() > + CompressedChunkLocation::kMaxChunkIndex + 1)) { + return Status::NotImplemented("Chunked array has more than ", + CompressedChunkLocation::kMaxChunkIndex + 1, " chunks"); + } + for (int64_t chunk_length : chunk_lengths_) { + if (ARROW_PREDICT_FALSE(static_cast<uint64_t>(chunk_length) > + CompressedChunkLocation::kMaxIndexInChunk + 1)) { + return Status::NotImplemented("Individual chunk in chunked array has more than ", + CompressedChunkLocation::kMaxIndexInChunk + 1, + " elements"); + } + } + + const int64_t num_indices = static_cast<int64_t>(indices_end_ - indices_begin_); + CompressedChunkLocation* physical_begin = + reinterpret_cast<CompressedChunkLocation*>(indices_begin_); + DCHECK_EQ(physical_begin + num_indices, + reinterpret_cast<CompressedChunkLocation*>(indices_end_)); + + int64_t chunk_offset = 0; + for (int64_t chunk_index = 0; chunk_index < static_cast<int64_t>(chunk_lengths_.size()); + ++chunk_index) { + const int64_t chunk_length = chunk_lengths_[chunk_index]; + for (int64_t i = 0; i < chunk_length; ++i) { Review Comment: So this `N` (the total length of all chunks) loop is actually what makes the benchmarks for many-null-case worse, because null values are already handled elsewhere and this `N` loop would be duplicated for nulls? ########## cpp/src/arrow/compute/kernels/chunked_internal.cc: ########## @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/compute/kernels/chunked_internal.h" + +#include <algorithm> + +#include "arrow/record_batch.h" +#include "arrow/util/logging.h" + +namespace arrow::compute::internal { + +std::vector<const Array*> GetArrayPointers(const ArrayVector& arrays) { + std::vector<const Array*> pointers(arrays.size()); + std::transform(arrays.begin(), arrays.end(), pointers.begin(), + [&](const std::shared_ptr<Array>& array) { return array.get(); }); + return pointers; +} + +std::vector<int64_t> ChunkedIndexMapper::GetChunkLengths( + util::span<const Array* const> chunks) { + std::vector<int64_t> chunk_lengths(chunks.size()); + for (int64_t i = 0; i < static_cast<int64_t>(chunks.size()); ++i) { + chunk_lengths[i] = chunks[i]->length(); + } + return chunk_lengths; +} + +std::vector<int64_t> ChunkedIndexMapper::GetChunkLengths( + const RecordBatchVector& chunks) { + std::vector<int64_t> chunk_lengths(chunks.size()); + for (int64_t i = 0; i < static_cast<int64_t>(chunks.size()); ++i) { + chunk_lengths[i] = chunks[i]->num_rows(); + } + return chunk_lengths; +} + +Result<std::pair<CompressedChunkLocation*, CompressedChunkLocation*>> +ChunkedIndexMapper::LogicalToPhysical() { + // Check that indices would fall in bounds for CompressedChunkLocation + if (ARROW_PREDICT_FALSE(chunk_lengths_.size() > + CompressedChunkLocation::kMaxChunkIndex + 1)) { + return Status::NotImplemented("Chunked array has more than ", + CompressedChunkLocation::kMaxChunkIndex + 1, " chunks"); + } + for (int64_t chunk_length : chunk_lengths_) { + if 
(ARROW_PREDICT_FALSE(static_cast<uint64_t>(chunk_length) > + CompressedChunkLocation::kMaxIndexInChunk + 1)) { + return Status::NotImplemented("Individual chunk in chunked array has more than ", + CompressedChunkLocation::kMaxIndexInChunk + 1, + " elements"); + } + } + + const int64_t num_indices = static_cast<int64_t>(indices_end_ - indices_begin_); + CompressedChunkLocation* physical_begin = + reinterpret_cast<CompressedChunkLocation*>(indices_begin_); + DCHECK_EQ(physical_begin + num_indices, + reinterpret_cast<CompressedChunkLocation*>(indices_end_)); + + int64_t chunk_offset = 0; + for (int64_t chunk_index = 0; chunk_index < static_cast<int64_t>(chunk_lengths_.size()); + ++chunk_index) { + const int64_t chunk_length = chunk_lengths_[chunk_index]; + for (int64_t i = 0; i < chunk_length; ++i) { + DCHECK_GE(indices_begin_[chunk_offset + i], static_cast<uint64_t>(chunk_offset)); + DCHECK_LT(indices_begin_[chunk_offset + i], + static_cast<uint64_t>(chunk_offset + chunk_length)); + physical_begin[chunk_offset + i] = CompressedChunkLocation{ + static_cast<uint64_t>(chunk_index), + indices_begin_[chunk_offset + i] - static_cast<uint64_t>(chunk_offset)}; + } + chunk_offset += chunk_length; + } + + return std::pair{physical_begin, physical_begin + num_indices}; +} + +Status ChunkedIndexMapper::PhysicalToLogical() { + std::vector<int64_t> chunk_offsets(chunk_lengths_.size()); Review Comment: Is it possible to re-use `this->chunk_lengths_` for `chunk_offsets` to reduce the heap allocation? I assume that one `ChunkedIndexMapper` instance is not supposed to be used repeatedly so the content of `this->chunk_lengths_` doesn't matter any more after calling `PhysicalToLogical()`. ########## cpp/src/arrow/compute/kernels/chunked_internal.h: ########## @@ -50,34 +56,109 @@ struct ResolvedChunk { } }; +// A compressed (chunk_index, index_in_chunk) pair. 
+// The goal of compression is to make it fit in 64 bits, allowing in place +// replacement of logical uint64_t indices with physical indices. +// (see ChunkedIndexMapper) +struct CompressedChunkLocation { + static constexpr int kChunkIndexBits = 24; + static constexpr int KIndexInChunkBits = 64 - kChunkIndexBits; + + static constexpr uint64_t kMaxChunkIndex = (1ULL << kChunkIndexBits) - 1; + static constexpr uint64_t kMaxIndexInChunk = (1ULL << KIndexInChunkBits) - 1; + + CompressedChunkLocation() = default; + + constexpr uint64_t chunk_index() const { return data_ & kMaxChunkIndex; } + constexpr uint64_t index_in_chunk() const { return data_ >> kChunkIndexBits; } + + explicit constexpr CompressedChunkLocation(uint64_t chunk_index, + uint64_t index_in_chunk) + : data_((index_in_chunk << kChunkIndexBits) | chunk_index) {} + + template <typename IndexType> + explicit operator TypedChunkLocation<IndexType>() { + return {static_cast<IndexType>(chunk_index()), + static_cast<IndexType>(index_in_chunk())}; + } + + private: + uint64_t data_; +}; + +static_assert(sizeof(uint64_t) == sizeof(CompressedChunkLocation)); + class ChunkedArrayResolver { private: ChunkResolver resolver_; - std::vector<const Array*> chunks_; + util::span<const Array* const> chunks_; + std::vector<const Array*> owned_chunks_; public: - explicit ChunkedArrayResolver(const std::vector<const Array*>& chunks) + explicit ChunkedArrayResolver(std::vector<const Array*>&& chunks) + : resolver_(chunks), chunks_(chunks), owned_chunks_(std::move(chunks)) {} + explicit ChunkedArrayResolver(util::span<const Array* const> chunks) : resolver_(chunks), chunks_(chunks) {} - ChunkedArrayResolver(ChunkedArrayResolver&& other) = default; - ChunkedArrayResolver& operator=(ChunkedArrayResolver&& other) = default; + ARROW_DEFAULT_MOVE_AND_ASSIGN(ChunkedArrayResolver); - ChunkedArrayResolver(const ChunkedArrayResolver& other) = default; - ChunkedArrayResolver& operator=(const ChunkedArrayResolver& other) = default; + 
ChunkedArrayResolver(const ChunkedArrayResolver& other) + : resolver_(other.resolver_), owned_chunks_(other.owned_chunks_) { + // Rebind span to owned_chunks_ if necessary + chunks_ = owned_chunks_.empty() ? other.chunks_ : owned_chunks_; + } + ChunkedArrayResolver& operator=(const ChunkedArrayResolver& other) { + resolver_ = other.resolver_; + owned_chunks_ = other.owned_chunks_; + chunks_ = owned_chunks_.empty() ? other.chunks_ : owned_chunks_; + return *this; + } ResolvedChunk Resolve(int64_t index) const { const auto loc = resolver_.Resolve(index); return {chunks_[loc.chunk_index], loc.index_in_chunk}; } }; -inline std::vector<const Array*> GetArrayPointers(const ArrayVector& arrays) { - std::vector<const Array*> pointers(arrays.size()); - std::transform(arrays.begin(), arrays.end(), pointers.begin(), - [&](const std::shared_ptr<Array>& array) { return array.get(); }); - return pointers; -} +std::vector<const Array*> GetArrayPointers(const ArrayVector& arrays); + +// A class that turns logical (linear) indices into physical (chunked) indices, +// and vice-versa. +class ChunkedIndexMapper { + public: + ChunkedIndexMapper(const std::vector<const Array*>& chunks, uint64_t* indices_begin, + uint64_t* indices_end) + : ChunkedIndexMapper(util::span(chunks), indices_begin, indices_end) {} + ChunkedIndexMapper(util::span<const Array* const> chunks, uint64_t* indices_begin, + uint64_t* indices_end) + : chunk_lengths_(GetChunkLengths(chunks)), + indices_begin_(indices_begin), + indices_end_(indices_end) {} + ChunkedIndexMapper(const RecordBatchVector& chunks, uint64_t* indices_begin, + uint64_t* indices_end) + : chunk_lengths_(GetChunkLengths(chunks)), + indices_begin_(indices_begin), + indices_end_(indices_end) {} Review Comment: IIUC, the input (logical) indices should be chunk-partitioned, and the order of the partitions should be the same as the input `chunks`? -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected]. For queries about this service, please contact Infrastructure at: [email protected].
