jianxind commented on a change in pull request #7029: URL: https://github.com/apache/arrow/pull/7029#discussion_r421214661
########## File path: cpp/src/arrow/util/spaced.h ########## @@ -0,0 +1,547 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/simd.h" + +#if defined(ARROW_HAVE_SSE4_2) +#include "arrow/util/spaced_sse_generated.h" +#endif // ARROW_HAVE_SSE4_2 + +namespace arrow { +namespace util { +namespace internal { + +template <typename T> +int SpacedCompressScalar(const T* values, int num_values, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* output) { + int num_valid_values = 0; + arrow::internal::BitmapReader valid_bits_reader(valid_bits, valid_bits_offset, + num_values); + for (int32_t i = 0; i < num_values; i++) { + if (valid_bits_reader.IsSet()) { + output[num_valid_values++] = values[i]; + } + valid_bits_reader.Next(); + } + return num_valid_values; +} + +template <typename T> +int SpacedExpandScalar(T* buffer, int num_values, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset) { + const int values_read = num_values - null_count; + + // Depending on the number of nulls, some of the value slots in buffer may + // be uninitialized, and this will cause valgrind warnings / potentially UB + memset(static_cast<void*>(buffer 
+ values_read), 0, + (num_values - values_read) * sizeof(T)); + + // Add spacing for null entries. As we have filled the buffer from the front, + // we need to add the spacing from the back. + int values_to_move = values_read - 1; + // We stop early on one of two conditions: + // 1. There are no more null values that need spacing. Note we infer this + // backwards, when 'i' is equal to 'values_to_move' it indicates + // all nulls have been consumed. + // 2. There are no more non-null values that need to move which indicates + // all remaining slots are null, so their exact value doesn't matter. + for (int i = num_values - 1; (i > values_to_move) && (values_to_move >= 0); i--) { + if (BitUtil::GetBit(valid_bits, valid_bits_offset + i)) { + buffer[i] = buffer[values_to_move]; + values_to_move--; + } + } + return num_values; +} + +#if defined(ARROW_HAVE_SSE4_2) +template <typename T> +int SpacedCompressSseShuffle(const T* values, int num_values, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* output) { + assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 1); + // [(2 128i block/8 epi32), (4 128i/8 epi64), (1 128i/16 epi8)] for each batch + constexpr int kBatchSize = (sizeof(T) == 1) ? sizeof(__m128i) : 8; + int num_valid_values = 0; + int idx_values = 0; + int64_t idx_valid_bits = valid_bits_offset; + + // First handle the front suffix + const int num_front_suffix = idx_valid_bits ? 
(8 - (valid_bits_offset % 8)) : 0; + if (num_front_suffix > 0) { + num_valid_values += + SpacedCompressScalar(values + idx_values, num_front_suffix, valid_bits, + idx_valid_bits, output + num_valid_values); + idx_values += num_front_suffix; + idx_valid_bits += num_front_suffix; + } + + // The part that fills whole batches + int64_t idx_valid_bytes = BitUtil::BytesForBits(idx_valid_bits + 1) - 1; + while (num_values - idx_values >= kBatchSize) { + const uint8_t valid_byte_value = valid_bits[idx_valid_bytes]; + idx_valid_bytes++; + + // The compiler is able to pick the path at instantiation time + if (sizeof(T) == 1) { + // Path for epi8: 16 epi8 values per batch, consuming two bytes of valid_bits + uint8_t valid_byte_value_high, valid_count_low, valid_count_high; + valid_count_low = BitUtil::kBytePopcount[valid_byte_value]; + + valid_byte_value_high = valid_bits[idx_valid_bytes]; + idx_valid_bytes++; + valid_count_high = BitUtil::kBytePopcount[valid_byte_value_high]; + + // A thin table is used; it needs to add back the offset of the high part and compact the two parts + __m128i src = + _mm_loadu_si128(reinterpret_cast<const __m128i*>(values + idx_values)); + __m128i mask = _mm_set_epi64x(*(reinterpret_cast<const uint64_t*>( Review comment: 3x, will change ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [email protected]
