[ https://issues.apache.org/jira/browse/ARROW-1559?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16254059#comment-16254059 ]

ASF GitHub Bot commented on ARROW-1559:
---------------------------------------

xhochy commented on a change in pull request #1266: ARROW-1559: [C++] Add 
Unique kernel and refactor DictionaryBuilder to be a stateful kernel
URL: https://github.com/apache/arrow/pull/1266#discussion_r151233579
 
 

 ##########
 File path: cpp/src/arrow/compute/kernels/hash.cc
 ##########
 @@ -0,0 +1,880 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/compute/kernels/hash.h"
+
+#include <exception>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "arrow/builder.h"
+#include "arrow/compute/context.h"
+#include "arrow/compute/kernel.h"
+#include "arrow/compute/kernels/util-internal.h"
+#include "arrow/util/hash-util.h"
+
+namespace arrow {
+namespace compute {
+
+namespace {
+
+// Initially 1024 elements
+static constexpr int64_t kInitialHashTableSize = 1 << 10;
+
+typedef int32_t hash_slot_t;
+static constexpr hash_slot_t kHashSlotEmpty = std::numeric_limits<int32_t>::max();
+
+// The maximum load factor for the hash table before resizing.
+static constexpr double kMaxHashTableLoad = 0.7;
+
+enum class SIMDMode : char { NOSIMD, SSE4, AVX2 };
+
+#define CHECK_IMPLEMENTED(KERNEL, FUNCNAME, TYPE)                  \
+  if (!KERNEL) {                                                   \
+    std::stringstream ss;                                          \
+    ss << FUNCNAME << " not implemented for " << type->ToString(); \
+    return Status::NotImplemented(ss.str());                       \
+  }
+
+Status NewHashTable(int64_t size, MemoryPool* pool, std::shared_ptr<Buffer>* out) {
+  auto hash_table = std::make_shared<PoolBuffer>(pool);
+
+  RETURN_NOT_OK(hash_table->Resize(sizeof(hash_slot_t) * size));
+  int32_t* slots = reinterpret_cast<hash_slot_t*>(hash_table->mutable_data());
+  std::fill(slots, slots + size, kHashSlotEmpty);
+
+  *out = hash_table;
+  return Status::OK();
+}
+
+// This is a slight design concession -- some hash actions have the possibility
+// of failure. Rather than introduce extra error checking into all actions, we
+// will raise an internal exception so that only the actions where errors can
+// occur will experience the extra overhead
+class HashException : public std::exception {
+ public:
+  explicit HashException(const std::string& msg, StatusCode code = StatusCode::Invalid)
+      : msg_(msg), code_(code) {}
+
+  ~HashException() throw() {}
+
+  const char* what() const throw() override;
+
+  StatusCode code() const { return code_; }
+
+ private:
+  std::string msg_;
+  StatusCode code_;
+};
+
+const char* HashException::what() const throw() { return msg_.c_str(); }
+
+class HashTable {
+ public:
+  HashTable(const std::shared_ptr<DataType>& type, MemoryPool* pool)
+      : type_(type),
+        pool_(pool),
+        initialized_(false),
+        hash_table_(nullptr),
+        hash_slots_(nullptr),
+        hash_table_size_(0),
+        mod_bitmask_(0) {}
+
+  virtual ~HashTable() {}
+
+  virtual Status Append(const ArrayData& input) = 0;
+  virtual Status Flush(std::vector<Datum>* out) = 0;
+  virtual Status GetDictionary(std::shared_ptr<ArrayData>* out) = 0;
+
+ protected:
+  Status Init(int64_t elements);
+
+  std::shared_ptr<DataType> type_;
+  MemoryPool* pool_;
+  bool initialized_;
+
+  // The hash table contains integer indices that reference the set of observed
+  // distinct values
+  std::shared_ptr<Buffer> hash_table_;
+  hash_slot_t* hash_slots_;
+
+  /// Size of the table. Must be a power of 2.
+  int64_t hash_table_size_;
+
+  // Store hash_table_size_ - 1, so that j & mod_bitmask_ is equivalent to j %
+  // hash_table_size_, but uses far fewer CPU cycles
+  int64_t mod_bitmask_;
+};
+
+Status HashTable::Init(int64_t elements) {
+  DCHECK_EQ(elements, BitUtil::NextPower2(elements));
+  RETURN_NOT_OK(NewHashTable(elements, pool_, &hash_table_));
+  hash_slots_ = reinterpret_cast<hash_slot_t*>(hash_table_->mutable_data());
+  hash_table_size_ = elements;
+  mod_bitmask_ = elements - 1;
+  initialized_ = true;
+  return Status::OK();
+}
+
+template <typename Type, typename Action, typename Enable = void>
+class HashTableKernel : public HashTable {};
+
+// Types of hash actions
+//
+// unique: append to dictionary when not found, no-op with slot
+// dictionary-encode: append to dictionary when not found, append slot #
+// match: raise or set null when not found, otherwise append slot #
+// isin: set false when not found, otherwise true
+// value counts: append to dictionary when not found, increment count for slot
+
+template <typename Type, typename Enable = void>
+class HashDictionary {};
+
+// ----------------------------------------------------------------------
+// Hash table pass for nulls
+
+template <typename Type, typename Action>
+class HashTableKernel<Type, Action, enable_if_null<Type>> : public HashTable {
+ public:
+  using HashTable::HashTable;
+
+  Status Init() {
+    // No-op, do not even need to initialize hash table
+    return Status::OK();
+  }
+
+  Status Append(const ArrayData& arr) override {
+    if (!initialized_) {
+      RETURN_NOT_OK(Init());
+    }
+    auto action = static_cast<Action*>(this);
+    RETURN_NOT_OK(action->Reserve(arr.length));
+    for (int64_t i = 0; i < arr.length; ++i) {
+      action->ObserveNull();
+    }
+    return Status::OK();
+  }
+
+  Status GetDictionary(std::shared_ptr<ArrayData>* out) override {
+    // TODO(wesm): handle null being a valid dictionary value
+    auto null_array = std::make_shared<NullArray>(0);
+    *out = null_array->data();
+    return Status::OK();
+  }
+};
+
+// ----------------------------------------------------------------------
+// Hash table pass for primitive types
+
+template <typename Type>
+struct HashDictionary<Type, enable_if_has_c_type<Type>> {
+  using T = typename Type::c_type;
+
+  explicit HashDictionary(MemoryPool* pool)
+      : pool(pool), buffer(std::make_shared<PoolBuffer>(pool)), size(0), capacity(0) {}
+
+  Status Init() {
+    this->size = 0;
+    return Resize(kInitialHashTableSize);
+  }
+
+  Status DoubleSize() { return Resize(this->size * 2); }
+
+  Status Resize(const int64_t elements) {
+    RETURN_NOT_OK(this->buffer->Resize(elements * sizeof(T)));
+
+    this->capacity = elements;
+    this->values = reinterpret_cast<T*>(this->buffer->mutable_data());
+    return Status::OK();
+  }
+
+  MemoryPool* pool;
+  std::shared_ptr<ResizableBuffer> buffer;
+  T* values;
+  int64_t size;
+  int64_t capacity;
+};
+
+#define GENERIC_HASH_PASS(HASH_INNER_LOOP) \
+  if (arr.null_count != 0) { \
+    internal::BitmapReader valid_reader(arr.buffers[0]->data(), arr.offset, arr.length); \
+    for (int64_t i = 0; i < arr.length; ++i) { \
+      const bool is_null = valid_reader.IsNotSet(); \
+      valid_reader.Next(); \
+ \
+      if (is_null) { \
+        action->ObserveNull(); \
+        continue; \
+      } \
+ \
+      HASH_INNER_LOOP(); \
+    } \
+  } else { \
+    for (int64_t i = 0; i < arr.length; ++i) { \
+      HASH_INNER_LOOP(); \
+    } \
+  }
+
+template <typename Type, typename Action>
+class HashTableKernel<Type, Action, enable_if_has_c_type<Type>> : public HashTable {
+ public:
+  using T = typename Type::c_type;
+
+  HashTableKernel(const std::shared_ptr<DataType>& type, MemoryPool* pool)
+      : HashTable(type, pool), dict_(pool) {}
+
+  Status Init() {
+    RETURN_NOT_OK(dict_.Init());
+    return HashTable::Init(kInitialHashTableSize);
+  }
+
+  Status Append(const ArrayData& arr) override {
+    if (!initialized_) {
+      RETURN_NOT_OK(Init());
+    }
+
+    const T* values = GetValues<T>(arr, 1);
+    auto action = static_cast<Action*>(this);
+
+    RETURN_NOT_OK(action->Reserve(arr.length));
+
+#define HASH_INNER_LOOP() \
+  const T value = values[i]; \
+  int64_t j = HashValue(value) & mod_bitmask_; \
+  hash_slot_t slot = hash_slots_[j]; \
+ \
+  while (kHashSlotEmpty != slot && dict_.values[slot] != value) { \
+    ++j; \
+    if (ARROW_PREDICT_FALSE(j == hash_table_size_)) { \
+      j = 0; \
+    } \
+    slot = hash_slots_[j]; \
+  } \
+ \
+  if (slot == kHashSlotEmpty) { \
+    if (!Action::allow_expand) { \
+      throw HashException("Encountered new dictionary value"); \
+    } \
+ \
+    slot = static_cast<hash_slot_t>(dict_.size); \
+    hash_slots_[j] = slot; \
+    dict_.values[dict_.size++] = value; \
+ \
+    action->ObserveNotFound(slot); \
+ \
+    if (ARROW_PREDICT_FALSE(dict_.size > hash_table_size_ * kMaxHashTableLoad)) { \
+      RETURN_NOT_OK(action->DoubleSize()); \
+    } \
+  } else { \
+    action->ObserveFound(slot); \
+  }
+
+    GENERIC_HASH_PASS(HASH_INNER_LOOP);
+
+#undef HASH_INNER_LOOP
+
+    return Status::OK();
+  }
+
+  Status GetDictionary(std::shared_ptr<ArrayData>* out) override {
+    // TODO(wesm): handle null being in the dictionary
+    auto dict_data = dict_.buffer;
+    RETURN_NOT_OK(dict_data->Resize(dict_.size * sizeof(T), false));
+
+    BufferVector buffers = {nullptr, dict_data};
+    *out = std::make_shared<ArrayData>(type_, dict_.size, std::move(buffers), 0);
+    return Status::OK();
+  }
+
+ protected:
+  int64_t HashValue(const T& value) const {
+    // TODO(wesm): Use faster hash function for C types
+    return HashUtil::Hash(&value, sizeof(T), 0);
+  }
+
+  Status DoubleTableSize() {
+    int64_t new_size = hash_table_size_ * 2;
+
+    std::shared_ptr<Buffer> new_hash_table;
+    RETURN_NOT_OK(NewHashTable(new_size, pool_, &new_hash_table));
+    int32_t* new_hash_slots =
+        reinterpret_cast<hash_slot_t*>(new_hash_table->mutable_data());
+    int64_t new_mod_bitmask = new_size - 1;
+
+    for (int i = 0; i < hash_table_size_; ++i) {
+      hash_slot_t index = hash_slots_[i];
+
+      if (index == kHashSlotEmpty) {
+        continue;
+      }
+
+      // Compute the hash value mod the new table size to start looking for an
+      // empty slot
+      const T value = dict_.values[index];
+
+      // Find empty slot in the new hash table
+      int64_t j = HashValue(value) & new_mod_bitmask;
+      while (kHashSlotEmpty != new_hash_slots[j]) {
+        ++j;
+        if (ARROW_PREDICT_FALSE(j == new_size)) {
+          j = 0;
+        }
+      }
+
+      // Copy the old slot index to the new hash table
+      new_hash_slots[j] = index;
+    }
+
+    hash_table_ = new_hash_table;
+    hash_slots_ = reinterpret_cast<hash_slot_t*>(hash_table_->mutable_data());
+    hash_table_size_ = new_size;
+    mod_bitmask_ = new_size - 1;
+
+    return dict_.Resize(new_size);
+  }
+
+  HashDictionary<Type> dict_;
+};
+
+// ----------------------------------------------------------------------
+// Hash table pass for variable-length binary types
+
+template <typename Type, typename Action>
+class HashTableKernel<Type, Action, enable_if_binary<Type>> : public HashTable {
+ public:
+  HashTableKernel(const std::shared_ptr<DataType>& type, MemoryPool* pool)
+      : HashTable(type, pool), dict_offsets_(pool), dict_data_(pool), dict_size_(0) {}
+
+  Status Init() {
+    RETURN_NOT_OK(dict_offsets_.Resize(kInitialHashTableSize));
+
+    // We append the end offset after each append to the dictionary, so this
+    // sets the initial condition for the length-0 case
+    //
+    // initial offsets (dict size == 0): 0
+    // after 1st dict entry of length 3: 0 3
+    // after 2nd dict entry of length 4: 0 3 7
+    RETURN_NOT_OK(dict_offsets_.Append(0));
+    return HashTable::Init(kInitialHashTableSize);
+  }
+
+  Status Append(const ArrayData& arr) override {
+    if (!initialized_) {
+      RETURN_NOT_OK(Init());
+    }
+
+    const int32_t* offsets = GetValues<int32_t>(arr, 1);
+    const uint8_t* data = GetValues<uint8_t>(arr, 2);
+
+    auto action = static_cast<Action*>(this);
+    RETURN_NOT_OK(action->Reserve(arr.length));
+
+#define HASH_INNER_LOOP() \
+  const int32_t position = offsets[i]; \
+  const int32_t length = offsets[i + 1] - position; \
+  const uint8_t* value = data + position; \
+ \
+  int64_t j = HashValue(value, length) & mod_bitmask_; \
+  hash_slot_t slot = hash_slots_[j]; \
+ \
+  const int32_t* dict_offsets = dict_offsets_.data(); \
+  const uint8_t* dict_data = dict_data_.data(); \
+  while (kHashSlotEmpty != slot && \
+         !((dict_offsets[slot + 1] - dict_offsets[slot]) == length && \
+           0 == memcmp(value, dict_data + dict_offsets[slot], length))) { \
+    ++j; \
+    if (ARROW_PREDICT_FALSE(j == hash_table_size_)) { \
+      j = 0; \
+    } \
+    slot = hash_slots_[j]; \
+  } \
+ \
+  if (slot == kHashSlotEmpty) { \
+    if (!Action::allow_expand) { \
+      throw HashException("Encountered new dictionary value"); \
+    } \
+ \
+    slot = dict_size_++; \
+    hash_slots_[j] = slot; \
+ \
+    RETURN_NOT_OK(dict_data_.Append(value, length)); \
+    RETURN_NOT_OK(dict_offsets_.Append(static_cast<int32_t>(dict_data_.length()))); \
+ \
+    action->ObserveNotFound(slot); \
+ \
+    if (ARROW_PREDICT_FALSE(dict_size_ > hash_table_size_ * kMaxHashTableLoad)) { \
+      RETURN_NOT_OK(action->DoubleSize()); \
+    } \
+  } else { \
+    action->ObserveFound(slot); \
+  }
+
+    GENERIC_HASH_PASS(HASH_INNER_LOOP);
+
+#undef HASH_INNER_LOOP
+
+    return Status::OK();
+  }
+
+  Status GetDictionary(std::shared_ptr<ArrayData>* out) override {
+    // TODO(wesm): handle null being in the dictionary
+    BufferVector buffers = {nullptr, nullptr, nullptr};
+
+    RETURN_NOT_OK(dict_offsets_.Finish(&buffers[1]));
+    RETURN_NOT_OK(dict_data_.Finish(&buffers[2]));
+
+    *out = std::make_shared<ArrayData>(type_, dict_size_, std::move(buffers), 0);
+    return Status::OK();
+  }
+
+ protected:
+  int64_t HashValue(const uint8_t* data, int32_t length) const {
+    return HashUtil::Hash(data, length, 0);
+  }
+
+  Status DoubleTableSize() {
 
 Review comment:
   This is duplicated between the different HashKernel implementations?
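   
   A possible way to remove the duplication, sketched here purely as an illustration (the helper name RehashInto and its exact signature are hypothetical, not part of this patch): the probe-and-rehash loop depends on the concrete kernel only through "hash the value that an existing slot refers to", so it could live once in the HashTable base class, or as a free function, taking that piece as a callable.
   
       // Illustrative sketch only -- assumes the hash_slot_t / kHashSlotEmpty
       // definitions from hash.cc; RehashInto is a hypothetical name.
       #include <cstdint>
       #include <limits>
       
       typedef int32_t hash_slot_t;
       static constexpr hash_slot_t kHashSlotEmpty = std::numeric_limits<int32_t>::max();
       
       // Move every occupied slot from the old table into a new, larger table.
       // hash_slot(index) must return the hash of the dictionary value that
       // `index` refers to; each kernel would supply its own lambda.
       template <typename HashSlotFunc>
       void RehashInto(const hash_slot_t* old_slots, int64_t old_size,
                       hash_slot_t* new_slots, int64_t new_size,
                       HashSlotFunc&& hash_slot) {
         const int64_t new_mod_bitmask = new_size - 1;
         for (int64_t i = 0; i < old_size; ++i) {
           const hash_slot_t index = old_slots[i];
           if (index == kHashSlotEmpty) {
             continue;
           }
           // Linear probing in the new table, starting at the rehashed position.
           int64_t j = hash_slot(index) & new_mod_bitmask;
           while (new_slots[j] != kHashSlotEmpty) {
             if (++j == new_size) {
               j = 0;
             }
           }
           new_slots[j] = index;
         }
       }
   
   The numeric kernel would then pass something like [this](hash_slot_t s) { return HashValue(dict_.values[s]); } and the binary kernel a lambda hashing dict_data + dict_offsets[s], leaving only the dictionary-specific resizing in each DoubleTableSize.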

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> [C++] Kernel implementations for "unique" (compute distinct elements of array)
> ------------------------------------------------------------------------------
>
>                 Key: ARROW-1559
>                 URL: https://issues.apache.org/jira/browse/ARROW-1559
>             Project: Apache Arrow
>          Issue Type: New Feature
>          Components: C++
>            Reporter: Wes McKinney
>            Assignee: Uwe L. Korn
>              Labels: Analytics, pull-request-available
>             Fix For: 0.8.0
>
>




--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
