askoa commented on code in PR #3558:
URL: https://github.com/apache/arrow-rs/pull/3558#discussion_r1084561073


##########
arrow-select/src/dictionary.rs:
##########
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::interleave::interleave;
+use ahash::RandomState;
+use arrow_array::builder::BooleanBufferBuilder;
+use arrow_array::cast::{as_generic_binary_array, as_largestring_array, 
as_string_array};
+use arrow_array::types::{ArrowDictionaryKeyType, ByteArrayType};
+use arrow_array::{Array, ArrayRef, DictionaryArray, GenericByteArray};
+use arrow_buffer::{ArrowNativeType, Buffer, MutableBuffer};
+use arrow_data::bit_iterator::BitIndexIterator;
+use arrow_data::ArrayData;
+use arrow_schema::{ArrowError, DataType};
+
/// A best effort interner that maintains a fixed number of buckets
/// and interns keys based on their hash value
///
/// Hash collisions will result in replacement
struct Interner<'a, V> {
    /// Hash state used to map keys to bucket indices; seeded with fixed
    /// values in `new` so behaviour is deterministic across runs
    state: RandomState,
    /// Fixed-size table: each slot caches the most recent `(key, value)`
    /// pair that hashed to it — there is no probing and no resizing, so a
    /// colliding key simply evicts the previous occupant
    buckets: Vec<Option<(&'a [u8], V)>>,
    /// Right-shift applied to a 64-bit hash to produce an index into
    /// `buckets` (buckets.len() == (u64::MAX >> shift) + 1)
    shift: u32,
}
+
+impl<'a, V> Interner<'a, V> {
+    fn new(capacity: usize) -> Self {
+        // Add additional buckets to help reduce collisions
+        let shift = (capacity as u64 + 128).leading_zeros();
+        let num_buckets = (u64::MAX >> shift) as usize;
+        let buckets = (0..num_buckets.saturating_add(1)).map(|_| 
None).collect();
+        Self {
+            // A fixed seed to ensure deterministic behaviour
+            state: RandomState::with_seeds(0, 0, 0, 0),
+            buckets,
+            shift,
+        }
+    }
+
+    fn intern<F: FnOnce() -> Result<V, E>, E>(
+        &mut self,
+        new: &'a [u8],
+        f: F,
+    ) -> Result<&V, E> {
+        let hash = self.state.hash_one(new);
+        let bucket_idx = hash >> self.shift;
+        Ok(match &mut self.buckets[bucket_idx as usize] {
+            Some((current, v)) => {
+                if *current != new {
+                    *v = f()?;
+                    *current = new;
+                }
+                v
+            }
+            slot => &slot.insert((new, f()?)).1,
+        })
+    }
+}
+
/// The result of merging the values of several [`DictionaryArray`]: a single
/// combined values array together with, for each input array, a remapping of
/// its old keys into the new values array
pub struct MergedDictionaries<K: ArrowDictionaryKeyType> {
    /// Provides `key_mappings[`array_idx`][`old_key`] -> new_key`
    pub key_mappings: Vec<Vec<K::Native>>,
    /// The new values
    pub values: ArrayRef,
}
+
+/// A weak heuristic of whether to merge dictionary values that aims to only
+/// perform the expensive computation when is likely to yield at least
+/// some return over the naive approach used by MutableArrayData
+///
+/// `len` is the total length of the merged output
+pub fn should_merge_dictionary_values<K: ArrowDictionaryKeyType>(
+    arrays: &[&dyn Array],
+    len: usize,
+) -> bool {
+    let first_array = &arrays[0].data();
+    let first_values = &first_array.child_data()[0];
+
+    let mut single_dictionary = true;
+    let mut total_values = first_values.len();
+    for a in arrays.iter().skip(1) {
+        let data = a.data();
+
+        let values = &data.child_data()[0];
+        total_values += values.len();
+        single_dictionary &= ArrayData::ptr_eq(values, first_values);
+    }
+
+    let overflow = K::Native::from_usize(total_values).is_none();
+    let values_exceed_length = total_values >= len;
+    let is_supported = first_values.data_type().is_byte_array();
+
+    !single_dictionary && is_supported && (overflow || values_exceed_length)
+}
+
+/// Given an array of dictionaries and an optional row mask compute a values 
array
+/// containing referenced values, along with mappings from the 
[`DictionaryArray`]
+/// keys to the new keys within this values array. Best-effort will be made to 
ensure
+/// that the dictionary values are unique
+pub fn merge_dictionary_values<K: ArrowDictionaryKeyType>(
+    dictionaries: &[(&DictionaryArray<K>, Option<Buffer>)],
+) -> Result<MergedDictionaries<K>, ArrowError> {
+    let mut num_values = 0;
+
+    let mut values = Vec::with_capacity(dictionaries.len());
+    let mut value_slices = Vec::with_capacity(dictionaries.len());
+
+    for (dictionary, key_mask) in dictionaries {

Review Comment:
   I saw in the `should_merge_dictionary_values` function that dictionaries with 
the same pointers are considered the same (`ArrayData::ptr_eq(values, 
first_values);`). I don't know how frequent that scenario is, but is it worth 
keeping track of merged dictionary pointers and skipping the merge if they are 
seen again?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to