alamb commented on code in PR #3558:
URL: https://github.com/apache/arrow-rs/pull/3558#discussion_r1083272492
##########
arrow-schema/src/datatype.rs:
##########
@@ -346,6 +346,13 @@ impl DataType {
)
}
+ /// Returns true if this type is a variable length byte array type
Review Comment:
```suggestion
/// Returns true if this type is a variable length byte array type
(`Utf8`, `LargeUtf8`, `Binary` or `LargeBinary`)
```
##########
arrow-select/src/concat.rs:
##########
@@ -54,6 +58,53 @@ fn binary_capacity<T: ByteArrayType>(arrays: &[&dyn Array])
-> Capacities {
Capacities::Binary(item_capacity, Some(bytes_capacity))
}
+fn concat_dictionaries<K: ArrowDictionaryKeyType>(
+ arrays: &[&dyn Array],
+) -> Result<ArrayRef, ArrowError> {
+ let output_len = arrays.iter().map(|x| x.len()).sum();
+ if !should_merge_dictionary_values::<K>(arrays, output_len) {
+ return concat_fallback(arrays, Capacities::Array(output_len));
+ }
+
+ // Recompute dictionaries
+ let dictionaries: Vec<_> = arrays
+ .iter()
+ .map(|a| (as_dictionary_array::<K>(*a), None))
+ .collect();
+
+ let merged = merge_dictionary_values(&dictionaries)?;
+
+ // Recompute keys
+ let mut keys = PrimitiveBuilder::<K>::with_capacity(output_len);
+
+ for ((d, _), mapping) in dictionaries.iter().zip(merged.key_mappings) {
+ for key in d.keys_iter() {
+ keys.append_option(key.map(|x| mapping[x]));
+ }
+ }
Review Comment:
Is there any way to use the new `extend` function that got added? Something
like (untested)
```suggestion
for ((d, _), mapping) in dictionaries.iter().zip(merged.key_mappings) {
keys.extend(d.keys_iter().map(|x| mapping[x]))
}
```
##########
arrow-select/src/concat.rs:
##########
@@ -78,9 +129,20 @@ pub fn concat(arrays: &[&dyn Array]) -> Result<ArrayRef,
ArrowError> {
DataType::LargeUtf8 => binary_capacity::<LargeUtf8Type>(arrays),
DataType::Binary => binary_capacity::<BinaryType>(arrays),
DataType::LargeBinary => binary_capacity::<LargeBinaryType>(arrays),
+ DataType::Dictionary(k, _) => downcast_integer! {
+ k.as_ref() => (dict_helper, arrays),
+ _ => unreachable!("illegal dictionary key type {k}")
+ },
_ => Capacities::Array(arrays.iter().map(|a| a.len()).sum()),
};
+ concat_fallback(arrays, capacity)
+}
+
+fn concat_fallback(
Review Comment:
Maybe it would help here to offer a comment like
```suggestion
/// Naively concatenates arrays by copying values
fn concat_fallback(
```
##########
arrow-select/src/interleave.rs:
##########
@@ -281,6 +346,32 @@ mod tests {
)
}
+ #[test]
+ fn test_interleave_dictionary() {
+ let a = DictionaryArray::<Int32Type>::from_iter(["a", "b", "c", "a",
"b"]);
+ let b = DictionaryArray::<Int32Type>::from_iter(["a", "c", "a", "c",
"a"]);
Review Comment:
Likewise I think we should have code to test nullable arrays as well
##########
arrow-select/src/dictionary.rs:
##########
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::interleave::interleave;
+use ahash::RandomState;
+use arrow_array::builder::BooleanBufferBuilder;
+use arrow_array::cast::{as_generic_binary_array, as_largestring_array,
as_string_array};
+use arrow_array::types::{ArrowDictionaryKeyType, ByteArrayType};
+use arrow_array::{Array, ArrayRef, DictionaryArray, GenericByteArray};
+use arrow_buffer::{ArrowNativeType, Buffer, MutableBuffer};
+use arrow_data::bit_iterator::BitIndexIterator;
+use arrow_data::ArrayData;
+use arrow_schema::{ArrowError, DataType};
+
+/// A best effort interner that maintains a fixed number of buckets
+/// and interns keys based on their hash value
+///
+/// Hash collisions will result in replacement
+struct Interner<'a, V> {
+ state: RandomState,
+ buckets: Vec<Option<(&'a [u8], V)>>,
+ shift: u32,
+}
+
+impl<'a, V> Interner<'a, V> {
+ fn new(capacity: usize) -> Self {
Review Comment:
It might help to document the implications of `capacity` size here -- it
seems like the idea is that the smaller the capacity, the greater the chance
of collisions (and thus duplicates)
##########
arrow-select/src/concat.rs:
##########
@@ -502,16 +554,45 @@ mod tests {
.into_iter()
.collect();
let input_2: DictionaryArray<Int32Type> =
vec![None].into_iter().collect();
- let expected = vec![
- Some("foo".to_string()),
- Some("bar".to_string()),
- None,
- Some("fiz".to_string()),
- None,
- ];
+ let expected = vec![Some("foo"), Some("bar"), None, Some("fiz"), None];
+
+ let concat = concat(&[&input_1 as _, &input_2 as _]).unwrap();
+ let dictionary = as_dictionary_array::<Int32Type>(concat.as_ref());
+ let actual = collect_string_dictionary(dictionary);
+ assert_eq!(actual, expected);
- let concat = concat_dictionary(input_1, input_2);
- assert_eq!(concat, expected);
+ // Should have concatenated inputs together
+ assert_eq!(
+ dictionary.values().len(),
+ input_1.values().len() + input_2.values().len(),
+ )
+ }
+
+ #[test]
+ fn test_string_dictionary_merge() {
+ let mut builder = StringDictionaryBuilder::<Int32Type>::new();
+ for i in 0..20 {
+ builder.append(&i.to_string()).unwrap();
+ }
+ let input_1 = builder.finish();
+
+ let mut builder = StringDictionaryBuilder::<Int32Type>::new();
+ for i in 0..30 {
+ builder.append(&i.to_string()).unwrap();
+ }
+ let input_2 = builder.finish();
+
+ let expected: Vec<_> = (0..20).chain(0..30).map(|x|
x.to_string()).collect();
+ let expected: Vec<_> = expected.iter().map(|x|
Some(x.as_str())).collect();
+
+ let concat = concat(&[&input_1 as _, &input_2 as _]).unwrap();
+ let dictionary = as_dictionary_array::<Int32Type>(concat.as_ref());
+ let actual = collect_string_dictionary(dictionary);
+ assert_eq!(actual, expected);
+
+ // Should have merged inputs together
+ // Not 30 as this is done on a best-effort basis
Review Comment:
👍
##########
arrow-select/src/dictionary.rs:
##########
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::interleave::interleave;
+use ahash::RandomState;
+use arrow_array::builder::BooleanBufferBuilder;
+use arrow_array::cast::{as_generic_binary_array, as_largestring_array,
as_string_array};
+use arrow_array::types::{ArrowDictionaryKeyType, ByteArrayType};
+use arrow_array::{Array, ArrayRef, DictionaryArray, GenericByteArray};
+use arrow_buffer::{ArrowNativeType, Buffer, MutableBuffer};
+use arrow_data::bit_iterator::BitIndexIterator;
+use arrow_data::ArrayData;
+use arrow_schema::{ArrowError, DataType};
+
+/// A best effort interner that maintains a fixed number of buckets
+/// and interns keys based on their hash value
+///
+/// Hash collisions will result in replacement
+struct Interner<'a, V> {
+ state: RandomState,
+ buckets: Vec<Option<(&'a [u8], V)>>,
+ shift: u32,
+}
+
+impl<'a, V> Interner<'a, V> {
+ fn new(capacity: usize) -> Self {
+ // Add additional buckets to help reduce collisions
+ let shift = (capacity as u64 + 128).leading_zeros();
+ let num_buckets = (u64::MAX >> shift) as usize;
+ let buckets = (0..num_buckets.saturating_add(1)).map(|_|
None).collect();
+ Self {
+ // A fixed seed to ensure deterministic behaviour
+ state: RandomState::with_seeds(0, 0, 0, 0),
+ buckets,
+ shift,
+ }
+ }
+
+ fn intern<F: FnOnce() -> Result<V, E>, E>(
+ &mut self,
+ new: &'a [u8],
+ f: F,
+ ) -> Result<&V, E> {
+ let hash = self.state.hash_one(new);
+ let bucket_idx = hash >> self.shift;
+ Ok(match &mut self.buckets[bucket_idx as usize] {
+ Some((current, v)) => {
+ if *current != new {
+ *v = f()?;
+ *current = new;
+ }
+ v
+ }
+ slot => &slot.insert((new, f()?)).1,
+ })
+ }
+}
+
+pub struct MergedDictionaries<K: ArrowDictionaryKeyType> {
+ /// Provides `key_mappings[`array_idx`][`old_key`] -> new_key`
+ pub key_mappings: Vec<Vec<K::Native>>,
+ /// The new values
+ pub values: ArrayRef,
+}
+
+/// A weak heuristic of whether to merge dictionary values that aims to only
+/// perform the expensive computation when is likely to yield at least
+/// some return over the naive approach used by MutableArrayData
+///
+/// `len` is the total length of the merged output
+pub fn should_merge_dictionary_values<K: ArrowDictionaryKeyType>(
+ arrays: &[&dyn Array],
+ len: usize,
+) -> bool {
+ let first_array = &arrays[0].data();
+ let first_values = &first_array.child_data()[0];
+
+ let mut single_dictionary = true;
+ let mut total_values = first_values.len();
+ for a in arrays.iter().skip(1) {
+ let data = a.data();
+
+ let values = &data.child_data()[0];
+ total_values += values.len();
+ single_dictionary &= ArrayData::ptr_eq(values, first_values);
+ }
+
+ let overflow = K::Native::from_usize(total_values).is_none();
+ let values_exceed_length = total_values >= len;
+ let is_supported = first_values.data_type().is_byte_array();
+
+ !single_dictionary && is_supported && (overflow || values_exceed_length)
+}
+
+/// Given an array of dictionaries and an optional row mask compute a values
array
+/// containing referenced values, along with mappings from the
[`DictionaryArray`]
+/// keys to the new keys within this values array. Best-effort will be made to
ensure
+/// that the dictionary values are unique
+pub fn merge_dictionary_values<K: ArrowDictionaryKeyType>(
+ dictionaries: &[(&DictionaryArray<K>, Option<Buffer>)],
+) -> Result<MergedDictionaries<K>, ArrowError> {
+ let mut num_values = 0;
+
+ let mut values = Vec::with_capacity(dictionaries.len());
+ let mut value_slices = Vec::with_capacity(dictionaries.len());
+
+ for (dictionary, key_mask) in dictionaries {
+ let values_mask = match key_mask {
+ Some(key_mask) => {
+ let iter = BitIndexIterator::new(key_mask, 0,
dictionary.len());
+ compute_values_mask(dictionary, iter)
+ }
+ None => compute_values_mask(dictionary, 0..dictionary.len()),
+ };
+ let v = dictionary.values().as_ref();
+ num_values += v.len();
+ value_slices.push(get_masked_values(v, &values_mask));
+ values.push(v)
+ }
+
+ // Map from value to new index
+ let mut interner = Interner::new(num_values);
+ // Interleave indices for new values array
+ let mut indices = Vec::with_capacity(num_values);
+
+ // Compute the mapping for each dictionary
+ let key_mappings = dictionaries
+ .iter()
+ .enumerate()
+ .zip(value_slices)
+ .map(|((dictionary_idx, (dictionary, _)), values)| {
+ let zero = K::Native::from_usize(0).unwrap();
+ let mut mapping = vec![zero; dictionary.values().len()];
+
+ for (value_idx, value) in values {
+ mapping[value_idx] = *interner.intern(value, || {
+ match K::Native::from_usize(indices.len()) {
+ Some(idx) => {
+ indices.push((dictionary_idx, value_idx));
+ Ok(idx)
+ }
+ None => Err(ArrowError::DictionaryKeyOverflowError),
+ }
+ })?;
+ }
+ Ok(mapping)
+ })
+ .collect::<Result<Vec<_>, ArrowError>>()?;
+
+ Ok(MergedDictionaries {
+ key_mappings,
+ values: interleave(&values, &indices)?,
+ })
+}
+
+/// Return a mask identifying the values that are referenced by keys in
`dictionary`
+/// at the positions indicated by `selection`
+fn compute_values_mask<K, I>(dictionary: &DictionaryArray<K>, selection: I) ->
Buffer
+where
+ K: ArrowDictionaryKeyType,
+ I: IntoIterator<Item = usize>,
+{
+ let len = dictionary.values().len();
+ let mut builder =
+ BooleanBufferBuilder::new_from_buffer(MutableBuffer::new_null(len),
len);
+
+ let keys = dictionary.keys();
+
+ for i in selection {
+ if keys.is_valid(i) {
+ let key = keys.values()[i];
+ builder.set_bit(key.as_usize(), true)
+ }
+ }
+ builder.finish()
+}
+
+/// Return a Vec containing for each set index in `mask`, the index and byte
value of that index
+fn get_masked_values<'a>(array: &'a dyn Array, mask: &Buffer) -> Vec<(usize,
&'a [u8])> {
+ match array.data_type() {
+ DataType::Utf8 => masked_bytes(as_string_array(array), mask),
+ DataType::LargeUtf8 => masked_bytes(as_largestring_array(array), mask),
+ DataType::Binary =>
masked_bytes(as_generic_binary_array::<i32>(array), mask),
+ DataType::LargeBinary => {
+ masked_bytes(as_generic_binary_array::<i64>(array), mask)
+ }
+ _ => unimplemented!(),
+ }
+}
+
+/// Compute [`get_masked_values`] for a [`GenericByteArray`]
+///
+/// Note: this does not check the null mask and will return values contained
in null slots
+fn masked_bytes<'a, T: ByteArrayType>(
+ array: &'a GenericByteArray<T>,
+ mask: &Buffer,
+) -> Vec<(usize, &'a [u8])> {
+ let cap = mask.count_set_bits_offset(0, array.len());
+ let mut out = Vec::with_capacity(cap);
+ for idx in BitIndexIterator::new(mask.as_slice(), 0, array.len()) {
+ out.push((idx, array.value(idx).as_ref()))
+ }
+ out
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::dictionary::merge_dictionary_values;
+ use arrow_array::cast::{as_dictionary_array, as_string_array};
+ use arrow_array::types::Int32Type;
+ use arrow_array::{Array, DictionaryArray};
+ use arrow_buffer::Buffer;
+
+ #[test]
+ fn test_merge_strings() {
+ let a =
+ DictionaryArray::<Int32Type>::from_iter(["a", "b", "a", "b", "d",
"c", "e"]);
+ let b = DictionaryArray::<Int32Type>::from_iter(["c", "f", "c", "d",
"a", "d"]);
+ let merged = merge_dictionary_values(&[(&a, None), (&b,
None)]).unwrap();
Review Comment:
I think we should have at least one test with NULL values in the dictionary
keys
##########
arrow-select/src/dictionary.rs:
##########
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::interleave::interleave;
+use ahash::RandomState;
+use arrow_array::builder::BooleanBufferBuilder;
+use arrow_array::cast::{as_generic_binary_array, as_largestring_array,
as_string_array};
+use arrow_array::types::{ArrowDictionaryKeyType, ByteArrayType};
+use arrow_array::{Array, ArrayRef, DictionaryArray, GenericByteArray};
+use arrow_buffer::{ArrowNativeType, Buffer, MutableBuffer};
+use arrow_data::bit_iterator::BitIndexIterator;
+use arrow_data::ArrayData;
+use arrow_schema::{ArrowError, DataType};
+
+/// A best effort interner that maintains a fixed number of buckets
+/// and interns keys based on their hash value
+///
+/// Hash collisions will result in replacement
+struct Interner<'a, V> {
+ state: RandomState,
+ buckets: Vec<Option<(&'a [u8], V)>>,
+ shift: u32,
+}
+
+impl<'a, V> Interner<'a, V> {
+ fn new(capacity: usize) -> Self {
+ // Add additional buckets to help reduce collisions
+ let shift = (capacity as u64 + 128).leading_zeros();
+ let num_buckets = (u64::MAX >> shift) as usize;
+ let buckets = (0..num_buckets.saturating_add(1)).map(|_|
None).collect();
+ Self {
+ // A fixed seed to ensure deterministic behaviour
+ state: RandomState::with_seeds(0, 0, 0, 0),
+ buckets,
+ shift,
+ }
+ }
+
+ fn intern<F: FnOnce() -> Result<V, E>, E>(
+ &mut self,
+ new: &'a [u8],
+ f: F,
+ ) -> Result<&V, E> {
+ let hash = self.state.hash_one(new);
+ let bucket_idx = hash >> self.shift;
+ Ok(match &mut self.buckets[bucket_idx as usize] {
+ Some((current, v)) => {
+ if *current != new {
+ *v = f()?;
+ *current = new;
+ }
+ v
+ }
+ slot => &slot.insert((new, f()?)).1,
+ })
+ }
+}
+
+pub struct MergedDictionaries<K: ArrowDictionaryKeyType> {
+ /// Provides `key_mappings[`array_idx`][`old_key`] -> new_key`
+ pub key_mappings: Vec<Vec<K::Native>>,
+ /// The new values
+ pub values: ArrayRef,
+}
+
+/// A weak heuristic of whether to merge dictionary values that aims to only
+/// perform the expensive computation when is likely to yield at least
+/// some return over the naive approach used by MutableArrayData
+///
+/// `len` is the total length of the merged output
+pub fn should_merge_dictionary_values<K: ArrowDictionaryKeyType>(
+ arrays: &[&dyn Array],
+ len: usize,
+) -> bool {
+ let first_array = &arrays[0].data();
+ let first_values = &first_array.child_data()[0];
+
+ let mut single_dictionary = true;
+ let mut total_values = first_values.len();
+ for a in arrays.iter().skip(1) {
+ let data = a.data();
+
+ let values = &data.child_data()[0];
+ total_values += values.len();
+ single_dictionary &= ArrayData::ptr_eq(values, first_values);
+ }
+
+ let overflow = K::Native::from_usize(total_values).is_none();
+ let values_exceed_length = total_values >= len;
+ let is_supported = first_values.data_type().is_byte_array();
+
+ !single_dictionary && is_supported && (overflow || values_exceed_length)
+}
+
+/// Given an array of dictionaries and an optional row mask compute a values
array
+/// containing referenced values, along with mappings from the
[`DictionaryArray`]
+/// keys to the new keys within this values array. Best-effort will be made to
ensure
+/// that the dictionary values are unique
Review Comment:
```suggestion
/// that the dictionary values are unique.
///
/// This method is meant to be very fast and the output dictionary values
/// may not be unique, unlike `GenericByteDictionaryBuilder` which is slower
/// but produces unique values
```
##########
arrow-select/src/dictionary.rs:
##########
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::interleave::interleave;
+use ahash::RandomState;
+use arrow_array::builder::BooleanBufferBuilder;
+use arrow_array::cast::{as_generic_binary_array, as_largestring_array,
as_string_array};
+use arrow_array::types::{ArrowDictionaryKeyType, ByteArrayType};
+use arrow_array::{Array, ArrayRef, DictionaryArray, GenericByteArray};
+use arrow_buffer::{ArrowNativeType, Buffer, MutableBuffer};
+use arrow_data::bit_iterator::BitIndexIterator;
+use arrow_data::ArrayData;
+use arrow_schema::{ArrowError, DataType};
+
+/// A best effort interner that maintains a fixed number of buckets
+/// and interns keys based on their hash value
+///
+/// Hash collisions will result in replacement
+struct Interner<'a, V> {
+ state: RandomState,
+ buckets: Vec<Option<(&'a [u8], V)>>,
+ shift: u32,
+}
+
+impl<'a, V> Interner<'a, V> {
+ fn new(capacity: usize) -> Self {
+ // Add additional buckets to help reduce collisions
+ let shift = (capacity as u64 + 128).leading_zeros();
+ let num_buckets = (u64::MAX >> shift) as usize;
+ let buckets = (0..num_buckets.saturating_add(1)).map(|_|
None).collect();
+ Self {
+ // A fixed seed to ensure deterministic behaviour
+ state: RandomState::with_seeds(0, 0, 0, 0),
+ buckets,
+ shift,
+ }
+ }
+
+ fn intern<F: FnOnce() -> Result<V, E>, E>(
+ &mut self,
+ new: &'a [u8],
+ f: F,
+ ) -> Result<&V, E> {
+ let hash = self.state.hash_one(new);
+ let bucket_idx = hash >> self.shift;
+ Ok(match &mut self.buckets[bucket_idx as usize] {
+ Some((current, v)) => {
+ if *current != new {
+ *v = f()?;
+ *current = new;
+ }
+ v
+ }
+ slot => &slot.insert((new, f()?)).1,
+ })
+ }
+}
+
+pub struct MergedDictionaries<K: ArrowDictionaryKeyType> {
+ /// Provides `key_mappings[`array_idx`][`old_key`] -> new_key`
+ pub key_mappings: Vec<Vec<K::Native>>,
+ /// The new values
+ pub values: ArrayRef,
+}
+
+/// A weak heuristic of whether to merge dictionary values that aims to only
+/// perform the expensive computation when is likely to yield at least
Review Comment:
```suggestion
/// perform the expensive merge computation when is likely to yield at least
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]