Dandandan commented on a change in pull request #8722:
URL: https://github.com/apache/arrow/pull/8722#discussion_r528935157



##########
File path: rust/arrow/src/arch/avx512.rs
##########
@@ -41,6 +45,173 @@ pub(crate) unsafe fn avx512_bin_or(left: &[u8], right: &[u8], res: &mut [u8]) {
     std::ptr::copy_nonoverlapping(s, d, std::mem::size_of::<__m512i>());
 }
 
+///
+/// Sorting network for a single SIMD vector of i64s
+#[target_feature(enable = "avx512f")]
+pub(crate) unsafe fn avx512_vec_sort_i64_single<'a>(input: &[i64]) -> [i64; 8] {
+    use core::arch::x86_64::{
+        __m512i, _mm512_loadu_epi64, _mm512_mask_mov_epi64, _mm512_max_epi64,
+        _mm512_min_epi64, _mm512_permutexvar_epi64, _mm512_set_epi64,
+    };
+
+    // First wiring's permute exchange for the sorting network
+    let mut inp: __m512i = _mm512_loadu_epi64(input.as_ptr() as *const _);
+    let idxnn1: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn1, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    // Second wiring's permute exchange for the sorting network
+    let idxnn2: __m512i = _mm512_set_epi64(4, 5, 6, 7, 0, 1, 2, 3);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    // Third wiring's permute exchange for the sorting network
+    let idxnn3: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    // Fourth wiring's permute exchange, which does forwarding.
+    let idxnn4: __m512i = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max);
+
+    // Fifth wiring's permute exchange for the sorting network
+    let idxnn5: __m512i = _mm512_set_epi64(5, 4, 7, 6, 1, 0, 3, 2);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn5, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    // Sixth wiring's permute exchange for the sorting network
+    let idxnn6: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn6, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    std::mem::transmute(inp)
+}
+
+///
+/// Sorting network with SIMD merger for two SIMD vectors of i64s
+#[target_feature(enable = "avx512f")]
+pub(crate) unsafe fn avx512_vec_sort_i64_double(
+    left: &[i64],
+    right: &[i64],
+) -> [[i64; 8]; 2] {
+    use core::arch::x86_64::{
+        __m512i, _mm512_loadu_epi64, _mm512_mask_mov_epi64, _mm512_max_epi64,
+        _mm512_min_epi64, _mm512_permutexvar_epi64, _mm512_set_epi64,
+    };
+
+    let (l, r) = (
+        avx512_vec_sort_i64_single(left),
+        avx512_vec_sort_i64_single(right),
+    );
+
+    let mut l: __m512i = _mm512_loadu_epi64(l.as_ptr() as *const _);
+    let mut r: __m512i = _mm512_loadu_epi64(r.as_ptr() as *const _);
+
+    // Full blend of both vector wires
+    let idxnn1: __m512i = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn1, l);
+    l = _mm512_min_epi64(r, wire_n);
+    r = _mm512_max_epi64(r, wire_n);
+
+    // Carries on with normal sorting network operation
+    let idxnn2: __m512i = _mm512_set_epi64(3, 2, 1, 0, 7, 6, 5, 4);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max); // 0x33
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max); // 0x33
+
+    let idxnn3: __m512i = _mm512_set_epi64(5, 4, 7, 6, 1, 0, 3, 2);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    let idxnn4: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    let lf: [i64; 8] = std::mem::transmute(l);
+    let rf: [i64; 8] = std::mem::transmute(r);
+
+    [lf, rf]
+}
+
+///
+/// Permute exchange width for the AVX-512 SIMD application
+pub(crate) const PERMUTE_EXCHANGE_WIDTH: usize = 8;
+
+///
+/// Merge layer for sorting network
+fn merger_net(mut input: Vec<i64>) -> Vec<i64> {
+    let half = input.len() / 2;
+    if half > PERMUTE_EXCHANGE_WIDTH {
+        (0..half).into_iter().for_each(|e| unsafe {
+            if input[e] > input[e + half] {
+                let pl: *mut i64 = &mut input[e];
+                let pr: *mut i64 = &mut input[e + half];
+                std::ptr::swap(pl, pr);
+            }
+        });
+        merger_net(input[..half].to_vec());

Review comment:
       Doesn't this create a lot of intermediate vecs / recursion? I guess it could be written manually and with one bigger allocation?
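
       For illustration, a rough sketch of the in-place, single-buffer shape this could take (hypothetical code, not part of this PR):

```rust
/// Hypothetical in-place merge layer: same comparisons as `merger_net`,
/// but recursing on disjoint sub-slices of one buffer via `split_at_mut`
/// instead of copying each half into a fresh `Vec<i64>`.
fn merger_net_in_place(input: &mut [i64]) {
    let half = input.len() / 2;
    if half > PERMUTE_EXCHANGE_WIDTH {
        // Compare-exchange corresponding elements of the two halves.
        for e in 0..half {
            if input[e] > input[e + half] {
                input.swap(e, e + half);
            }
        }
        // Recurse on the two halves of the same buffer; no copies.
        let (lo, hi) = input.split_at_mut(half);
        merger_net_in_place(lo);
        merger_net_in_place(hi);
    }
}
```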

##########
File path: rust/arrow/src/arch/avx512.rs
##########
@@ -41,6 +45,173 @@ pub(crate) unsafe fn avx512_bin_or(left: &[u8], right: &[u8], res: &mut [u8]) {
     std::ptr::copy_nonoverlapping(s, d, std::mem::size_of::<__m512i>());
 }
 
+///
+/// Sorting network for a single SIMD vector of i64s
+#[target_feature(enable = "avx512f")]
+pub(crate) unsafe fn avx512_vec_sort_i64_single<'a>(input: &[i64]) -> [i64; 8] {
+    use core::arch::x86_64::{
+        __m512i, _mm512_loadu_epi64, _mm512_mask_mov_epi64, _mm512_max_epi64,
+        _mm512_min_epi64, _mm512_permutexvar_epi64, _mm512_set_epi64,
+    };
+
+    // First wiring's permute exchange for the sorting network
+    let mut inp: __m512i = _mm512_loadu_epi64(input.as_ptr() as *const _);
+    let idxnn1: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn1, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    // Second wiring's permute exchange for the sorting network
+    let idxnn2: __m512i = _mm512_set_epi64(4, 5, 6, 7, 0, 1, 2, 3);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    // Third wiring's permute exchange for the sorting network
+    let idxnn3: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    // Fourth wiring's permute exchange, which does forwarding.
+    let idxnn4: __m512i = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max);
+
+    // Fifth wiring's permute exchange for the sorting network
+    let idxnn5: __m512i = _mm512_set_epi64(5, 4, 7, 6, 1, 0, 3, 2);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn5, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    // Sixth wiring's permute exchange for the sorting network
+    let idxnn6: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn6, inp);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, inp);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, inp);
+    inp = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    std::mem::transmute(inp)
+}
+
+///
+/// Sorting network with SIMD merger for two SIMD vectors of i64s
+#[target_feature(enable = "avx512f")]
+pub(crate) unsafe fn avx512_vec_sort_i64_double(
+    left: &[i64],
+    right: &[i64],
+) -> [[i64; 8]; 2] {
+    use core::arch::x86_64::{
+        __m512i, _mm512_loadu_epi64, _mm512_mask_mov_epi64, _mm512_max_epi64,
+        _mm512_min_epi64, _mm512_permutexvar_epi64, _mm512_set_epi64,
+    };
+
+    let (l, r) = (
+        avx512_vec_sort_i64_single(left),
+        avx512_vec_sort_i64_single(right),
+    );
+
+    let mut l: __m512i = _mm512_loadu_epi64(l.as_ptr() as *const _);
+    let mut r: __m512i = _mm512_loadu_epi64(r.as_ptr() as *const _);
+
+    // Full blend of both vector wires
+    let idxnn1: __m512i = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn1, l);
+    l = _mm512_min_epi64(r, wire_n);
+    r = _mm512_max_epi64(r, wire_n);
+
+    // Carries on with normal sorting network operation
+    let idxnn2: __m512i = _mm512_set_epi64(3, 2, 1, 0, 7, 6, 5, 4);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max); // 0x33
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn2, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xF0, wire_n_max); // 0x33
+
+    let idxnn3: __m512i = _mm512_set_epi64(5, 4, 7, 6, 1, 0, 3, 2);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn3, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xCC, wire_n_max);
+
+    let idxnn4: __m512i = _mm512_set_epi64(6, 7, 4, 5, 2, 3, 0, 1);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, l);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, l);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, l);
+    l = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+    let wire_n: __m512i = _mm512_permutexvar_epi64(idxnn4, r);
+    let wire_n_min: __m512i = _mm512_min_epi64(wire_n, r);
+    let wire_n_max: __m512i = _mm512_max_epi64(wire_n, r);
+    r = _mm512_mask_mov_epi64(wire_n_min, 0xAA, wire_n_max);
+
+    let lf: [i64; 8] = std::mem::transmute(l);
+    let rf: [i64; 8] = std::mem::transmute(r);
+
+    [lf, rf]
+}
+
+///
+/// Permute exchange width for the AVX-512 SIMD application
+pub(crate) const PERMUTE_EXCHANGE_WIDTH: usize = 8;
+
+///
+/// Merge layer for sorting network
+fn merger_net(mut input: Vec<i64>) -> Vec<i64> {
+    let half = input.len() / 2;
+    if half > PERMUTE_EXCHANGE_WIDTH {
+        (0..half).into_iter().for_each(|e| unsafe {
+            if input[e] > input[e + half] {
+                let pl: *mut i64 = &mut input[e];
+                let pr: *mut i64 = &mut input[e + half];
+                std::ptr::swap(pl, pr);
+            }
+        });
+        merger_net(input[..half].to_vec());
+        merger_net(input[half..].to_vec());
+    }
+    input
+}
+
+///
+/// Cold-path marker that hints the compiler about unlikely branches.
+#[inline]
+#[cold]
+fn cold() {}
+
+///
+/// Size-independent sorter for any slice whose length is a power of two.
+pub(crate) unsafe fn avx512_vec_sort_i64(input: &[i64]) -> Vec<i64> {
+    if (input.len() / 2) == PERMUTE_EXCHANGE_WIDTH {
+    let v: Vec<&[i64]> = input.chunks_exact(PERMUTE_EXCHANGE_WIDTH).collect();
+        let x = avx512_vec_sort_i64_double(&v[0], &v[1]);
+        [x[0], x[1]].concat()
+    } else {
+        if (input.len() / 2) == 0 {
+            cold();
+            input.to_vec()
+        } else {
+            let mut it = input.chunks_exact(input.len() / 2);
+            let l = avx512_vec_sort_i64(it.next().unwrap());

Review comment:
       Here as well?
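
       For the 16-element fast path, a minimal sketch of how the temporary `Vec<&[i64]>` could be avoided (hypothetical, reusing the helpers above):

```rust
/// Hypothetical variant of the `len == 16` branch: split the slice
/// directly instead of collecting `chunks_exact` into a `Vec<&[i64]>`.
unsafe fn avx512_sort_16_i64(input: &[i64]) -> Vec<i64> {
    debug_assert_eq!(input.len(), 2 * PERMUTE_EXCHANGE_WIDTH);
    let (first, second) = input.split_at(PERMUTE_EXCHANGE_WIDTH);
    let halves = avx512_vec_sort_i64_double(first, second);
    // One final allocation for the merged result.
    [halves[0], halves[1]].concat()
}
```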




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

