================
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=AVX512,AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512DQBW
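+;
+; The four RUN lines cover AVX512F alone, +DQ, +BW, and +DQ+BW: the shared
+; AVX512 prefix checks lowering common to all configurations, while the
+; per-feature prefixes distinguish kxnorb (needs AVX512DQ) from the
+; mov+kmov fallback.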
+
+declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
+declare <16 x float> @llvm.masked.expandload.v16f32(ptr, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
+declare <16 x i32> @llvm.masked.expandload.v16i32(ptr, <16 x i1>, <16 x i32>)
+
+; Test case 1: Direct v8i1 all-ones mask (should use kxnorb on AVX512DQ)
+define <8 x float> @mask_v8i1_allones(ptr %ptr) {
+; AVX512F-LABEL: mask_v8i1_allones:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: movw $255, %ax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: mask_v8i1_allones:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQ-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: mask_v8i1_allones:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: movw $255, %ax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQBW-LABEL: mask_v8i1_allones:
+; AVX512DQBW: # %bb.0:
+; AVX512DQBW-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQBW-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512DQBW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512DQBW-NEXT: retq
+ %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr %ptr, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> zeroinitializer)
+ ret <8 x float> %res
+}
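+; On AVX512DQ targets an all-ones 8-bit mask can be built entirely inside
+; the mask-register file:
+;   kxnorb %k0, %k0, %k1   # k1 = NOT(k0 XOR k0) = 0xFF
+; Without AVX512DQ the constant has to take a GPR round trip instead:
+;   movw $255, %ax
+;   kmovw %eax, %k1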
+
+; Test case 2: v16i1 with lower 8 bits set via bitconvert (should use kxnorb on AVX512DQ)
+define <16 x float> @mask_v16i1_lower8(ptr %ptr) {
+; AVX512F-LABEL: mask_v16i1_lower8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: movw $255, %ax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: mask_v16i1_lower8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQ-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: mask_v16i1_lower8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: movw $255, %ax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQBW-LABEL: mask_v16i1_lower8:
+; AVX512DQBW: # %bb.0:
+; AVX512DQBW-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQBW-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; AVX512DQBW-NEXT: retq
+ %res = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x float> zeroinitializer)
+ ret <16 x float> %res
+}
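+; The v16i1 constant here only has its low 8 bits set, so the lowering is
+; expected to narrow it to the same 8-bit all-ones pattern as test case 1,
+; letting AVX512DQ targets keep the single kxnorb.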
+
+; Test case 3: v16i1 with all bits set (should use kxnorw on all targets)
+define <16 x float> @gather_all(ptr %base, <16 x i32> %ind, i16 %mask) {
+; AVX512-LABEL: gather_all:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kxnorw %k0, %k0, %k1
+; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %broadcast.splatinsert = insertelement <16 x ptr> poison, ptr %base, i32 0
+ %broadcast.splat = shufflevector <16 x ptr> %broadcast.splatinsert, <16 x ptr> poison, <16 x i32> zeroinitializer
+ %sext_ind = sext <16 x i32> %ind to <16 x i64>
+ %gep.random = getelementptr float, <16 x ptr> %broadcast.splat, <16 x i64> %sext_ind
+ %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> poison)
+ ret <16 x float> %res
+}
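+; kxnorw only needs base AVX512F, so every configuration materializes the
+; 16-bit all-ones mask the same way and one shared AVX512 prefix suffices.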
+
+; Test case 4: v8i1 with lower 8 bits set in gather (should use kxnorb on AVX512DQ targets)
+define <16 x float> @gather_lower(ptr %base, <16 x i32> %ind, i16 %mask) {
+; AVX512F-LABEL: gather_lower:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: movw $255, %ax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; AVX512F-NEXT: vmovaps %zmm1, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: gather_lower:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512DQ-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQ-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; AVX512DQ-NEXT: vmovaps %zmm1, %zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: gather_lower:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: movw $255, %ax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; AVX512BW-NEXT: vmovaps %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQBW-LABEL: gather_lower:
+; AVX512DQBW: # %bb.0:
+; AVX512DQBW-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512DQBW-NEXT: kxnorb %k0, %k0, %k1
+; AVX512DQBW-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; AVX512DQBW-NEXT: vmovaps %zmm1, %zmm0
+; AVX512DQBW-NEXT: retq
+ %broadcast.splatinsert = insertelement <16 x ptr> poison, ptr %base, i32 0
+ %broadcast.splat = shufflevector <16 x ptr> %broadcast.splatinsert, <16 x ptr> poison, <16 x i32> zeroinitializer
+ %sext_ind = sext <16 x i32> %ind to <16 x i64>
+ %gep.random = getelementptr float, <16 x ptr> %broadcast.splat, <16 x i64> %sext_ind
+ %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x float> poison)
+ ret <16 x float> %res
+}
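+; Same lower-8-bits constant as test case 2, but consumed by a gather rather
+; than an expandload; the mask materialization (kxnorb vs. mov+kmov) should
+; be unaffected by which masked operation reads %k1.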
+
+; Test case 5: v32i1 mask via bitconvert, lower 16 bits set (tests bitconvert pattern)
+define <32 x i16> @mask_v32i1_lower16(<32 x i16> %a, <32 x i16> %b) {
+; AVX512-LABEL: mask_v32i1_lower16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512-NEXT: retq
+ %mask = bitcast i32 65535 to <32 x i1>
+ %res = select <32 x i1> %mask, <32 x i16> %a, <32 x i16> %b
+ ret <32 x i16> %res
+}
+
+; Test case 6: v64i1 mask via bitconvert, lower 32 bits set (tests bitconvert pattern)
+define <64 x i8> @mask_v64i1_lower32(<64 x i8> %a, <64 x i8> %b) {
+; AVX512-LABEL: mask_v64i1_lower32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512-NEXT: retq
+ %mask = bitcast i64 4294967295 to <64 x i1>
+ %res = select <64 x i1> %mask, <64 x i8> %a, <64 x i8> %b
+ ret <64 x i8> %res
+}
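+; In test cases 5 and 6 the constant mask selects exactly the lower half of
+; the vector, so no mask register is needed at all: the select folds to a
+; single vshufi64x2 taking the low 256 bits of %a and the high 256 bits of %b.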
----------------
ahmednoursphinx wrote:
@RKSimon Thanks for the suggestion, I've updated the tests to prevent folding. Please check again.
https://github.com/llvm/llvm-project/pull/166178