tislam created this revision.
tislam added a reviewer: nemanjai.
Herald added subscribers: kbarton, hiraditya.
tislam requested review of this revision.
Herald added projects: clang, LLVM.
Herald added subscribers: llvm-commits, cfe-commits.

This patch adds a big-endian version of the `vpermxor` intrinsic, named `vpermxor_be`, 
to both LLVM and Clang. `vpermxor_be` can be called directly on a little-endian 
platform: unlike the existing `vpermxor` intrinsic, whose little-endian lowering first 
complements the permute-control vector with an `xxlnor`, `vpermxor_be` maps straight 
to the `vpermxor` instruction.
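
For reference, a minimal usage sketch of the new Clang builtin (the wrapper name is 
illustrative; per BuiltinsPPC.def the builtin takes and returns `vector unsigned char`, 
and would be built with something like `-mcpu=pwr8 -maltivec`):

  #include <altivec.h>

  // Sketch: call the new builtin directly. On a little-endian target this is
  // expected to lower to a single vpermxor, with no xxlnor on the mask operand.
  vector unsigned char do_vpermxor_be(vector unsigned char a,
                                      vector unsigned char b,
                                      vector unsigned char mask) {
    return __builtin_altivec_crypto_vpermxor_be(a, b, mask);
  }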


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D114540

Files:
  clang/include/clang/Basic/BuiltinsPPC.def
  llvm/include/llvm/IR/IntrinsicsPowerPC.td
  llvm/lib/Target/PowerPC/PPCInstrVSX.td
  llvm/test/CodeGen/PowerPC/crypto_bifs_be.ll

Index: llvm/test/CodeGen/PowerPC/crypto_bifs_be.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/crypto_bifs_be.ll
@@ -0,0 +1,94 @@
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -mcpu=pwr9 < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define <16 x i8> @test_vpermxorb() #0 {
+entry:
+  %a = alloca <16 x i8>, align 16
+  %b = alloca <16 x i8>, align 16
+  %c = alloca <16 x i8>, align 16
+  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
+  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
+  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
+  %0 = load <16 x i8>,  <16 x i8>* %a, align 16
+  %1 = load <16 x i8>,  <16 x i8>* %b, align 16
+  %2 = load <16 x i8>,  <16 x i8>* %c, align 16
+  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  ret <16 x i8> %3
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8>, <16 x i8>, <16 x i8>) #1
+
+; Function Attrs: nounwind
+define <8 x i16> @test_vpermxorh() #0 {
+entry:
+  %a = alloca <8 x i16>, align 16
+  %b = alloca <8 x i16>, align 16
+  %c = alloca <8 x i16>, align 16
+  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
+  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
+  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
+  %0 = load <8 x i16>,  <8 x i16>* %a, align 16
+  %1 = bitcast <8 x i16> %0 to <16 x i8>
+  %2 = load <8 x i16>,  <8 x i16>* %b, align 16
+  %3 = bitcast <8 x i16> %2 to <16 x i8>
+  %4 = load <8 x i16>,  <8 x i16>* %c, align 16
+  %5 = bitcast <8 x i16> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <8 x i16>
+  ret <8 x i16> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <4 x i32> @test_vpermxorw() #0 {
+entry:
+  %a = alloca <4 x i32>, align 16
+  %b = alloca <4 x i32>, align 16
+  %c = alloca <4 x i32>, align 16
+  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
+  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
+  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
+  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
+  %1 = bitcast <4 x i32> %0 to <16 x i8>
+  %2 = load <4 x i32>,  <4 x i32>* %b, align 16
+  %3 = bitcast <4 x i32> %2 to <16 x i8>
+  %4 = load <4 x i32>,  <4 x i32>* %c, align 16
+  %5 = bitcast <4 x i32> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <4 x i32>
+  ret <4 x i32> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vpermxord() #0 {
+entry:
+  %a = alloca <2 x i64>, align 16
+  %b = alloca <2 x i64>, align 16
+  %c = alloca <2 x i64>, align 16
+  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
+  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
+  %1 = bitcast <2 x i64> %0 to <16 x i8>
+  %2 = load <2 x i64>,  <2 x i64>* %b, align 16
+  %3 = bitcast <2 x i64> %2 to <16 x i8>
+  %4 = load <2 x i64>,  <2 x i64>* %c, align 16
+  %5 = bitcast <2 x i64> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <2 x i64>
+  ret <2 x i64> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
Index: llvm/lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -2491,11 +2491,16 @@
 
 // These Altivec patterns are here because we need a VSX instruction to match
 // the intrinsic (but only for little endian system).
-let Predicates = [HasVSX, IsLittleEndian, HasP8Altivec] in
+let Predicates = [HasVSX, IsLittleEndian, HasP8Altivec] in {
   def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                     v16i8:$b, v16i8:$c)),
             (v16i8 (VPERMXOR $a, $b, (XXLNOR (COPY_TO_REGCLASS $c, VSRC),
                                              (COPY_TO_REGCLASS $c, VSRC))))>;
+  def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor_be v16i8:$a,
+                                                       v16i8:$b, v16i8:$c)),
+            (v16i8 (VPERMXOR $a, $b, $c))>;
+} // HasVSX, IsLittleEndian, HasP8Altivec
+
 let Predicates = [HasVSX, IsBigEndian, HasP8Altivec] in
   def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                     v16i8:$b, v16i8:$c)),
Index: llvm/include/llvm/IR/IntrinsicsPowerPC.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -1087,6 +1087,10 @@
               GCCBuiltin<"__builtin_altivec_crypto_vpermxor">,
               Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                          llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_crypto_vpermxor_be :
+              GCCBuiltin<"__builtin_altivec_crypto_vpermxor_be">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
 
 def int_ppc_altivec_crypto_vshasigmad :
             GCCBuiltin<"__builtin_altivec_crypto_vshasigmad">,
Index: clang/include/clang/Basic/BuiltinsPPC.def
===================================================================
--- clang/include/clang/Basic/BuiltinsPPC.def
+++ clang/include/clang/Basic/BuiltinsPPC.def
@@ -404,6 +404,7 @@
 // P8 Crypto built-ins.
 BUILTIN(__builtin_altivec_crypto_vsbox, "V2ULLiV2ULLi", "")
 BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "")
 BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "")
 BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "")
 BUILTIN(__builtin_altivec_crypto_vcipher, "V2ULLiV2ULLiV2ULLi", "")