This revision was automatically updated to reflect the committed changes.
Closed by commit rGd5fc592b7c26: [AArch64][SVE] Add bfloat16 support to svext intrinsic (authored by c-rhodes).

Changed prior to commit:
  https://reviews.llvm.org/D82391?vs=273685&id=274052#toc

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82391/new/

https://reviews.llvm.org/D82391

Files:
  clang/include/clang/Basic/arm_sve.td
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext-bfloat.c
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll


Index: llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
@@ -516,6 +516,16 @@
   ret <vscale x 2 x i64> %out
 }
 
+define <vscale x 8 x bfloat> @ext_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) #0 {
+; CHECK-LABEL: ext_bf16:
+; CHECK: ext z0.b, z0.b, z1.b, #6
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ext.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                   <vscale x 8 x bfloat> %b,
+                                                                   i32 3)
+  ret <vscale x 8 x bfloat> %out
+}
+
 define <vscale x 8 x half> @ext_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: ext_f16:
 ; CHECK: ext z0.b, z0.b, z1.b, #6
@@ -1885,6 +1895,7 @@
 declare <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ext.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
 declare <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
 declare <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
 declare <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1446,7 +1446,6 @@
     def : Pat<(nxv8i16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
     def : Pat<(nxv8i16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
     def : Pat<(nxv8i16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
     def : Pat<(nxv8i16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
     def : Pat<(nxv8i16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
 
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext-bfloat.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext-bfloat.c
@@ -0,0 +1,26 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
+// ASM-NOT: warning
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbfloat16_t test_svext_bf16(svbfloat16_t op1, svbfloat16_t op2)
+{
+  // CHECK-LABEL: test_svext_bf16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ext.nxv8bf16(<vscale x 8 x bfloat> %op1, <vscale x 8 x bfloat> %op2, i32 127)
+  // CHECK: ret <vscale x 8 x bfloat> %[[INTRINSIC]]
+  // expected-warning@+1 {{implicit declaration of function 'svext_bf16'}}
+  return SVE_ACLE_FUNC(svext,_bf16,,)(op1, op2, 127);
+}
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -1208,6 +1208,7 @@
 def SVZIP2       : SInst<"svzip2[_{d}]",      "ddd",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2">;
 
 let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVEXT_BF16    : SInst<"svext[_{d}]",    "dddi", "b", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
 def SVREV_BF16    : SInst<"svrev[_{d}]",    "dd",   "b", MergeNone, "aarch64_sve_rev">;
 def SVSEL_BF16    : SInst<"svsel[_{d}]",    "dPdd", "b", MergeNone, "aarch64_sve_sel">;
 def SVSPLICE_BF16 : SInst<"svsplice[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_splice">;
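For context (not part of the committed patch): a minimal usage sketch of the new bf16 overload, assuming a toolchain built with this change and an arm_sve.h that provides svext_bf16; the wrapper name ext_bf16_by_three is purely illustrative.

  // Minimal usage sketch (assumption: the compiler defines
  // __ARM_FEATURE_SVE_BF16 and ships an arm_sve.h with this change).
  #include <arm_sve.h>

  // Returns a vector built from the elements of op1 starting at element 3,
  // followed by the leading elements of op2. The index must be a compile-time
  // constant in [0, 127] for bf16; index 3 scales to the byte offset #6 seen
  // in the codegen test above (ext z0.b, z0.b, z1.b, #6).
  svbfloat16_t ext_bf16_by_three(svbfloat16_t op1, svbfloat16_t op2) {
    return svext_bf16(op1, op2, 3);
  }

The overloaded form svext(op1, op2, 3) should behave the same when __ARM_FEATURE_SVE_BF16 is defined, as exercised by the SVE_OVERLOADED_FORMS run line in the clang test.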
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits