Hi Tim and reviewers,

This patch adds the missing ACLE pairwise and across-vector intrinsics: vpaddq_s64/u64, vpaddd_u64, vaddvq_s64/u64, and the floating-point variants of vaddv, vmaxv, vminv, vmaxnmv, and vminnmv.
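
For context, here is a minimal usage sketch of two of the new intrinsics
(function names are illustrative only, mirroring the tests in the patch;
assumes an AArch64 target and <arm_neon.h>):

  #include <arm_neon.h>

  /* Across-vector add of all four lanes; with this patch it lowers to a
     faddp on the full .4s vector followed by a scalar faddp on .2s. */
  float horizontal_sum(float32x4_t v) {
    return vaddvq_f32(v);
  }

  /* Pairwise add on 64-bit lanes, newly exposed; lowers to addp .2d. */
  int64x2_t pairwise_add_64(int64x2_t a, int64x2_t b) {
    return vpaddq_s64(a, b);
  }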

Please review.

Thanks,
-Hao

http://llvm-reviews.chandlerc.com/D2299

Files:
  lib/Target/AArch64/AArch64InstrNEON.td
  test/CodeGen/AArch64/neon-add-pairwise.ll
  test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
  tools/clang/include/clang/Basic/arm_neon.td
  tools/clang/lib/CodeGen/CGBuiltin.cpp
  tools/clang/test/CodeGen/aarch64-neon-intrinsics.c
Index: lib/Target/AArch64/AArch64InstrNEON.td
===================================================================
--- lib/Target/AArch64/AArch64InstrNEON.td
+++ lib/Target/AArch64/AArch64InstrNEON.td
@@ -5173,6 +5173,8 @@
 // Scalar Reduce Addition Pairwise (Integer)
 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
           (ADDPvv_D_2D VPR128:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
+          (ADDPvv_D_2D VPR128:$Rn)>;
 
 // Scalar Reduce Addition Pairwise (Floating Point)
 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
@@ -5216,6 +5218,30 @@
 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm, 
   int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
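+// The across-vector intrinsics are overloaded on the source vector type, so
+// the same intrinsic is used for both the 64-bit and 128-bit patterns.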
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv, 
+    int_aarch64_neon_vaddv, FADDPvv_S_2S, FADDPvv_D_2D>;
+
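+// There is no single-instruction across-vector add for v4f32, so reduce in
+// two steps: FADDP on the full vector, then a scalar FADDP on the low half.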
+def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+          (FADDPvv_S_2S (v2f32
+               (EXTRACT_SUBREG
+                   (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
+                   sub_64)))>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv, 
+    int_aarch64_neon_vmaxv, FMAXPvv_S_2S, FMAXPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv, 
+    int_aarch64_neon_vminv, FMINPvv_S_2S, FMINPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv, 
+    int_aarch64_neon_vmaxnmv, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv, 
+    int_aarch64_neon_vminnmv, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
 // Scalar by element Arithmetic
 
Index: test/CodeGen/AArch64/neon-add-pairwise.ll
===================================================================
--- test/CodeGen/AArch64/neon-add-pairwise.ll
+++ test/CodeGen/AArch64/neon-add-pairwise.ll
@@ -89,4 +89,3 @@
 ; CHECK: faddp v0.2d, v0.2d, v1.2d
         ret <2 x double> %val
 }
-
Index: test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
===================================================================
--- test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -101,3 +101,147 @@
         ret <1 x double> %val
 }
 
+define float @test_vaddv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vaddv_f32
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define float @test_vaddvq_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vaddvq_f32
+; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vaddvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vaddvq_f64
+; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxv_f32
+; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vmaxvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxvq_f64
+; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminv_f32
+; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminvq_f64
+; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define double @test_vmaxnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxnmvq_f64
+; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxnmv_f32
+; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminnmvq_f64
+; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminnmv_f32
+; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_s64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_u64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define i64 @test_vaddvq_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_s64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+define i64 @test_vaddvq_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_u64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
+
+declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
Index: tools/clang/include/clang/Basic/arm_neon.td
===================================================================
--- tools/clang/include/clang/Basic/arm_neon.td
+++ tools/clang/include/clang/Basic/arm_neon.td
@@ -764,7 +764,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // Pairwise Addition
 // With additional Qc Qs Qi QUc QUs QUi Qf Qd types.
-def ADDP  : IInst<"vpadd", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQfQd">;
+def ADDP  : IInst<"vpadd", "ddd", "csiUcUsUifQcQsQiQlQUcQUsQUiQUlQfQd">;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Shifts by constant
@@ -930,11 +930,11 @@
 ////////////////////////////////////////////////////////////////////////////////
 // Across vectors class
 def VADDLV  : SInst<"vaddlv", "rd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VMAXV   : SInst<"vmaxv", "sd", "csiUcUsUiQcQsQiQUcQUsQUiQf">;
-def VMINV   : SInst<"vminv", "sd", "csiUcUsUiQcQsQiQUcQUsQUiQf">;
-def VADDV   : SInst<"vaddv", "sd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def FMAXNMV : SInst<"vmaxnmv", "sd", "Qf">;
-def FMINNMV : SInst<"vminnmv", "sd", "Qf">;
+def VMAXV   : SInst<"vmaxv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
+def VMINV   : SInst<"vminv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
+def VADDV   : SInst<"vaddv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">;
+def FMAXNMV : SInst<"vmaxnmv", "sd", "fQfQd">;
+def FMINNMV : SInst<"vminnmv", "sd", "fQfQd">;
  
 ////////////////////////////////////////////////////////////////////////////////
 // Newly added Vector Extract for f64
@@ -1083,7 +1083,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 // Scalar Reduce Pairwise Addition (Scalar and Floating Point)
-def SCALAR_ADDP  : SInst<"vpadd", "sd", "SfSHlSHd">;
+def SCALAR_ADDP  : SInst<"vpadd", "sd", "SfSHlSHdSHUl">;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Scalar Reduce Floating Point Pairwise Max/Min
Index: tools/clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1985,6 +1985,7 @@
     s = "vqrshlu"; OverloadInt = true; break;
   // Scalar Reduce Pairwise Add
   case AArch64::BI__builtin_neon_vpaddd_s64:
+  case AArch64::BI__builtin_neon_vpaddd_u64:
     Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
     break;
   case AArch64::BI__builtin_neon_vpadds_f32:
@@ -2069,23 +2070,36 @@
   case AArch64::BI__builtin_neon_vaddvq_s8:
   case AArch64::BI__builtin_neon_vaddvq_s16:
   case AArch64::BI__builtin_neon_vaddvq_s32:
+  case AArch64::BI__builtin_neon_vaddvq_s64:
   case AArch64::BI__builtin_neon_vaddv_u8:
   case AArch64::BI__builtin_neon_vaddv_u16:
   case AArch64::BI__builtin_neon_vaddvq_u8:
   case AArch64::BI__builtin_neon_vaddvq_u16:
   case AArch64::BI__builtin_neon_vaddvq_u32:
+  case AArch64::BI__builtin_neon_vaddvq_u64:
+  case AArch64::BI__builtin_neon_vaddv_f32:
+  case AArch64::BI__builtin_neon_vaddvq_f32:
+  case AArch64::BI__builtin_neon_vaddvq_f64:
     Int = Intrinsic::aarch64_neon_vaddv;
     AcrossVec = true; ExtendEle = false; s = "vaddv"; break;      
+  case AArch64::BI__builtin_neon_vmaxv_f32:
   case AArch64::BI__builtin_neon_vmaxvq_f32:
+  case AArch64::BI__builtin_neon_vmaxvq_f64:
     Int = Intrinsic::aarch64_neon_vmaxv;
     AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
+  case AArch64::BI__builtin_neon_vminv_f32:
   case AArch64::BI__builtin_neon_vminvq_f32:
+  case AArch64::BI__builtin_neon_vminvq_f64:
     Int = Intrinsic::aarch64_neon_vminv;
     AcrossVec = true; ExtendEle = false; s = "vminv"; break;
+  case AArch64::BI__builtin_neon_vmaxnmv_f32:
   case AArch64::BI__builtin_neon_vmaxnmvq_f32:
+  case AArch64::BI__builtin_neon_vmaxnmvq_f64:
     Int = Intrinsic::aarch64_neon_vmaxnmv;
     AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
+  case AArch64::BI__builtin_neon_vminnmv_f32:
   case AArch64::BI__builtin_neon_vminnmvq_f32:
+  case AArch64::BI__builtin_neon_vminnmvq_f64:
     Int = Intrinsic::aarch64_neon_vminnmv;
     AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
   // Scalar Integer Saturating Doubling Multiply Half High
Index: tools/clang/test/CodeGen/aarch64-neon-intrinsics.c
===================================================================
--- tools/clang/test/CodeGen/aarch64-neon-intrinsics.c
+++ tools/clang/test/CodeGen/aarch64-neon-intrinsics.c
@@ -11218,3 +11218,100 @@
 // CHECK: fabd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
   return vabdd_f64(a, b);
 }
+
+float32_t test_vaddv_f32(float32x2_t a) {
+  // CHECK-LABEL: test_vaddv_f32
+  return vaddv_f32(a);
+  // CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+float32_t test_vaddvq_f32(float32x4_t a) {
+  // CHECK-LABEL: test_vaddvq_f32
+  return vaddvq_f32(a);
+  // CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+  // CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+float64_t test_vaddvq_f64(float64x2_t a) {
+  // CHECK-LABEL: test_vaddvq_f64
+  return vaddvq_f64(a);
+  // CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+float32_t test_vmaxv_f32(float32x2_t a) {
+  // CHECK-LABEL: test_vmaxv_f32
+  return vmaxv_f32(a);
+  // CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+float64_t test_vmaxvq_f64(float64x2_t a) {
+  // CHECK-LABEL: test_vmaxvq_f64
+  return vmaxvq_f64(a);
+  // CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+float32_t test_vminv_f32(float32x2_t a) {
+  // CHECK-LABEL: test_vminv_f32
+  return vminv_f32(a);
+  // CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+float64_t test_vminvq_f64(float64x2_t a) {
+  // CHECK-LABEL: test_vminvq_f64
+  return vminvq_f64(a);
+  // CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+float64_t test_vmaxnmvq_f64(float64x2_t a) {
+  // CHECK-LABEL: test_vmaxnmvq_f64
+  return vmaxnmvq_f64(a);
+  // CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+float32_t test_vmaxnmv_f32(float32x2_t a) {
+  // CHECK-LABEL: test_vmaxnmv_f32
+  return vmaxnmv_f32(a);
+  // CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+float64_t test_vminnmvq_f64(float64x2_t a) {
+  // CHECK-LABEL: test_vminnmvq_f64
+  return vminnmvq_f64(a);
+  // CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+float32_t test_vminnmv_f32(float32x2_t a) {
+  // CHECK-LABEL: test_vminnmv_f32
+  return vminnmv_f32(a);
+  // CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+}
+
+int64x2_t test_vpaddq_s64(int64x2_t a, int64x2_t b) {
+  // CHECK-LABEL: test_vpaddq_s64
+  return vpaddq_s64(a, b);
+  // CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+}
+
+uint64x2_t test_vpaddq_u64(uint64x2_t a, uint64x2_t b) {
+  // CHECK-LABEL: test_vpaddq_u64
+  return vpaddq_u64(a, b);
+  // CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+}
+
+uint64_t test_vpaddd_u64(uint64x2_t a) {
+  // CHECK-LABEL: test_vpaddd_u64
+  return vpaddd_u64(a);
+  // CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+int64_t test_vaddvq_s64(int64x2_t a) {
+  // CHECK-LABEL: test_vaddvq_s64
+  return vaddvq_s64(a);
+  // CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}
+
+uint64_t test_vaddvq_u64(uint64x2_t a) {
+  // CHECK-LABEL: test_vaddvq_u64
+  return vaddvq_u64(a);
+  // CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+}