simon_tatham updated this revision to Diff 243793.
simon_tatham added a comment.

Spotted a mistake myself: one of my new isel patterns was generating a signed
VMOVL where it should have been generating an unsigned one. Fixed that, and
updated the llc test that had the wrong expectation in it.

Also rebased to current master.
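
For anyone reading along, a quick illustration (not part of the patch) of why
the signedness matters; the helper name below is made up for the example, but
vmovltq_u16 is one of the intrinsics this patch wires up:

  #include <arm_mve.h>

  /* vmovltq_u16 takes the odd-numbered (top) u16 lanes and zero-extends them
   * to u32, so it has to select vmovlt.u16; the signed pattern would pick
   * vmovlt.s16 and sign-extend any lane whose top bit is set. */
  uint32x4_t widen_top_lanes(uint16x8_t v)
  {
      return vmovltq_u16(v); /* lanes 1,3,5,7 -> zero-extended to 32 bits */
  }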


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D74336/new/

https://reviews.llvm.org/D74336

Files:
  clang/include/clang/Basic/arm_mve.td
  clang/include/clang/Basic/arm_mve_defs.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/arm-mve-intrinsics/vmovl.c
  llvm/lib/Target/ARM/ARMInstrMVE.td
  llvm/test/CodeGen/Thumb2/mve-intrinsics/vmovl.ll
  llvm/test/CodeGen/Thumb2/mve-shuffleext.ll

Index: llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
+++ llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
@@ -15,8 +15,7 @@
 define arm_aapcs_vfpcc <4 x i32> @sext_1357(<8 x i16> %src) {
 ; CHECK-LABEL: sext_1357:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev32.16 q0, q0
-; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlt.s16 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -38,8 +37,7 @@
 define arm_aapcs_vfpcc <4 x i32> @zext_1357(<8 x i16> %src) {
 ; CHECK-LABEL: zext_1357:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev32.16 q0, q0
-; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vmovlt.u16 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -61,8 +59,7 @@
 define arm_aapcs_vfpcc <8 x i16> @sext_13579111315(<16 x i8> %src) {
 ; CHECK-LABEL: sext_13579111315:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev16.8 q0, q0
-; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlt.s8 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -84,8 +81,7 @@
 define arm_aapcs_vfpcc <8 x i16> @zext_13579111315(<16 x i8> %src) {
 ; CHECK-LABEL: zext_13579111315:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev16.8 q0, q0
-; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vmovlt.u8 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
Index: llvm/test/CodeGen/Thumb2/mve-intrinsics/vmovl.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-intrinsics/vmovl.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @test_vmovlbq_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vmovlbq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vmovlbq_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vmovlbq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vmovlbq_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vmovlbq_u8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vmovlbq_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vmovlbq_u16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vmovltq_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vmovltq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vmovltq_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vmovltq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vmovltq_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vmovltq_u8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vmovltq_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vmovltq_u16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
+}
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2394,6 +2394,18 @@
   def : Pat<(sext_inreg (v4i32 MQPR:$src), v4i8),
             (MVE_VMOVLs16bh (MVE_VMOVLs8bh MQPR:$src))>;
 
+  let Predicates = [IsLE, HasMVEInt] in {
+    def : Pat<(sext_inreg (v8i16 (bitconvert (ARMvrev16 (v16i8 MQPR:$src)))),
+                          v8i8), (MVE_VMOVLs8th MQPR:$src)>;
+    def : Pat<(sext_inreg (v4i32 (bitconvert (ARMvrev32 (v8i16 MQPR:$src)))),
+                          v4i16), (MVE_VMOVLs16th MQPR:$src)>;
+    def : Pat<(ARMvbicImm (v8i16 (bitconvert (ARMvrev16 (v16i8 MQPR:$src)))),
+                          (i32 0xAFF)), (MVE_VMOVLu8th MQPR:$src)>;
+    def : Pat<(and (v4i32 (bitconvert (ARMvrev32 (v8i16 MQPR:$src)))),
+                   (v4i32 (ARMvmovImm (i32 0xCFF)))),
+                   (MVE_VMOVLu16th MQPR:$src)>;
+  }
+
   // zext_inreg 16 -> 32
   def : Pat<(and (v4i32 MQPR:$src), (v4i32 (ARMvmovImm (i32 0xCFF)))),
             (MVE_VMOVLu16bh MQPR:$src)>;
Index: clang/test/CodeGen/arm-mve-intrinsics/vmovl.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/arm-mve-intrinsics/vmovl.c
@@ -0,0 +1,126 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vmovlbq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_vmovlbq_s8(int8x16_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovlbq(a);
+#else /* POLYMORPHIC */
+    return vmovlbq_s8(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovlbq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_vmovlbq_s16(int16x8_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovlbq(a);
+#else /* POLYMORPHIC */
+    return vmovlbq_s16(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovlbq_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_vmovlbq_u8(uint8x16_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovlbq(a);
+#else /* POLYMORPHIC */
+    return vmovlbq_u8(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovlbq_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_vmovlbq_u16(uint16x8_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovlbq(a);
+#else /* POLYMORPHIC */
+    return vmovlbq_u16(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovltq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_vmovltq_s8(int8x16_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovltq(a);
+#else /* POLYMORPHIC */
+    return vmovltq_s8(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovltq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_vmovltq_s16(int16x8_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovltq(a);
+#else /* POLYMORPHIC */
+    return vmovltq_s16(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovltq_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_vmovltq_u8(uint8x16_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovltq(a);
+#else /* POLYMORPHIC */
+    return vmovltq_u8(a);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vmovltq_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_vmovltq_u16(uint16x8_t a)
+{
+#ifdef POLYMORPHIC
+    return vmovltq(a);
+#else /* POLYMORPHIC */
+    return vmovltq_u16(a);
+#endif /* POLYMORPHIC */
+}
+
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7058,6 +7058,17 @@
   }
 }
 
+static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
+  // Make a shufflevector that extracts every other element of a vector (evens
+  // or odds, as desired).
+  SmallVector<uint32_t, 16> Indices;
+  unsigned InputElements = V->getType()->getVectorNumElements();
+  for (unsigned i = 0; i < InputElements; i += 2)
+    Indices.push_back(i + Odd);
+  return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
+                                     Indices);
+}
+
 template<unsigned HighBit, unsigned OtherBits>
 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
   // MVE-specific helper function to make a vector splat of a constant such as
Index: clang/include/clang/Basic/arm_mve_defs.td
===================================================================
--- clang/include/clang/Basic/arm_mve_defs.td
+++ clang/include/clang/Basic/arm_mve_defs.td
@@ -128,6 +128,9 @@
 def vrev: CGHelperFn<"ARMMVEVectorElementReverse"> {
   let special_params = [IRBuilderIntParam<1, "unsigned">];
 }
+def unzip: CGHelperFn<"VectorUnzip"> {
+  let special_params = [IRBuilderIntParam<1, "bool">];
+}
 
 // Helper for making boolean flags in IR
 def i1: IRBuilderBase {
Index: clang/include/clang/Basic/arm_mve.td
===================================================================
--- clang/include/clang/Basic/arm_mve.td
+++ clang/include/clang/Basic/arm_mve.td
@@ -347,6 +347,13 @@
           NameOverride<"vcvtq_" # IScalar>;
 }
 
+let params = [s8, u8, s16, u16] in {
+  def vmovlbq: Intrinsic<DblVector, (args Vector:$a),
+    (extend (unzip $a, 0), DblVector, (unsignedflag Scalar))>;
+  def vmovltq: Intrinsic<DblVector, (args Vector:$a),
+    (extend (unzip $a, 1), DblVector, (unsignedflag Scalar))>;
+}
+
 defm : float_int_conversions<f32, u32, fptoui, uitofp>;
 defm : float_int_conversions<f16, u16, fptoui, uitofp>;
 defm : float_int_conversions<f32, s32, fptosi, sitofp>;
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
