These IR instructions produce an undefined result when the shift amount
is equal to the size of the vector element, whereas the corresponding
shifts are well defined for NEON. Lower the affected ARM builtins to the
NEON vshift intrinsics instead of plain IR shifts.
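
As a hypothetical illustration (these IR lines are not part of the
patch), an unsigned right shift of i8 elements by 8 is undefined with a
plain IR shift, while the NEON intrinsic is well defined; note that the
vshift intrinsics encode right shifts as negative amounts, which is what
the new test's CHECK lines expect:

    ; Plain IR shift: amount == element size, so the result is undefined.
    %bad = lshr <8 x i8> %a, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
    ; NEON intrinsic: well defined; -8 encodes a right shift by 8.
    %ok = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %a,
              <8 x i8> <i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8>)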

For now, the old IR-based lowering is kept for AArch64, inlined rather
than delegated to the ARM builtin path. It will be fixed separately, as
that requires changes in the backend.

http://llvm-reviews.chandlerc.com/D1819

Files:
  lib/CodeGen/CGBuiltin.cpp
  test/CodeGen/arm-neon-shifts.c

Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -2107,18 +2107,28 @@
 
   // Shift by immediate
   case AArch64::BI__builtin_neon_vshr_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
   case AArch64::BI__builtin_neon_vshrq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+    if (usgn)
+      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+    else
+      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
   case AArch64::BI__builtin_neon_vrshr_n_v:
   case AArch64::BI__builtin_neon_vrshrq_n_v:
     Int = usgn ? Intrinsic::aarch64_neon_vurshr
                : Intrinsic::aarch64_neon_vsrshr;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
   case AArch64::BI__builtin_neon_vsra_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
   case AArch64::BI__builtin_neon_vsraq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+    if (usgn)
+      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+    else
+      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+    return Builder.CreateAdd(Ops[0], Ops[1]);
   case AArch64::BI__builtin_neon_vrsra_n_v:
   case AArch64::BI__builtin_neon_vrsraq_n_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -2129,9 +2139,10 @@
     return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
   }
   case AArch64::BI__builtin_neon_vshl_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
   case AArch64::BI__builtin_neon_vshlq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
+    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
+                             "vshl_n");
   case AArch64::BI__builtin_neon_vqshl_n_v:
     return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
   case AArch64::BI__builtin_neon_vqshlq_n_v:
@@ -3077,20 +3088,15 @@
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
   case ARM::BI__builtin_neon_vshl_n_v:
   case ARM::BI__builtin_neon_vshlq_n_v:
-    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
-    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
-                             "vshl_n");
+    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl_n", 1, false);
   case ARM::BI__builtin_neon_vshrn_n_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                         Ops, "vshrn_n", 1, true);
   case ARM::BI__builtin_neon_vshr_n_v:
   case ARM::BI__builtin_neon_vshrq_n_v:
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
-    if (usgn)
-      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
-    else
-      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshr_n", 1, true);
   case ARM::BI__builtin_neon_vsri_n_v:
   case ARM::BI__builtin_neon_vsriq_n_v:
     rightShift = true;
@@ -3103,11 +3109,10 @@
   case ARM::BI__builtin_neon_vsraq_n_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
-    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
-    if (usgn)
-      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
-    else
-      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty),
+                                 Ops[1], Ops[2], "vsra_n");
     return Builder.CreateAdd(Ops[0], Ops[1]);
   case ARM::BI__builtin_neon_vst1_v:
   case ARM::BI__builtin_neon_vst1q_v:
Index: test/CodeGen/arm-neon-shifts.c
===================================================================
--- /dev/null
+++ test/CodeGen/arm-neon-shifts.c
@@ -0,0 +1,71 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple thumbv7-apple-darwin \
+// RUN:   -target-cpu cortex-a8 \
+// RUN:   -ffreestanding \
+// RUN:   -emit-llvm -w -O1 -o - %s | FileCheck %s
+
+// Check the IR emission for NEON shifts
+
+#include <arm_neon.h>
+
+uint8x8_t test_shift_vshl(uint8x8_t a) {
+  // CHECK-LABEL: test_shift_vshl
+  // CHECK: %vshl_n = tail call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %a, <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>)
+  return vshl_n_u8(a, 7);
+}
+
+uint8x8_t test_shift_vshr(uint8x8_t a) {
+  // CHECK-LABEL: test_shift_vshr
+  // CHECK: %vshr_n = tail call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %a, <8 x i8> <i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8>)
+  return vshr_n_u8(a, 8);
+}
+
+uint8x8_t test_shift_vshrn(uint16x8_t a) {
+  // CHECK-LABEL: test_shift_vshrn
+  // CHECK: %vshrn_n1 = tail call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %a, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
+  return vshrn_n_u16(a, 8);
+}
+
+uint16x8_t test_shift_vshll(uint8x8_t a) {
+  // CHECK-LABEL: test_shift_vshll
+  // CHECK: %vshll = tail call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %a, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
+  return vshll_n_u8(a, 8);
+}
+
+uint8x8_t test_shift_vrshr(uint8x8_t a) {
+  // CHECK-LABEL: test_shift_vrshr
+  // CHECK: %vrshr_n = tail call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %a, <8 x i8> <i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8>)
+  return vrshr_n_u8(a, 8);
+}
+
+uint8x8_t test_shift_vrshrn(uint16x8_t a) {
+  // CHECK-LABEL: test_shift_vrshrn
+  // CHECK: %vrshrn_n1 = tail call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %a, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
+  return vrshrn_n_u16(a, 8);
+}
+
+uint8x8_t test_shift_vsra(uint8x8_t a, uint8x8_t b) {
+  // CHECK-LABEL: test_shift_vsra
+  // CHECK: %vsra_n = tail call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %b, <8 x i8> <i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8>)
+  // CHECK: %0 = add <8 x i8> %vsra_n, %a
+  return vsra_n_u8(a, b, 8);
+}
+
+uint8x8_t test_shift_vrsra(uint8x8_t a, uint8x8_t b) {
+  // CHECK-LABEL: test_shift_vrsra
+  // CHECK: %0 = tail call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %b, <8 x i8> <i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8>)
+  // CHECK: %vrsra_n = add <8 x i8> %0, %a
+  return vrsra_n_u8(a, b, 8);
+}
+
+uint8x8_t test_shift_vsli(uint8x8_t a, uint8x8_t b) {
+  // CHECK-LABEL: test_shift_vsli
+  // CHECK: %vsli_n = tail call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>)
+  return vsli_n_u8(a, b, 7);
+}
+
+uint8x8_t test_shift_vsri(uint8x8_t a, uint8x8_t b) {
+  // CHECK-LABEL: test_shift_vsri
+  // CHECK: %vsli_n = tail call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> <i8 -7, i8 -7, i8 -7, i8 -7, i8 -7, i8 -7, i8 -7, i8 -7>)
+  return vsri_n_u8(a, b, 7);
+}