Hi t.p.northover,

This patch adds AArch64 NEON support for the bitwise extract instruction
(EXT) and lowers vector shuffles with sequential masks to a new
AArch64ISD::NEON_VEXTRACT node.

http://llvm-reviews.chandlerc.com/D2073

Files:
lib/Target/AArch64/AArch64ISelLowering.cpp
lib/Target/AArch64/AArch64ISelLowering.h
lib/Target/AArch64/AArch64InstrFormats.td
lib/Target/AArch64/AArch64InstrNEON.td
test/CodeGen/AArch64/neon-extract.ll
test/MC/AArch64/neon-diagnostics.s
test/MC/AArch64/neon-extract.s
test/MC/Disassembler/AArch64/neon-instructions.txt
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -886,6 +886,8 @@
return "AArch64ISD::NEON_VDUP";
case AArch64ISD::NEON_VDUPLANE:
return "AArch64ISD::NEON_VDUPLANE";
+ case AArch64ISD::NEON_VEXTRACT:
+ return "AArch64ISD::NEON_VEXTRACT";
default:
return NULL;
}
@@ -3651,7 +3653,7 @@
SDValue
AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
SDLoc dl(Op);
@@ -3665,98 +3667,123 @@
ArrayRef<int> ShuffleMask = SVN->getMask();
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EltSize <= 64) {
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
- int Lane = SVN->getSplatIndex();
- // If this is undef splat, generate it via "just" vdup, if possible.
- if (Lane == -1) Lane = 0;
-
- // Test if V1 is a SCALAR_TO_VECTOR.
- if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
- }
- // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
- if (V1.getOpcode() == ISD::BUILD_VECTOR) {
- bool IsScalarToVector = true;
- for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
- if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
- i != (unsigned)Lane) {
- IsScalarToVector = false;
- break;
- }
- if (IsScalarToVector)
- return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
- V1.getOperand(Lane));
- }
- return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
- DAG.getConstant(Lane, MVT::i64));
+ if (EltSize > 64)
+ return SDValue();
+
+  // If the elements of the shuffle mask are all the same constant, we can
+  // transform it into either NEON_VDUP or NEON_VDUPLANE.
+ if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
+ int Lane = SVN->getSplatIndex();
+    // If this is an undef splat, generate it via "just" vdup, if possible.
+ if (Lane == -1) Lane = 0;
+
+ // Test if V1 is a SCALAR_TO_VECTOR.
+ if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
}
- // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert
- // by element from V2 to V1 .
- // If shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a
- // better choice to be inserted than V1 as less insert needed, so we count
- // element to be inserted for both V1 and V2, and select less one as insert
- // target.
-
- // Collect elements need to be inserted and their index.
- SmallVector<int, 8> NV1Elt;
- SmallVector<int, 8> N1Index;
- SmallVector<int, 8> NV2Elt;
- SmallVector<int, 8> N2Index;
- int Length = ShuffleMask.size();
- int V1EltNum = V1.getValueType().getVectorNumElements();
- for (int I = 0; I != Length; ++I) {
- if (ShuffleMask[I] != I) {
- NV1Elt.push_back(ShuffleMask[I]);
- N1Index.push_back(I);
- }
+ // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
+ if (V1.getOpcode() == ISD::BUILD_VECTOR) {
+ bool IsScalarToVector = true;
+ for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
+ if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
+ i != (unsigned)Lane) {
+ IsScalarToVector = false;
+ break;
+ }
+ if (IsScalarToVector)
+ return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
+ V1.getOperand(Lane));
}
- for (int I = 0; I != Length; ++I) {
- if (ShuffleMask[I] != (I + V1EltNum)) {
- NV2Elt.push_back(ShuffleMask[I]);
- N2Index.push_back(I);
+ return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
+ DAG.getConstant(Lane, MVT::i64));
+ }
+
+ int Length = ShuffleMask.size();
+ int V1EltNum = V1.getValueType().getVectorNumElements();
+
+  // If the number of V1 elements is the same as the number of shuffle mask
+  // elements and the mask values are sequential, we can transform it into
+  // NEON_VEXTRACT.
+ if (V1EltNum == Length) {
+ // Check if the shuffle mask is sequential.
+ bool IsSequential = true;
+ int CurMask = ShuffleMask[0];
+ for (int I = 0; I < Length; ++I) {
+ if (ShuffleMask[I] != CurMask) {
+ IsSequential = false;
+ break;
}
+ CurMask++;
}
-
- // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2
- // will be inserted.
- SDValue InsV = V1;
- SmallVector<int, 8> InsMasks = NV1Elt;
- SmallVector<int, 8> InsIndex = N1Index;
- if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
- if (NV1Elt.size() > NV2Elt.size()) {
- InsV = V2;
- InsMasks = NV2Elt;
- InsIndex = N2Index;
- }
- } else {
- InsV = DAG.getNode(ISD::UNDEF, dl, VT);
+ if (IsSequential) {
+ assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
+ unsigned VecSize = EltSize * V1EltNum;
+ unsigned Index = (EltSize/8) * ShuffleMask[0];
+ if (VecSize == 64 || VecSize == 128)
+ return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
+ DAG.getConstant(Index, MVT::i64));
}
+ }
- SDValue PassN;
+  // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
+  // insert by element from V2 into V1.
+  // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be
+  // a better insertion target than V1 since fewer inserts are needed, so we
+  // count the elements to be inserted for both V1 and V2 and select the one
+  // needing fewer inserts as the target.
+
+  // Collect the elements that need to be inserted and their indices.
+ SmallVector<int, 8> NV1Elt;
+ SmallVector<int, 8> N1Index;
+ SmallVector<int, 8> NV2Elt;
+ SmallVector<int, 8> N2Index;
+ for (int I = 0; I != Length; ++I) {
+ if (ShuffleMask[I] != I) {
+ NV1Elt.push_back(ShuffleMask[I]);
+ N1Index.push_back(I);
+ }
+ }
+ for (int I = 0; I != Length; ++I) {
+ if (ShuffleMask[I] != (I + V1EltNum)) {
+ NV2Elt.push_back(ShuffleMask[I]);
+ N2Index.push_back(I);
+ }
+ }
- for (int I = 0, E = InsMasks.size(); I != E; ++I) {
- SDValue ExtV = V1;
- int Mask = InsMasks[I];
- if (Mask > V1EltNum) {
- ExtV = V2;
- Mask -= V1EltNum;
- }
- // Any value type smaller than i32 is illegal in AArch64, and this lower
- // function is called after legalize pass, so we need to legalize
- // the result here.
- EVT EltVT = MVT::i32;
- if(EltSize == 64)
- EltVT = MVT::i64;
- PassN = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
- DAG.getConstant(Mask, MVT::i64));
- PassN = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, PassN,
- DAG.getConstant(InsIndex[I], MVT::i64));
+  // Decide which vector to insert into. If all lanes mismatch, neither V1
+  // nor V2 is usable as the base, so insert into an UNDEF vector instead.
+ SDValue InsV = V1;
+ SmallVector<int, 8> InsMasks = NV1Elt;
+ SmallVector<int, 8> InsIndex = N1Index;
+ if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
+ if (NV1Elt.size() > NV2Elt.size()) {
+ InsV = V2;
+ InsMasks = NV2Elt;
+ InsIndex = N2Index;
}
- return PassN;
+ } else {
+ InsV = DAG.getNode(ISD::UNDEF, dl, VT);
}
- return SDValue();
+ for (int I = 0, E = InsMasks.size(); I != E; ++I) {
+ SDValue ExtV = V1;
+ int Mask = InsMasks[I];
+ if (Mask >= V1EltNum) {
+ ExtV = V2;
+ Mask -= V1EltNum;
+ }
+    // Any value type smaller than i32 is illegal in AArch64, and this
+    // lowering function is called after the legalize pass, so we need to
+    // legalize the result here.
+    EVT EltVT = MVT::i32;
+    if (EltSize == 64)
+      EltVT = MVT::i64;
+ ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
+ DAG.getConstant(Mask, MVT::i64));
+ InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
+ DAG.getConstant(InsIndex[I], MVT::i64));
+ }
+ return InsV;
}
AArch64TargetLowering::ConstraintType
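
For reference, the NEON_VEXTRACT path above reduces to simple index
arithmetic: a shuffle mask selecting consecutive lanes out of the
concatenation V1:V2 becomes an EXT whose immediate is the byte offset of the
first selected lane. A minimal standalone sketch of that check (illustrative
only, not LLVM code; the name extImmediate is made up):

  #include <cstdio>
  #include <vector>

  // Returns the EXT byte-offset immediate for a mask that selects
  // consecutive elements of the concatenation V1:V2, or -1 if the mask is
  // not sequential and EXT does not apply. EltBits is the element size.
  static int extImmediate(const std::vector<int> &Mask, unsigned EltBits) {
    int Cur = Mask[0];
    for (int M : Mask)
      if (M != Cur++)
        return -1;
    return (EltBits / 8) * Mask[0]; // byte offset of the first element
  }

  int main() {
    // <4 x i16> mask <3, 4, 5, 6> -> ext ..., #0x6 (lane 3 * 2 bytes).
    std::vector<int> Mask = {3, 4, 5, 6};
    std::printf("imm = #0x%x\n", extImmediate(Mask, 16));
  }
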
Index: lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.h
+++ lib/Target/AArch64/AArch64ISelLowering.h
@@ -142,7 +142,10 @@
NEON_VDUP,
// Vector dup by lane
- NEON_VDUPLANE
+ NEON_VDUPLANE,
+
+ // Vector extract
+ NEON_VEXTRACT
};
}
Index: lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- lib/Target/AArch64/AArch64InstrFormats.td
+++ lib/Target/AArch64/AArch64InstrFormats.td
@@ -971,6 +971,24 @@
: InstAlias<Asm, Result, Emit> {
}
+// Format AdvSIMD bitwise extract
+class NeonI_BitExtract<bit q, bits<2> op2,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29-24} = 0b101110;
+ let Inst{23-22} = op2;
+ let Inst{21} = 0b0;
+ // Inherit Rm in 20-16
+ let Inst{15} = 0b0;
+ // imm4 in 14-11
+ let Inst{10} = 0b0;
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
// Format AdvSIMD 3 vector registers with same vector type
class NeonI_3VSame<bit q, bit u, bits<2> size, bits<5> opcode,
dag outs, dag ins, string asmstr,
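
The field layout in NeonI_BitExtract can be cross-checked against the
encodings in the new MC tests. A rough standalone encoder written from the
layout above (an illustration, not an LLVM API):

  #include <cstdint>
  #include <cstdio>

  // Assemble an EXT word following the NeonI_BitExtract bit positions:
  // 0 | q | 101110 | op2=00 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd.
  static uint32_t encodeEXT(bool Q, unsigned Rm, unsigned Imm4, unsigned Rn,
                            unsigned Rd) {
    uint32_t Inst = 0;
    Inst |= uint32_t(Q) << 30;   // 0 for .8b, 1 for .16b
    Inst |= 0x2Eu << 24;         // fixed bits 29-24 = 0b101110
    Inst |= (Rm & 0x1Fu) << 16;  // second source register
    Inst |= (Imm4 & 0xFu) << 11; // byte index immediate
    Inst |= (Rn & 0x1Fu) << 5;   // first source register
    Inst |= Rd & 0x1Fu;          // destination register
    return Inst;
  }

  int main() {
    // ext v0.8b, v1.8b, v2.8b, #0x3 -> 0x2e021820 (bytes 20 18 02 2e in the tests)
    std::printf("%08x\n", encodeEXT(false, 2, 3, 1, 0));
    // ext v0.16b, v1.16b, v2.16b, #0x3 -> 0x6e021820
    std::printf("%08x\n", encodeEXT(true, 2, 3, 1, 0));
  }
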
Index: lib/Target/AArch64/AArch64InstrNEON.td
===================================================================
--- lib/Target/AArch64/AArch64InstrNEON.td
+++ lib/Target/AArch64/AArch64InstrNEON.td
@@ -50,6 +50,9 @@
[SDTCisVec<0>]>>;
def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
[SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
+def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
//===----------------------------------------------------------------------===//
// Multiclasses
@@ -3944,6 +3947,18 @@
let PrintMethod = "printNeonUImm8OperandBare";
}
+def neon_uimm3 : Operand<i64>,
+ ImmLeaf<i64, [{(void)Imm; return true;}]> {
+ let ParserMatchClass = uimm3_asmoperand;
+ let PrintMethod = "printNeonUImm8Operand";
+}
+
+def neon_uimm4 : Operand<i64>,
+ ImmLeaf<i64, [{(void)Imm; return true;}]> {
+ let ParserMatchClass = uimm4_asmoperand;
+ let PrintMethod = "printNeonUImm8Operand";
+}
+
class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
: NeonI_copy<0b1, 0b0, 0b0011,
@@ -3959,6 +3974,47 @@
let Constraints = "$src = $Rd";
}
+// Bitwise Extract
+class NeonI_Extract<bit q, bits<2> op2, string asmop,
+ string OpS, RegisterOperand OpVPR, Operand OpImm>
+ : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
+ (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
+ asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
+ ", $Rm." # OpS # ", $Index",
+ [],
+                   NoItinerary> {
+ bits<4> Index;
+}
+
+def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
+ VPR64, neon_uimm3> {
+ let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
+}
+
+def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
+ VPR128, neon_uimm4> {
+ let Inst{14-11} = Index;
+}
+
+class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
+ Operand OpImm>
+ : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
+ (i64 OpImm:$Imm))),
+ (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
+
+def : NI_Extract<v8i8, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v4i16, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v2i32, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v1i64, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v2f32, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v1f64, VPR64, EXTvvvi_8b, neon_uimm3>;
+def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
+def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
+
// The followings are for instruction class (3V Elem)
// Variant 1
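
Semantically, EXT extracts a contiguous run of bytes from the concatenation
Rn:Rm starting at the byte given by the immediate, which is why a single
byte-level instruction can serve every element type in the NI_Extract
patterns above. A small model of the 64-bit form (plain arrays standing in
for V registers; illustrative only):

  #include <cstdint>
  #include <cstdio>

  // Model of "ext vd.8b, vn.8b, vm.8b, #imm": the result is bytes
  // imm..imm+7 of the 16-byte concatenation Vn:Vm (Vn in the low half).
  static void ext8b(const uint8_t Vn[8], const uint8_t Vm[8], unsigned Imm,
                    uint8_t Vd[8]) {
    uint8_t Concat[16];
    for (int I = 0; I != 8; ++I) {
      Concat[I] = Vn[I];
      Concat[I + 8] = Vm[I];
    }
    for (int I = 0; I != 8; ++I)
      Vd[I] = Concat[Imm + I];
  }

  int main() {
    uint8_t A[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    uint8_t B[8] = {8, 9, 10, 11, 12, 13, 14, 15};
    uint8_t D[8];
    ext8b(A, B, 3, D); // same as an i8 shuffle mask <3,4,5,6,7,8,9,10>
    for (uint8_t V : D)
      std::printf("%d ", V); // prints 3 4 5 6 7 8 9 10
    std::printf("\n");
  }
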
Index: test/CodeGen/AArch64/neon-extract.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/neon-extract.ll
@@ -0,0 +1,190 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vext_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_s8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_s16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <2 x i32> @test_vext_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vext_s32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i32> %vext
+}
+
+define <1 x i64> @test_vext_s64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK: test_vext_s64:
+entry:
+ %vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
+ ret <1 x i64> %vext
+}
+
+define <16 x i8> @test_vextq_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_s8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_s16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
+
+define <4 x i32> @test_vextq_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vextq_s32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %vext
+}
+
+define <2 x i64> @test_vextq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vextq_s64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %vext
+}
+
+define <8 x i8> @test_vext_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_u8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_u16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <2 x i32> @test_vext_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vext_u32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i32> %vext
+}
+
+define <1 x i64> @test_vext_u64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK: test_vext_u64:
+entry:
+ %vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
+ ret <1 x i64> %vext
+}
+
+define <16 x i8> @test_vextq_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_u8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_u16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
+
+define <4 x i32> @test_vextq_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vextq_u32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %vext
+}
+
+define <2 x i64> @test_vextq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vextq_u64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %vext
+}
+
+define <2 x float> @test_vext_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vext_f32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x float> %vext
+}
+
+define <1 x double> @test_vext_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK: test_vext_f64:
+entry:
+ %vext = shufflevector <1 x double> %a, <1 x double> %b, <1 x i32> <i32 0>
+ ret <1 x double> %vext
+}
+
+define <4 x float> @test_vextq_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vextq_f32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x float> %vext
+}
+
+define <2 x double> @test_vextq_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vextq_f64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x double> %vext
+}
+
+define <8 x i8> @test_vext_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_p8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_p16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <16 x i8> @test_vextq_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_p8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_p16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
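
Note that the immediates in the CHECK lines above are byte offsets, not lane
numbers: test_vextq_s16 starts at lane 3 of an i16 vector, giving 3 * 2 =
#0x6, and test_vextq_s64 starts at lane 1 of an i64 vector, giving 1 * 8 =
#0x8. The <1 x i64> and <1 x double> cases are identity shuffles, so no ext
is expected and only the label is checked.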
Index: test/MC/AArch64/neon-diagnostics.s
===================================================================
--- test/MC/AArch64/neon-diagnostics.s
+++ test/MC/AArch64/neon-diagnostics.s
@@ -4557,3 +4557,44 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: uqxtn s19, s14
// CHECK-ERROR: ^
+
+//----------------------------------------------------------------------
+// Bitwise extract
+//----------------------------------------------------------------------
+
+ ext v0.8b, v1.8b, v2.4h, #0x3
+ ext v0.4h, v1.4h, v2.4h, #0x3
+ ext v0.2s, v1.2s, v2.2s, #0x1
+ ext v0.1d, v1.1d, v2.1d, #0x0
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.8b, v1.8b, v2.4h, #0x3
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.4h, v1.4h, v2.4h, #0x3
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.2s, v1.2s, v2.2s, #0x1
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.1d, v1.1d, v2.1d, #0x0
+// CHECK-ERROR: ^
+
+ ext v0.16b, v1.16b, v2.8h, #0x3
+ ext v0.8h, v1.8h, v2.8h, #0x3
+ ext v0.4s, v1.4s, v2.4s, #0x1
+ ext v0.2d, v1.2d, v2.2d, #0x0
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.16b, v1.16b, v2.8h, #0x3
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.8h, v1.8h, v2.8h, #0x3
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.4s, v1.4s, v2.4s, #0x1
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: ext v0.2d, v1.2d, v2.2d, #0x0
+// CHECK-ERROR: ^
+
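
All of these are rejected because EXT is only defined for the .8b and .16b
arrangements with matching operand types, and the index operand is limited
to uimm3 for .8b and uimm4 for .16b by the new operand classes.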
Index: test/MC/AArch64/neon-extract.s
===================================================================
--- /dev/null
+++ test/MC/AArch64/neon-extract.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//------------------------------------------------------------------------------
+// Instructions for bitwise extract
+//------------------------------------------------------------------------------
+
+ ext v0.8b, v1.8b, v2.8b, #0x3
+ ext v0.16b, v1.16b, v2.16b, #0x3
+
+// CHECK: ext v0.8b, v1.8b, v2.8b, #0x3 // encoding: [0x20,0x18,0x02,0x2e]
+// CHECK: ext v0.16b, v1.16b, v2.16b, #0x3 // encoding: [0x20,0x18,0x02,0x6e]
Index: test/MC/Disassembler/AArch64/neon-instructions.txt
===================================================================
--- test/MC/Disassembler/AArch64/neon-instructions.txt
+++ test/MC/Disassembler/AArch64/neon-instructions.txt
@@ -1713,3 +1713,12 @@
0x52,0x4a,0x21,0x7e
0x34,0x4a,0x61,0x7e
0xd3,0x49,0xa1,0x7e
+
+#----------------------------------------------------------------------
+# Bitwise extract
+#----------------------------------------------------------------------
+0x20,0x18,0x02,0x2e
+0x20,0x18,0x02,0x6e
+# CHECK: ext v0.8b, v1.8b, v2.8b, #0x3
+# CHECK: ext v0.16b, v1.16b, v2.16b, #0x3
+