================
@@ -0,0 +1,1515 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-a55 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK
+
+define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: add_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vadd.f32 s11, s3, s7
+; CHECK-NEXT: vadd.f32 s10, s2, s6
+; CHECK-NEXT: vadd.f32 s9, s1, s5
+; CHECK-NEXT: vadd.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: sub_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vsub.f32 s11, s3, s7
+; CHECK-NEXT: vsub.f32 s10, s2, s6
+; CHECK-NEXT: vsub.f32 s9, s1, s5
+; CHECK-NEXT: vsub.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: mul_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vmul.f32 s11, s3, s7
+; CHECK-NEXT: vmul.f32 s10, s2, s6
+; CHECK-NEXT: vmul.f32 s9, s1, s5
+; CHECK-NEXT: vmul.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: div_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vdiv.f32 s11, s3, s7
+; CHECK-NEXT: vdiv.f32 s10, s2, s6
+; CHECK-NEXT: vdiv.f32 s9, s1, s5
+; CHECK-NEXT: vdiv.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
+; CHECK-LABEL: fma_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #24
+; CHECK-NEXT: add lr, sp, #8
+; CHECK-NEXT: vmov d3, r2, r3
+; CHECK-NEXT: vld1.64 {d4, d5}, [lr]
+; CHECK-NEXT: vld1.64 {d0, d1}, [r12]
+; CHECK-NEXT: vmov d2, r0, r1
+; CHECK-NEXT: vfma.f32 s3, s7, s11
+; CHECK-NEXT: vfma.f32 s2, s6, s10
+; CHECK-NEXT: vfma.f32 s1, s5, s9
+; CHECK-NEXT: vfma.f32 s0, s4, s8
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptosi_v4i32_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vcvt.s32.f32 s8, s2
+; CHECK-NEXT: vcvt.s32.f32 s4, s0
+; CHECK-NEXT: vcvt.s32.f32 s6, s1
+; CHECK-NEXT: vcvt.s32.f32 s0, s3
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: bx lr
+  %val = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x i32> %val
+}
+
+define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptoui_v4i32_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vcvt.u32.f32 s8, s2
+; CHECK-NEXT: vcvt.u32.f32 s4, s0
+; CHECK-NEXT: vcvt.u32.f32 s6, s1
+; CHECK-NEXT: vcvt.u32.f32 s0, s3
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: bx lr
+  %val = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x i32> %val
+}
+
+define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptosi_v4i64_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov d8, r2, r3
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: mov r6, r1
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: vldr d8, [sp, #56]
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: vmov.32 d11[0], r0
+; CHECK-NEXT: vmov.32 d10[0], r5
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: mov r5, r1
+; CHECK-NEXT: vmov r1, s16
+; CHECK-NEXT: vmov.32 d11[1], r7
+; CHECK-NEXT: vmov.32 d9[0], r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: vmov.32 d10[1], r6
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: vmov.32 d8[0], r0
+; CHECK-NEXT: vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT: vmov.32 d9[1], r5
+; CHECK-NEXT: vmov.32 d8[1], r1
+; CHECK-NEXT: vst1.64 {d8, d9}, [r4:128]
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x i64> %val
+}
+
+define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptoui_v4i64_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov d8, r2, r3
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: bl __aeabi_f2ulz
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: mov r6, r1
+; CHECK-NEXT: bl __aeabi_f2ulz
+; CHECK-NEXT: vldr d8, [sp, #56]
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: vmov.32 d11[0], r0
+; CHECK-NEXT: vmov.32 d10[0], r5
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __aeabi_f2ulz
+; CHECK-NEXT: mov r5, r1
+; CHECK-NEXT: vmov r1, s16
+; CHECK-NEXT: vmov.32 d11[1], r7
+; CHECK-NEXT: vmov.32 d9[0], r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: vmov.32 d10[1], r6
+; CHECK-NEXT: bl __aeabi_f2ulz
+; CHECK-NEXT: vmov.32 d8[0], r0
+; CHECK-NEXT: vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT: vmov.32 d9[1], r5
+; CHECK-NEXT: vmov.32 d8[1], r1
+; CHECK-NEXT: vst1.64 {d8, d9}, [r4:128]
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x i64> %val
+}
+
+define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v4f32_v4i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: movw r12, #0
+; CHECK-NEXT: eor r3, r3, #-2147483648
+; CHECK-NEXT: eor r2, r2, #-2147483648
+; CHECK-NEXT: eor r1, r1, #-2147483648
+; CHECK-NEXT: eor r0, r0, #-2147483648
+; CHECK-NEXT: vldr d16, .LCPI9_0
+; CHECK-NEXT: movt r12, #17200
+; CHECK-NEXT: str r3, [sp, #24]
+; CHECK-NEXT: str r12, [sp, #28]
+; CHECK-NEXT: str r12, [sp, #20]
+; CHECK-NEXT: str r2, [sp, #16]
+; CHECK-NEXT: str r12, [sp, #12]
+; CHECK-NEXT: str r1, [sp, #8]
+; CHECK-NEXT: str r12, [sp, #4]
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: vldr d17, [sp, #24]
+; CHECK-NEXT: vldr d18, [sp, #16]
+; CHECK-NEXT: vldr d19, [sp, #8]
+; CHECK-NEXT: vldr d20, [sp]
+; CHECK-NEXT: vsub.f64 d17, d17, d16
+; CHECK-NEXT: vsub.f64 d18, d18, d16
+; CHECK-NEXT: vsub.f64 d19, d19, d16
+; CHECK-NEXT: vsub.f64 d16, d20, d16
+; CHECK-NEXT: vcvt.f32.f64 s3, d17
+; CHECK-NEXT: vcvt.f32.f64 s2, d18
+; CHECK-NEXT: vcvt.f32.f64 s1, d19
+; CHECK-NEXT: vcvt.f32.f64 s0, d16
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI9_0:
+; CHECK-NEXT: .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT: .long 1127219200
+  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v4f32_v4i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: movw r12, #0
+; CHECK-NEXT: str r3, [sp, #24]
+; CHECK-NEXT: vldr d16, .LCPI10_0
+; CHECK-NEXT: movt r12, #17200
+; CHECK-NEXT: str r12, [sp, #28]
+; CHECK-NEXT: str r12, [sp, #20]
+; CHECK-NEXT: str r2, [sp, #16]
+; CHECK-NEXT: str r12, [sp, #12]
+; CHECK-NEXT: str r1, [sp, #8]
+; CHECK-NEXT: stm sp, {r0, r12}
+; CHECK-NEXT: vldr d17, [sp, #24]
+; CHECK-NEXT: vldr d18, [sp, #16]
+; CHECK-NEXT: vldr d19, [sp, #8]
+; CHECK-NEXT: vldr d20, [sp]
+; CHECK-NEXT: vsub.f64 d17, d17, d16
+; CHECK-NEXT: vsub.f64 d18, d18, d16
+; CHECK-NEXT: vsub.f64 d19, d19, d16
+; CHECK-NEXT: vsub.f64 d16, d20, d16
+; CHECK-NEXT: vcvt.f32.f64 s3, d17
+; CHECK-NEXT: vcvt.f32.f64 s2, d18
+; CHECK-NEXT: vcvt.f32.f64 s1, d19
+; CHECK-NEXT: vcvt.f32.f64 s0, d16
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI10_0:
+; CHECK-NEXT: .long 0 @ double 4503599627370496
+; CHECK-NEXT: .long 1127219200
+  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v4f32_v4i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r6, r2
+; CHECK-NEXT: bl __aeabi_l2f
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: bl __aeabi_l2f
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT: vmov r0, r1, d9
+; CHECK-NEXT: bl __aeabi_l2f
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: vmov r0, r1, d8
+; CHECK-NEXT: bl __aeabi_l2f
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: mov r3, r6
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v4f32_v4i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r6, r2
+; CHECK-NEXT: bl __aeabi_ul2f
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: bl __aeabi_ul2f
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT: vmov r0, r1, d9
+; CHECK-NEXT: bl __aeabi_ul2f
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: vmov r0, r1, d8
+; CHECK-NEXT: bl __aeabi_ul2f
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: mov r3, r6
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: sqrt_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vsqrt.f32 s7, s3
+; CHECK-NEXT: vsqrt.f32 s6, s2
+; CHECK-NEXT: vsqrt.f32 s5, s1
+; CHECK-NEXT: vsqrt.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: rint_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintx.f32 s7, s3
+; CHECK-NEXT: vrintx.f32 s6, s2
+; CHECK-NEXT: vrintx.f32 s5, s1
+; CHECK-NEXT: vrintx.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: nearbyint_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintr.f32 s7, s3
+; CHECK-NEXT: vrintr.f32 s6, s2
+; CHECK-NEXT: vrintr.f32 s5, s1
+; CHECK-NEXT: vrintr.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: maxnum_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vmaxnm.f32 s11, s3, s7
+; CHECK-NEXT: vmaxnm.f32 s10, s2, s6
+; CHECK-NEXT: vmaxnm.f32 s9, s1, s5
+; CHECK-NEXT: vmaxnm.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: minnum_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vminnm.f32 s11, s3, s7
+; CHECK-NEXT: vminnm.f32 s10, s2, s6
+; CHECK-NEXT: vminnm.f32 s9, s1, s5
+; CHECK-NEXT: vminnm.f32 s8, s0, s4
+; CHECK-NEXT: vmov r2, r3, d5
+; CHECK-NEXT: vmov r0, r1, d4
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: ceil_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintp.f32 s7, s3
+; CHECK-NEXT: vrintp.f32 s6, s2
+; CHECK-NEXT: vrintp.f32 s5, s1
+; CHECK-NEXT: vrintp.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: floor_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintm.f32 s7, s3
+; CHECK-NEXT: vrintm.f32 s6, s2
+; CHECK-NEXT: vrintm.f32 s5, s1
+; CHECK-NEXT: vrintm.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @round_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: round_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrinta.f32 s7, s3
+; CHECK-NEXT: vrinta.f32 s6, s2
+; CHECK-NEXT: vrinta.f32 s5, s1
+; CHECK-NEXT: vrinta.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: roundeven_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintn.f32 s7, s3
+; CHECK-NEXT: vrintn.f32 s6, s2
+; CHECK-NEXT: vrintn.f32 s5, s1
+; CHECK-NEXT: vrintn.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: trunc_v4f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vrintz.f32 s7, s3
+; CHECK-NEXT: vrintz.f32 s6, s2
+; CHECK-NEXT: vrintz.f32 s5, s1
+; CHECK-NEXT: vrintz.f32 s4, s0
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: vmov r0, r1, d2
+; CHECK-NEXT: bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+ ret <4 x float> %val
+}
+
+define <4 x i1> @fcmp_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: fcmp_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: vcmp.f32 s1, s5
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f32 s3, s7
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: cmp r0, #0
+; CHECK-NEXT: mvnne r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f32 s0, s4
+; CHECK-NEXT: movweq r2, #1
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: mvnne r2, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f32 s2, s6
+; CHECK-NEXT: movweq r3, #1
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: mvnne r3, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r1, #1
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: mvnne r1, #0
+; CHECK-NEXT: vmov.32 d17[0], r1
+; CHECK-NEXT: vmov.32 d16[0], r3
+; CHECK-NEXT: vmov.32 d17[1], r2
+; CHECK-NEXT: vmov.32 d16[1], r0
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <4 x i1> %val
+}
+
+define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: fcmps_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: vcmpe.f32 s1, s5
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmpe.f32 s3, s7
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: cmp r0, #0
+; CHECK-NEXT: mvnne r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmpe.f32 s0, s4
+; CHECK-NEXT: movweq r2, #1
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: mvnne r2, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmpe.f32 s2, s6
+; CHECK-NEXT: movweq r3, #1
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: mvnne r3, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r1, #1
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: mvnne r1, #0
+; CHECK-NEXT: vmov.32 d17[0], r1
+; CHECK-NEXT: vmov.32 d16[0], r3
+; CHECK-NEXT: vmov.32 d17[1], r2
+; CHECK-NEXT: vmov.32 d16[1], r0
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <4 x i1> %val
+}
+
+
+
+define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: add_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vadd.f64 d18, d18, d16
+; CHECK-NEXT: vadd.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: sub_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vsub.f64 d18, d18, d16
+; CHECK-NEXT: vsub.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: mul_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vmul.f64 d18, d18, d16
+; CHECK-NEXT: vmul.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: div_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vdiv.f64 d18, d18, d16
+; CHECK-NEXT: vdiv.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
+; CHECK-LABEL: fma_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #24
+; CHECK-NEXT: add lr, sp, #8
+; CHECK-NEXT: vmov d20, r0, r1
+; CHECK-NEXT: vmov d21, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vfma.f64 d18, d20, d16
+; CHECK-NEXT: vfma.f64 d19, d21, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d19
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v2i32_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vcvt.s32.f64 s0, d16
+; CHECK-NEXT: vcvt.s32.f64 s2, d17
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: bx lr
+  %val = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x i32> %val
+}
+
+define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v2i32_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vcvt.u32.f64 s0, d16
+; CHECK-NEXT: vcvt.u32.f64 s2, d17
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: bx lr
+  %val = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x i32> %val
+}
+
+define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v2i64_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: bl __aeabi_d2lz
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: bl __aeabi_d2lz
+; CHECK-NEXT: mov r2, r6
+; CHECK-NEXT: mov r3, r7
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x i64> %val
+}
+
+define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v2i64_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: bl __aeabi_d2ulz
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: bl __aeabi_d2ulz
+; CHECK-NEXT: mov r2, r6
+; CHECK-NEXT: mov r3, r7
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x i64> %val
+}
+
+define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v2f64_v2i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: movw r2, #0
+; CHECK-NEXT: vmov.32 r0, d16[0]
+; CHECK-NEXT: movt r2, #17200
+; CHECK-NEXT: eor r0, r0, #-2147483648
+; CHECK-NEXT: str r2, [sp, #12]
+; CHECK-NEXT: str r0, [sp, #8]
+; CHECK-NEXT: vmov.32 r0, d16[1]
+; CHECK-NEXT: str r2, [sp, #4]
+; CHECK-NEXT: vldr d16, .LCPI34_0
+; CHECK-NEXT: eor r0, r0, #-2147483648
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: vldr d17, [sp, #8]
+; CHECK-NEXT: vldr d18, [sp]
+; CHECK-NEXT: vsub.f64 d17, d17, d16
+; CHECK-NEXT: vsub.f64 d16, d18, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI34_0:
+; CHECK-NEXT: .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT: .long 1127219200
+  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v2f64_v2i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r0, sp
+; CHECK-NEXT: vst1.32 {d16[0]}, [r2:32]
+; CHECK-NEXT: vst1.32 {d16[1]}, [r0:32]
+; CHECK-NEXT: movw r0, #0
+; CHECK-NEXT: vldr d16, .LCPI35_0
+; CHECK-NEXT: movt r0, #17200
+; CHECK-NEXT: str r0, [sp, #12]
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: vldr d17, [sp, #8]
+; CHECK-NEXT: vldr d18, [sp]
+; CHECK-NEXT: vsub.f64 d17, d17, d16
+; CHECK-NEXT: vsub.f64 d16, d18, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI35_0:
+; CHECK-NEXT: .long 0 @ double 4503599627370496
+; CHECK-NEXT: .long 1127219200
+  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v2f64_v2i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: bl __aeabi_l2d
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: bl __aeabi_l2d
+; CHECK-NEXT: mov r2, r6
+; CHECK-NEXT: mov r3, r7
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @uitofp_v2f64_v2i64(<2 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v2f64_v2i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: bl __aeabi_ul2d
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: bl __aeabi_ul2d
+; CHECK-NEXT: mov r2, r6
+; CHECK-NEXT: mov r3, r7
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: sqrt_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vsqrt.f64 d16, d16
+; CHECK-NEXT: vsqrt.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: rint_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintx.f64 d16, d16
+; CHECK-NEXT: vrintx.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: nearbyint_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintr.f64 d16, d16
+; CHECK-NEXT: vrintr.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: maxnum_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vmaxnm.f64 d18, d18, d16
+; CHECK-NEXT: vmaxnm.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: minnum_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vminnm.f64 d18, d18, d16
+; CHECK-NEXT: vminnm.f64 d16, d19, d17
+; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: ceil_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintp.f64 d16, d16
+; CHECK-NEXT: vrintp.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: floor_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintm.f64 d16, d16
+; CHECK-NEXT: vrintm.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @round_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: round_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrinta.f64 d16, d16
+; CHECK-NEXT: vrinta.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: roundeven_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintn.f64 d16, d16
+; CHECK-NEXT: vrintn.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: trunc_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vrintz.f64 d16, d16
+; CHECK-NEXT: vrintz.f64 d17, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+define <2 x i1> @fcmp_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: fcmp_v2f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vcmp.f64 d18, d17
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d18, d16
+; CHECK-NEXT: movweq r3, #1
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: mvnne r3, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r2, #1
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: mvnne r2, #0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <2 x i1> %val
+}
+
+define <2 x i1> @fcmps_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: fcmps_v2f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: vcmpe.f64 d18, d17
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmpe.f64 d18, d16
+; CHECK-NEXT: movweq r3, #1
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: mvnne r3, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r2, #1
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: mvnne r2, #0
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <2 x i1> %val
+}
+
+
+
+define <1 x double> @add_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: add_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vadd.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @sub_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: sub_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vsub.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @mul_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: mul_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmul.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @div_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: div_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vdiv.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z) #0 {
+; CHECK-LABEL: fma_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [sp]
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vfma.f64 d16, d18, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v1i32_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vcvt.s32.f64 s0, d16
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+  %val = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x i32> %val
+}
+
+define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v1i32_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vcvt.u32.f64 s0, d16
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+  %val = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x i32> %val
+}
+
+define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v1i64_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __aeabi_d2lz
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x i64> %val
+}
+
+define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v1i64_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __aeabi_d2ulz
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x i64> %val
+}
+
+define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v1f64_v1i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r1, #0
+; CHECK-NEXT: eor r0, r0, #-2147483648
+; CHECK-NEXT: vldr d16, .LCPI59_0
+; CHECK-NEXT: movt r1, #17200
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: str r1, [sp, #4]
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vsub.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI59_0:
+; CHECK-NEXT: .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT: .long 1127219200
+  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v1f64_v1i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r1, #0
+; CHECK-NEXT: vldr d16, .LCPI60_0
+; CHECK-NEXT: movt r1, #17200
+; CHECK-NEXT: stm sp, {r0, r1}
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vsub.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI60_0:
+; CHECK-NEXT: .long 0 @ double 4503599627370496
+; CHECK-NEXT: .long 1127219200
+  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v1f64_v1i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.32 r0, d16[0]
+; CHECK-NEXT: vmov.32 r1, d16[1]
+; CHECK-NEXT: bl __aeabi_l2d
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @uitofp_v1f64_v1i64(<1 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v1f64_v1i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.32 r0, d16[0]
+; CHECK-NEXT: vmov.32 r1, d16[1]
+; CHECK-NEXT: bl __aeabi_ul2d
+; CHECK-NEXT: pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: sqrt_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vsqrt.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: rint_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintx.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: nearbyint_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintr.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: maxnum_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmaxnm.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: minnum_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vminnm.f64 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: ceil_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintp.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: floor_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintm.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @round_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: round_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrinta.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: roundeven_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintn.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: trunc_v1f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vrintz.f64 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
+define <1 x i1> @fcmp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: fcmp_v1f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vcmp.f64 d17, d16
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <1 x i1> %val
+}
+
+define <1 x i1> @fcmps_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: fcmps_v1f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vcmpe.f64 d17, d16
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: bx lr
+entry:
+  %val = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ ret <1 x i1> %val
+}
+
+
+
+define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptrunc_v2f32_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vcvt.f32.f64 s1, d16
+; CHECK-NEXT: vcvt.f32.f64 s0, d17
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: bx lr
+  %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <2 x float> %val
+}
+
+define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
+; CHECK-LABEL: fpext_v2f64_v2f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vcvt.f64.f32 d16, s0
+; CHECK-NEXT: vcvt.f64.f32 d17, s1
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %x, metadata !"fpexcept.strict") #0
+ ret <2 x double> %val
+}
+
+
+attributes #0 = { strictfp }
+
+declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
----------------
davemgreen wrote:
Can remove all of these nowadays.
https://github.com/llvm/llvm-project/pull/137101
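
For reference, a minimal sketch of what the cleaned-up tail of the file would look like (assuming the implicit intrinsic declarations from the linked change apply here, i.e. the IR parser declares an intrinsic automatically when it sees a call to it). Each test keeps only its call site and the trailing declare lines are dropped entirely:

  ; Sketch only: no explicit "declare" line is needed for the constrained
  ; intrinsic below, on the assumption that the parser declares it
  ; implicitly at the call site.
  define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
    %val = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
    ret <1 x double> %val
  }

  attributes #0 = { strictfp }

After removing the declares, the CHECK lines can be regenerated with utils/update_llc_test_checks.py if anything shifts.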
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits