https://github.com/CarolineConcatto updated 
https://github.com/llvm/llvm-project/pull/169661

>From da39b98a9e362ecabf5d203832353f477800f638 Mon Sep 17 00:00:00 2001
From: CarolineConcatto <[email protected]>
Date: Tue, 25 Nov 2025 14:38:25 +0000
Subject: [PATCH] [Clang][LLVM][AArch64]Add support for svrint{32|64}{z|x}
 intrinsics

This patch adds support in Clang for these assembly instructions:

FRINT32X
FRINT32Z
FRINT64X
FRINT64Z

By adding support for these intrinsics:

```
// Variant is available for _f64
svfloat32_t svrint32x[_f32]_z(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint32x[_f32]_x(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint32x[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t 
zn);

// Variant is available for _f64
svfloat32_t svrint32z[_f32]_z(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint32z[_f32]_x(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint32z[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t 
zn);

// Variant is available for _f64
svfloat32_t svrint64x[_f32]_z(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint64x[_f32]_x(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint64x[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t 
zn);

// Variant is available for _f64
svfloat32_t svrint64z[_f32]_z(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint64z[_f32]_x(svbool_t pg, svfloat32_t zn);
// Variant is available for _f64
svfloat32_t svrint64z[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t 
zn);
```
according to the ACLE[1]

[1] https://github.com/ARM-software/acle/pull/412
---
 clang/include/clang/Basic/arm_sve.td          |   7 +
 .../sve2p2-intrinsics/acle_sve_rintx.c        | 445 ++++++++++++++++++
 llvm/include/llvm/IR/IntrinsicsAArch64.td     |   4 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |  20 +
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  20 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  19 +-
 .../AArch64/sve2p2-intrinsics-fp-arith.ll     | 283 +++++++++++
 7 files changed, 788 insertions(+), 10 deletions(-)
 create mode 100644 
clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve_rintx.c
 create mode 100644 llvm/test/CodeGen/AArch64/sve2p2-intrinsics-fp-arith.ll

diff --git a/clang/include/clang/Basic/arm_sve.td 
b/clang/include/clang/Basic/arm_sve.td
index d2b7b78b9970f..070c2b9509de8 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -778,6 +778,13 @@ defm SVRINTX : SInstZPZ<"svrintx", "hfd", 
"aarch64_sve_frintx">;
 defm SVRINTZ : SInstZPZ<"svrintz", "hfd", "aarch64_sve_frintz">;
 defm SVSQRT  : SInstZPZ<"svsqrt",  "hfd", "aarch64_sve_fsqrt">;
 
+let SVETargetGuard = "sve2p2|sme2p2", SMETargetGuard = "sve2p2|sme2p2" in {
+defm SVRINT32X : SInstZPZ<"svrint32x", "fd", "aarch64_sve_frint32x">;
+defm SVRINT32Z : SInstZPZ<"svrint32z", "fd", "aarch64_sve_frint32z">;
+defm SVRINT64X : SInstZPZ<"svrint64x", "fd", "aarch64_sve_frint64x">;
+defm SVRINT64Z : SInstZPZ<"svrint64z", "fd", "aarch64_sve_frint64z">;
+}
+
 let SMETargetGuard = "sme2,ssve-fexpa" in {
 def SVEXPA  : SInst<"svexpa[_{d}]",  "du",   "hfd", MergeNone, 
"aarch64_sve_fexpa_x", [VerifyRuntimeMode]>;
 }
diff --git a/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve_rintx.c 
b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve_rintx.c
new file mode 100644
index 0000000000000..2b275210e9446
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve_rintx.c
@@ -0,0 +1,445 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve 
-target-feature +sve2 -target-feature +sve2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme 
-target-feature +sme2 -target-feature +sme2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve 
-target-feature +sme -target-feature +sve2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve 
-target-feature +sme -target-feature +sme2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu 
-target-feature +sve -target-feature +sve2 -target-feature +sve2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve 
-target-feature +sve2 -target-feature +sve2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - -x c++ %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu 
-target-feature +sve -target-feature +sve2 -target-feature +sve2p2 \
+// RUN:   -Werror -emit-llvm -disable-O0-optnone -o - -x c++ %s | opt -S -p 
mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve 
-target-feature +sve2 -target-feature +sve2p2 \
+// RUN:   -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+//
+#include <arm_sve.h>
+
+#if defined __ARM_FEATURE_SME
+#define MODE_ATTR __arm_streaming
+#else
+#define MODE_ATTR
+#endif
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+// FRINT32X
+// CHECK-LABEL: @test_svrint32x_f32_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f32_mu13__SVFloat32_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32x_f32_m(svfloat32_t inactive, svbool_t pg, 
svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f32,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32x_f64_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f64_mu13__SVFloat64_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32x_f64_m(svfloat64_t inactive, svbool_t pg, 
svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f64,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32x_f32_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f32_xu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32x_f32_x(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f32,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32x_f64_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f64_xu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32x_f64_x(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f64,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32x_f32_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f32_zu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32x.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32x_f32_z(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f32,_z,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32x_f64_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32x_f64_zu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32x.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32x_f64_z(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32x,_f64,_z,)(pg, op);
+}
+
+// FRINT32Z
+// CHECK-LABEL: @test_svrint32z_f32_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f32_mu13__SVFloat32_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32z_f32_m(svfloat32_t inactive, svbool_t pg, 
svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f32,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32z_f64_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f64_mu13__SVFloat64_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32z_f64_m(svfloat64_t inactive, svbool_t pg, 
svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f64,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32z_f32_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f32_xu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32z_f32_x(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f32,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32z_f64_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f64_xu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32z_f64_x(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f64,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32z_f32_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f32_zu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint32z.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint32z_f32_z(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f32,_z,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint32z_f64_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint32z_f64_zu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint32z.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint32z_f64_z(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint32z,_f64,_z,)(pg, op);
+}
+
+// FRINT64X
+// CHECK-LABEL: @test_svrint64x_f32_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f32_mu13__SVFloat32_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64x_f32_m(svfloat32_t inactive, svbool_t pg, 
svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f32,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64x_f64_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f64_mu13__SVFloat64_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64x_f64_m(svfloat64_t inactive, svbool_t pg, 
svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f64,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64x_f32_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f32_xu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64x_f32_x(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f32,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64x_f64_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f64_xu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64x_f64_x(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f64,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64x_f32_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f32_zu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64x.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64x_f32_z(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f32,_z,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64x_f64_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64x_f64_zu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64x.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64x_f64_z(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64x,_f64,_z,)(pg, op);
+}
+
+// FRINT64Z
+// CHECK-LABEL: @test_svrint64z_f32_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f32_mu13__SVFloat32_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64z_f32_m(svfloat32_t inactive, svbool_t pg, 
svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f32,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64z_f64_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f64_mu13__SVFloat64_tu10__SVBool_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64z_f64_m(svfloat64_t inactive, svbool_t pg, 
svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f64,_m,)(inactive, pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64z_f32_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f32_xu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x 
i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64z_f32_x(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f32,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64z_f64_x(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f64_xu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x 
i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64z_f64_x(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f64,_x,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64z_f32_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f32_zu10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.frint64z.nxv4f32(<vscale x 4 x float> zeroinitializer, 
<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+svfloat32_t test_svrint64z_f32_z(svbool_t pg, svfloat32_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f32,_z,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svrint64z_f64_z(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z20test_svrint64z_f64_zu10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.frint64z.nxv2f64(<vscale x 2 x double> zeroinitializer, 
<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+svfloat64_t test_svrint64z_f64_z(svbool_t pg, svfloat64_t op) MODE_ATTR
+{
+  return SVE_ACLE_FUNC(svrint64z,_f64,_z,)(pg, op);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td 
b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 77fdb8295faa8..f94cb802f397e 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2153,6 +2153,10 @@ def int_aarch64_sve_frintn     : 
AdvSIMD_Merged1VectorArg_Intrinsic;
 def int_aarch64_sve_frintp     : AdvSIMD_Merged1VectorArg_Intrinsic;
 def int_aarch64_sve_frintx     : AdvSIMD_Merged1VectorArg_Intrinsic;
 def int_aarch64_sve_frintz     : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_frint32x   : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_frint32z   : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_frint64x   : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_frint64z   : AdvSIMD_Merged1VectorArg_Intrinsic;
 def int_aarch64_sve_frsqrte_x  : AdvSIMD_1VectorArg_Intrinsic;
 def int_aarch64_sve_frsqrts_x  : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_fscale     : AdvSIMD_SVE_SCALE_Intrinsic;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp 
b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 83ce39fa314d1..ae90899824e45 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -271,9 +271,13 @@ static bool isMergePassthruOpcode(unsigned Opc) {
   case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
   case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
   case AArch64ISD::FRINT_MERGE_PASSTHRU:
+  case AArch64ISD::FRINT32_MERGE_PASSTHRU:
+  case AArch64ISD::FRINT64_MERGE_PASSTHRU:
   case AArch64ISD::FROUND_MERGE_PASSTHRU:
   case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
   case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
+  case AArch64ISD::FTRUNC32_MERGE_PASSTHRU:
+  case AArch64ISD::FTRUNC64_MERGE_PASSTHRU:
   case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
   case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
   case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
@@ -6514,6 +6518,14 @@ SDValue 
AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_frintx:
     return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, DL, Op.getValueType(),
                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
+  case Intrinsic::aarch64_sve_frint32x:
+    return DAG.getNode(AArch64ISD::FRINT32_MERGE_PASSTHRU, DL,
+                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(1));
+  case Intrinsic::aarch64_sve_frint64x:
+    return DAG.getNode(AArch64ISD::FRINT64_MERGE_PASSTHRU, DL,
+                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(1));
   case Intrinsic::aarch64_sve_frinta:
     return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, DL, 
Op.getValueType(),
                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
@@ -6524,6 +6536,14 @@ SDValue 
AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_frintz:
     return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, DL, 
Op.getValueType(),
                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
+  case Intrinsic::aarch64_sve_frint32z:
+    return DAG.getNode(AArch64ISD::FTRUNC32_MERGE_PASSTHRU, DL,
+                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(1));
+  case Intrinsic::aarch64_sve_frint64z:
+    return DAG.getNode(AArch64ISD::FTRUNC64_MERGE_PASSTHRU, DL,
+                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(1));
   case Intrinsic::aarch64_sve_ucvtf:
     return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, DL,
                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td 
b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index e99b3f8ff07e0..64145c0dfe1c2 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -290,9 +290,13 @@ def AArch64frintp_mt : 
SDNode<"AArch64ISD::FCEIL_MERGE_PASSTHRU", SDT_AArch64Ari
 def AArch64frintm_mt : SDNode<"AArch64ISD::FFLOOR_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frinti_mt : SDNode<"AArch64ISD::FNEARBYINT_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frintx_mt : SDNode<"AArch64ISD::FRINT_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
+def AArch64frint32x_mt : SDNode<"AArch64ISD::FRINT32_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
+def AArch64frint64x_mt : SDNode<"AArch64ISD::FRINT64_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frinta_mt : SDNode<"AArch64ISD::FROUND_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frintn_mt : SDNode<"AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frintz_mt : SDNode<"AArch64ISD::FTRUNC_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
+def AArch64frint32z_mt : SDNode<"AArch64ISD::FTRUNC32_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
+def AArch64frint64z_mt : SDNode<"AArch64ISD::FTRUNC64_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64fsqrt_mt  : SDNode<"AArch64ISD::FSQRT_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64frecpx_mt : SDNode<"AArch64ISD::FRECPX_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
 def AArch64rbit_mt   : SDNode<"AArch64ISD::BITREVERSE_MERGE_PASSTHRU", 
SDT_AArch64Arith>;
@@ -4581,15 +4585,15 @@ let Predicates = [HasSVE2p2_or_SME2p2] in {
 
   // Floating point round to integral fp value in integer size range
   // Merging
-  defm FRINT32Z_ZPmZ : sve_fp_2op_p_zd_frint<0b00, "frint32z">;
-  defm FRINT32X_ZPmZ : sve_fp_2op_p_zd_frint<0b01, "frint32x">;
-  defm FRINT64X_ZPmZ : sve_fp_2op_p_zd_frint<0b10, "frint64z">;
-  defm FRINT64Z_ZPmZ : sve_fp_2op_p_zd_frint<0b11, "frint64x">;
+  defm FRINT32Z_ZPmZ : sve_fp_2op_p_zd_frint<0b00, "frint32z", 
AArch64frint32z_mt>;
+  defm FRINT32X_ZPmZ : sve_fp_2op_p_zd_frint<0b01, "frint32x", 
AArch64frint32x_mt>;
+  defm FRINT64Z_ZPmZ : sve_fp_2op_p_zd_frint<0b10, "frint64z", 
AArch64frint64z_mt>;
+  defm FRINT64X_ZPmZ : sve_fp_2op_p_zd_frint<0b11, "frint64x", 
AArch64frint64x_mt>;
   // Zeroing
-  defm FRINT32Z_ZPzZ : sve_fp_z2op_p_zd_frint<0b00, "frint32z">;
-  defm FRINT32X_ZPzZ : sve_fp_z2op_p_zd_frint<0b01, "frint32x">;
-  defm FRINT64Z_ZPzZ : sve_fp_z2op_p_zd_frint<0b10, "frint64z">;
-  defm FRINT64X_ZPzZ : sve_fp_z2op_p_zd_frint<0b11, "frint64x">;
+  defm FRINT32Z_ZPzZ : sve_fp_z2op_p_zd_frint<0b00, "frint32z", 
AArch64frint32z_mt>;
+  defm FRINT32X_ZPzZ : sve_fp_z2op_p_zd_frint<0b01, "frint32x", 
AArch64frint32x_mt>;
+  defm FRINT64Z_ZPzZ : sve_fp_z2op_p_zd_frint<0b10, "frint64z", 
AArch64frint64z_mt>;
+  defm FRINT64X_ZPzZ : sve_fp_z2op_p_zd_frint<0b11, "frint64x", 
AArch64frint64x_mt>;
 
   // Floating-point round to integral fp value, zeroing predicate
   defm FRINTN_ZPzZ : sve_fp_z2op_p_zd_hsd<0b00000, "frintn", AArch64frintn_mt>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td 
b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c63ae8660cad2..145b9b90bb98f 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3252,9 +3252,20 @@ multiclass sve2_fp_convert_down_odd_rounding<string asm, 
string op, SDPatternOpe
   def : SVE_1_Op_Passthru_Pat<nxv2f32, ir_op, nxv2i1, nxv2f64, 
!cast<Instruction>(NAME # _DtoS)>;
 }
 
-multiclass sve_fp_2op_p_zd_frint<bits<2> opc, string asm> {
+multiclass sve_fp_2op_p_zd_frint<bits<2> opc, string asm, SDPatternOperator op 
= null_frag> {
   def _S : sve_fp_2op_p_zd<{ 0b0010, opc{1}, 0, opc{0} }, asm, ZPR32, ZPR32, 
ElementSizeS>;
   def _D : sve_fp_2op_p_zd<{ 0b0010, opc{1}, 1, opc{0} }, asm, ZPR64, ZPR64, 
ElementSizeD>;
+
+  def : SVE_1_Op_Passthru_Pat<nxv4f32, op, nxv4i1, nxv4f32, 
!cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Passthru_Pat<nxv2f32, op, nxv2i1, nxv2f32, 
!cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Passthru_Pat<nxv2f64, op, nxv2i1, nxv2f64, 
!cast<Instruction>(NAME # _D)>;
+
+  def _S_UNDEF : PredOneOpPassthruPseudo<NAME # _S, ZPR32>;
+  def _D_UNDEF : PredOneOpPassthruPseudo<NAME # _D, ZPR64>;
+
+  defm : SVE_1_Op_PassthruUndef_Pat<nxv4f32, op, nxv4i1, nxv4f32, 
!cast<Instruction>(NAME # _S_UNDEF)>;
+  defm : SVE_1_Op_PassthruUndef_Pat<nxv2f32, op, nxv2i1, nxv2f32, 
!cast<Instruction>(NAME # _S_UNDEF)>;
+  defm : SVE_1_Op_PassthruUndef_Pat<nxv2f64, op, nxv2i1, nxv2f64, 
!cast<Instruction>(NAME # _D_UNDEF)>;
 }
 
 
//===----------------------------------------------------------------------===//
@@ -3337,9 +3348,13 @@ multiclass sve_fp_z2op_p_zd_hsd<bits<5> opc, string asm, 
SDPatternOperator op> {
   defm : SVE_1_Op_PassthruUndefZero_Pat<nxv2f64, op, nxv2i1, nxv2f64, 
!cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_fp_z2op_p_zd_frint<bits<2> opc, string asm> {
+multiclass sve_fp_z2op_p_zd_frint<bits<2> opc, string asm, SDPatternOperator 
op = null_frag> {
   def _S : sve_fp_z2op_p_zd<{ 0b0010, opc{1}, 0, opc{0} }, asm, ZPR32, ZPR32>;
   def _D : sve_fp_z2op_p_zd<{ 0b0010, opc{1}, 1, opc{0} }, asm, ZPR64, ZPR64>;
+
+  defm : SVE_1_Op_PassthruUndefZero_Pat<nxv4f32, op, nxv4i1, nxv4f32, 
!cast<Instruction>(NAME # _S)>;
+  defm : SVE_1_Op_PassthruUndefZero_Pat<nxv2f32, op, nxv2i1, nxv2f32, 
!cast<Instruction>(NAME # _S)>;
+  defm : SVE_1_Op_PassthruUndefZero_Pat<nxv2f64, op, nxv2i1, nxv2f64, 
!cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_fp_z2op_p_zd_bfcvt<string asm, SDPatternOperator op> {
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-intrinsics-fp-arith.ll 
b/llvm/test/CodeGen/AArch64/sve2p2-intrinsics-fp-arith.ll
new file mode 100644
index 0000000000000..659c75074a7fb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2p2-intrinsics-fp-arith.ll
@@ -0,0 +1,283 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 6
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p2 < %s | FileCheck %s
+
+;
+; FRINT32X
+;
+
+define <vscale x 4 x float> @frint32x_m_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint32x_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32x.nxv4f32(<vscale 
x 4 x float> %a,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32x_m_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint32x_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32x.nxv2f64(<vscale 
x 2 x double> %a,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint32x_x_s(<vscale x 4 x i1> %pg, <vscale x 4 x 
float> %b) {
+; CHECK-LABEL: frint32x_x_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32x.nxv4f32(<vscale 
x 4 x float> poison,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32x_x_d(<vscale x 2 x i1> %pg, <vscale x 2 
x double> %b) {
+; CHECK-LABEL: frint32x_x_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.d, p0/z, z0.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32x.nxv2f64(<vscale 
x 2 x double> poison,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint32x_z_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint32x_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.s, p0/z, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32x.nxv4f32(<vscale 
x 4 x float> zeroinitializer,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32x_z_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint32x_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32x z0.d, p0/z, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32x.nxv2f64(<vscale 
x 2 x double> zeroinitializer,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; FRINT32Z
+;
+
+define <vscale x 4 x float> @frint32z_m_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint32z_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32z.nxv4f32(<vscale 
x 4 x float> %a,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32z_m_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint32z_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32z.nxv2f64(<vscale 
x 2 x double> %a,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint32z_x_s(<vscale x 4 x i1> %pg, <vscale x 4 x 
float> %b) {
+; CHECK-LABEL: frint32z_x_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.s, p0/z, z0.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32z.nxv4f32(<vscale 
x 4 x float> poison,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32z_x_d(<vscale x 2 x i1> %pg, <vscale x 2 
x double> %b) {
+; CHECK-LABEL: frint32z_x_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.d, p0/z, z0.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32z.nxv2f64(<vscale 
x 2 x double> poison,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint32z_z_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint32z_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.s, p0/z, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint32z.nxv4f32(<vscale 
x 4 x float> zeroinitializer,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint32z_z_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint32z_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint32z z0.d, p0/z, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint32z.nxv2f64(<vscale 
x 2 x double> zeroinitializer,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; FRINT64X
+;
+
+define <vscale x 4 x float> @frint64x_m_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint64x_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64x.nxv4f32(<vscale 
x 4 x float> %a,
+                                                                      <vscale 
x 4 x i1> %pg,
+                                                                     <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64x_m_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint64x_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64x.nxv2f64(<vscale 
x 2 x double> %a,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint64x_x_s(<vscale x 4 x i1> %pg, <vscale x 4 x 
float> %b) {
+; CHECK-LABEL: frint64x_x_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64x.nxv4f32(<vscale 
x 4 x float> poison,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64x_x_d(<vscale x 2 x i1> %pg, <vscale x 2 
x double> %b) {
+; CHECK-LABEL: frint64x_x_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64x.nxv2f64(<vscale 
x 2 x double> poison,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint64x_z_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint64x_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.s, p0/z, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64x.nxv4f32(<vscale 
x 4 x float> zeroinitializer,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64x_z_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint64x_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64x z0.d, p0/z, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64x.nxv2f64(<vscale 
x 2 x double> zeroinitializer,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; FRINT64Z
+;
+
+define <vscale x 4 x float> @frint64z_m_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint64z_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64z.nxv4f32(<vscale 
x 4 x float> %a,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64z_m_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint64z_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64z.nxv2f64(<vscale 
x 2 x double> %a,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint64z_x_s(<vscale x 4 x i1> %pg, <vscale x 4 x 
float> %b) {
+; CHECK-LABEL: frint64z_x_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.s, p0/z, z0.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64z.nxv4f32(<vscale 
x 4 x float> poison,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64z_x_d(<vscale x 2 x i1> %pg, <vscale x 2 
x double> %b) {
+; CHECK-LABEL: frint64z_x_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.d, p0/z, z0.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64z.nxv2f64(<vscale 
x 2 x double> poison,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 4 x float> @frint64z_z_s(<vscale x 4 x float> %a, <vscale x 4 
x i1> %pg, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frint64z_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.s, p0/z, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frint64z.nxv4f32(<vscale 
x 4 x float> zeroinitializer,
+                                                                    <vscale x 
4 x i1> %pg,
+                                                                    <vscale x 
4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @frint64z_z_d(<vscale x 2 x double> %a, <vscale x 
2 x i1> %pg, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frint64z_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frint64z z0.d, p0/z, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frint64z.nxv2f64(<vscale 
x 2 x double> zeroinitializer,
+                                                                     <vscale x 
2 x i1> %pg,
+                                                                     <vscale x 
2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to