[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-04-06 Thread Kerry McLaughlin via Phabricator via cfe-commits
This revision was automatically updated to reflect the committed changes.
Closed by commit rG944e322f8897: [AArch64][SVE] Add SVE intrinsics for
saturating add & subtract (authored by kmclaughlin).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/lib/Target/AArch64/SVEInstrFormats.td
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll

Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -134,6 +134,82 @@
   ret  %out
 }
 
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqadd_i8:
+; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqadd_i16:
+; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqadd_i32:
+; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqadd_i64:
+; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqsub_i8:
+; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqsub_i16:
+; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqsub_i32:
+; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqsub_i64:
+; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 ; UDOT
 
 define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
@@ -169,6 +245,82 @@
   ret  %out
 }
 
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqadd_i8:
+; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqadd_i16:
+; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqadd_i32:
+; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqadd_i64:
+; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqsub_i8:
+; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqsub_i16:
+; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqsub_i32:
+; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqsub_i64:
+; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}

[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-04-03 Thread Eli Friedman via Phabricator via cfe-commits
efriedma accepted this revision.
efriedma added a comment.
This revision is now accepted and ready to land.

LGTM


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054





[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-04-03 Thread Kerry McLaughlin via Phabricator via cfe-commits
kmclaughlin updated this revision to Diff 254742.
kmclaughlin added a comment.

Moved patterns for the new intrinsics into the sve_int_bin_cons_arit_0 and
sve_int_arith_imm0 multiclasses.
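Roughly, the instantiations in AArch64SVEInstrInfo.td then pass the intrinsic
straight to the multiclass. A hypothetical sketch only (opcode bits and record
names are illustrative, not copied from the committed diff):

  // Illustrative only: each defm forwards the matching intrinsic so the
  // multiclass emits both the instruction definitions and the ISel patterns.
  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;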


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/lib/Target/AArch64/SVEInstrFormats.td
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll


[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-04-02 Thread Eli Friedman via Phabricator via cfe-commits
efriedma added a comment.

You should be able to refactor the patterns into the definitions of the 
multiclasses sve_int_bin_cons_arit_0 and sve_int_arith_imm0, to avoid repeating 
them four times.  (You might want to look at other places using null_frag in 
SVEInstrFormats.td for inspiration.)
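
For reference, a rough sketch of the shape such a refactor could take in
SVEInstrFormats.td, with the pattern operand defaulting to null_frag so
existing instantiations that pass no intrinsic are unaffected (a sketch of the
suggestion only, not the committed definition):

  // Illustrative sketch: a defaulted SDPatternOperator means instantiations
  // without an intrinsic get the instruction definitions only, while those
  // that pass one also get the two-operand selection patterns.
  multiclass sve_int_bin_cons_arit_0<bits<3> opc, string asm,
                                     SDPatternOperator op = null_frag> {
    def _B : sve_int_bin_cons_arit_0<0b00, opc, asm, ZPR8>;
    def _H : sve_int_bin_cons_arit_0<0b01, opc, asm, ZPR16>;
    def _S : sve_int_bin_cons_arit_0<0b10, opc, asm, ZPR32>;
    def _D : sve_int_bin_cons_arit_0<0b11, opc, asm, ZPR64>;

    def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
    def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
    def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
    def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
  }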


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054





[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-04-02 Thread Kerry McLaughlin via Phabricator via cfe-commits
kmclaughlin updated this revision to Diff 254558.
kmclaughlin added a comment.

Added patterns to AArch64SVEInstrInfo.td to support llvm.[s|u]add &
llvm.[s|u]sub again, which had been removed by my previous patch.
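
A minimal sketch of the kind of IR this keeps working, assuming
"llvm.[s|u]add"/"llvm.[s|u]sub" here refer to the generic saturating
intrinsics (see sve-int-arith.ll for the actual tests):

  ; Illustrative sketch: the target-independent saturating-add intrinsic on a
  ; scalable vector should still select the unpredicated SVE instruction,
  ; e.g. sqadd z0.b, z0.b, z1.b.
  define <vscale x 16 x i8> @generic_sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
    %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
    ret <vscale x 16 x i8> %res
  }

  declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)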


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll


[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-03-30 Thread Eli Friedman via Phabricator via cfe-commits
efriedma added a comment.

I can understand why you might want the new intrinsics as a temporary measure, 
but I don't see the point of removing the already working support for 
llvm.sadd. etc.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054





[PATCH] D77054: [AArch64][SVE] Add SVE intrinsics for saturating add & subtract

2020-03-30 Thread Kerry McLaughlin via Phabricator via cfe-commits
kmclaughlin created this revision.
kmclaughlin added reviewers: sdesmalen, c-rhodes, dancgr, efriedma, 
cameron.mcinally.
Herald added subscribers: danielkiss, psnobl, rkruppe, hiraditya, 
kristof.beyls, tschuett.
Herald added a reviewer: rengolin.
Herald added a project: LLVM.

Adds the following intrinsics:

- @llvm.aarch64.sve.[s|u]qadd.x
- @llvm.aarch64.sve.[s|u]qsub.x
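
A minimal sketch of the i8 flavour at the IR level, inferred from the calls in
the sve-intrinsics-int-arith.ll tests (the i16/i32/i64 variants follow the
same two-operand pattern):

  ; Inferred from the tests in this patch: two-operand, unpredicated
  ; saturating operations on scalable vectors; the element width selects the
  ; .b/.h/.s/.d instruction form.
  declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
  declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
  declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
  declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)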


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-int-arith.ll
  llvm/test/CodeGen/AArch64/sve-int-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
