https://github.com/JoshdRod created 
https://github.com/llvm/llvm-project/pull/170832

Many NEON right-shift intrinsics were not supported by GlobalISel, mainly due
to a lack of legalisation logic. This patch series implements that logic.

>From 7e897eac1eee87148b1f3529a42e4b927b556d44 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 27 Nov 2025 15:34:40 +0000
Subject: [PATCH 1/8] [AArch64][GlobalISel] Removed fallback for sqshlu
 intrinsic

Added a G_SQSHLU node, which lowers the LLVM IR intrinsic aarch64_neon_sqshlu to
the machine instruction sqshlu. The generated code is slightly less efficient
compared to SDAG.
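
For readers skimming the diff, the new legalisation step can be read as the
following sketch. This is a simplified restatement, not the literal hunk:
legalizeSQSHLU is a hypothetical standalone name, and MI, MRI and MIB are the
MachineInstr, MachineRegisterInfo and MachineIRBuilder already available inside
AArch64LegalizerInfo::legalizeIntrinsic.

// Hypothetical helper restating the legalisation added below: fold a
// constant-splat shift amount into an immediate and rebuild the intrinsic
// as the target-specific G_SQSHLU. Returning false lets the caller fall
// back when the shift amount is not a constant splat.
// (isConstantOrConstantSplatVector comes from llvm/CodeGen/GlobalISel/Utils.h.)
static bool legalizeSQSHLU(MachineInstr &MI, MachineRegisterInfo &MRI,
                           MachineIRBuilder &MIB) {
  // Operand 3 of the G_INTRINSIC holds the shift-amount vector.
  auto ShiftAmount = isConstantOrConstantSplatVector(
      *MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
  if (!ShiftAmount)
    return false;
  // Rebuild as G_SQSHLU, attaching the splat value as an immediate operand.
  MIB.buildInstr(AArch64::G_SQSHLU, {MI.getOperand(0)}, {MI.getOperand(2)})
      .addImm(ShiftAmount->getSExtValue());
  MI.eraseFromParent();
  return true;
}

The hunk in AArch64LegalizerInfo.cpp below does the same thing inline in the
intrinsic switch.
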
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td  |  8 +++
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    | 12 +++++
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |  9 ++++
 llvm/test/CodeGen/AArch64/arm64-vshift.ll     | 49 ++++++++++---------
 4 files changed, 56 insertions(+), 22 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 7d99786830e3d..7469a081d9787 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -252,6 +252,12 @@ def G_USDOT : AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
+def G_SQSHLU : AArch64GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
 // Generic instruction for the BSP pseudo. It is expanded into BSP, which
 // expands into BSL/BIT/BIF after register allocation.
 def G_BSP : AArch64GenericInstruction {
@@ -300,6 +306,8 @@ def : GINodeEquiv<G_UDOT, AArch64udot>;
 def : GINodeEquiv<G_SDOT, AArch64sdot>;
 def : GINodeEquiv<G_USDOT, AArch64usdot>;
 
+def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
+
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
 def : GINodeEquiv<G_AARCH64_PREFETCH, AArch64Prefetch>;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 1025b2502211a..0010834e01894 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1857,6 +1857,18 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     return LowerBinOp(TargetOpcode::G_SAVGFLOOR);
   case Intrinsic::aarch64_neon_srhadd:
     return LowerBinOp(TargetOpcode::G_SAVGCEIL);
+  case Intrinsic::aarch64_neon_sqshlu: {
+    // Check if last operand is constant vector dup
+    auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
+    if (shiftAmount) {
+      // If so, create a new intrinsic with the correct shift amount
+      MIB.buildInstr(AArch64::G_SQSHLU, {MI.getOperand(0)}, {MI.getOperand(2)}).addImm(shiftAmount->getSExtValue());
+      MI.eraseFromParent();
+      return true;
+    } else {
+      return false;
+    }
+  }
   case Intrinsic::aarch64_neon_abs: {
     // Lower the intrinsic to G_ABS.
     MIB.buildInstr(TargetOpcode::G_ABS, {MI.getOperand(0)}, {MI.getOperand(2)});
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 652a31f4e65f2..aa1517533b753 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -1072,6 +1072,15 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // Index needs to be a GPR.
     OpRegBankIdx[2] = PMI_FirstGPR;
     break;
+  case AArch64::G_SQSHLU:
+    // Destination and source need to be FPRs.
+    OpRegBankIdx[0] = PMI_FirstFPR;
+    OpRegBankIdx[1] = PMI_FirstFPR;
+
+    // Shift Index needs to be a GPR.
+    OpRegBankIdx[2] = PMI_FirstGPR;
+    break;
+
   case TargetOpcode::G_INSERT_VECTOR_ELT:
     OpRegBankIdx[0] = PMI_FirstFPR;
     OpRegBankIdx[1] = PMI_FirstFPR;
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 34843835d284a..961788f311041 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,17 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sqshlu8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu2d
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu1d_constant
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu_i64_constant
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu_i32_constant
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn1s
+; CHECK-GI:    warning: Instruction selection used fallback path for sqshrn1s
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn8b
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn4h
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn2s
@@ -1543,23 +1533,38 @@ define <2 x i64> @sqshlu2d(ptr %A) nounwind {
 }
 
 define <1 x i64> @sqshlu1d_constant(ptr %A) nounwind {
-; CHECK-LABEL: sqshlu1d_constant:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    sqshlu d0, d0, #1
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqshlu1d_constant:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldr d0, [x0]
+; CHECK-SD-NEXT:    sqshlu d0, d0, #1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqshlu1d_constant:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr x8, [x0]
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    sqshlu d0, d0, #1
+; CHECK-GI-NEXT:    ret
   %tmp1 = load <1 x i64>, ptr %A
   %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %tmp1, <1 x 
i64> <i64 1>)
   ret <1 x i64> %tmp3
 }
 
 define i64 @sqshlu_i64_constant(ptr %A) nounwind {
-; CHECK-LABEL: sqshlu_i64_constant:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    sqshlu d0, d0, #1
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqshlu_i64_constant:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldr d0, [x0]
+; CHECK-SD-NEXT:    sqshlu d0, d0, #1
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqshlu_i64_constant:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr x8, [x0]
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    sqshlu d0, d0, #1
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
   %tmp1 = load i64, ptr %A
   %tmp3 = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %tmp1, i64 1)
   ret i64 %tmp3

>From dbbf2b148dc8534e14d4f780e1e1382a3227d9ed Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 3 Dec 2025 16:44:30 +0000
Subject: [PATCH 2/8] [AArch64][GlobalISel] Removed fallback from sqshrn
 intrinsic

During legalisation, the IR intrinsic is lowered to two GISel instructions: a
vector right shift (G_VASHR) and a signed saturating truncate (G_TRUNC_SSAT_S),
with the result of the G_VASHR feeding the G_TRUNC_SSAT_S. Vectors that are
treated as i64/i32 are already handled by TableGen patterns, so they are not
handled here.
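
Patches 2 through 7 all follow this same shape: build a right-shift
instruction, feed its result into a saturating truncate, and leave the scalar
forms to the existing TableGen patterns. Only the shift opcode (G_VASHR,
G_SRSHR, G_URSHR or G_VLSHR) and the truncate opcode (G_TRUNC_SSAT_S,
G_TRUNC_SSAT_U or G_TRUNC_USAT_U) vary. A minimal sketch of the shared pattern,
using a hypothetical helper name that does not appear in the patch, is:

// Hypothetical helper (not in the patch) capturing the lowering shared by
// sqshrn, sqshrun, sqrshrn, sqrshrun, uqrshrn and uqshrn: shift the wide
// source right by an immediate, then narrow it with a saturating truncate.
// Non-vector (scalar) forms are skipped and left to TableGen selection.
static bool lowerShiftRightNarrow(MachineInstr &MI, MachineRegisterInfo &MRI,
                                  MachineIRBuilder &MIB, unsigned ShiftOpc,
                                  unsigned TruncOpc) {
  if (!MRI.getType(MI.getOperand(0).getReg()).isVector())
    return false;
  // Right shift of the wide source (operand 2) by the immediate shift amount.
  auto Shr = MIB.buildInstr(ShiftOpc, {MRI.getType(MI.getOperand(2).getReg())},
                            {MI.getOperand(2), MI.getOperand(3).getImm()});
  // Saturating truncate of the shifted value into the narrow destination.
  MIB.buildInstr(TruncOpc, {MI.getOperand(0)}, {Shr});
  MI.eraseFromParent();
  return true;
}

Under that sketch, this patch corresponds to passing AArch64::G_VASHR and
TargetOpcode::G_TRUNC_SSAT_S, while the later uqshrn patch would pass
AArch64::G_VLSHR and TargetOpcode::G_TRUNC_USAT_U; the patches below keep the
individual cases written out inline instead.
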
---
 .../lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 11 +++++++++++
 .../Target/AArch64/GISel/AArch64RegisterBankInfo.cpp  |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll             | 10 +---------
 3 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 0010834e01894..d69e17fac6ba3 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1857,6 +1857,17 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     return LowerBinOp(TargetOpcode::G_SAVGFLOOR);
   case Intrinsic::aarch64_neon_srhadd:
     return LowerBinOp(TargetOpcode::G_SAVGCEIL);
+  case Intrinsic::aarch64_neon_sqshrn: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_VASHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
+    }
+    break;
+  }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index aa1517533b753..04b3affb234a3 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -489,6 +489,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_uqshl:
   case Intrinsic::aarch64_neon_sqrshl:
   case Intrinsic::aarch64_neon_uqrshl:
+  case Intrinsic::aarch64_neon_sqshrn:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
   case Intrinsic::aarch64_crypto_sha1m:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 961788f311041..292546ab66e1a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,14 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sqshrn1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrn4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun1s
+; CHECK-GI:    warning: Instruction selection used fallback path for sqshrun1s
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun8b
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun4h
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun2s
@@ -58,7 +51,6 @@
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli8h
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli4s
 ; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli2d
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshlu_zero_shift_amount
 
 define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqshl8b:

>From 670e8b9c84a95f5224d420cac66ccf4c80e174d7 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 3 Dec 2025 16:54:42 +0000
Subject: [PATCH 3/8] [AArch64][GlobalISel] Removed fallback from sqshrun
 intrinsic

During legalisation, the IR intrinsic is lowered to two GISel instructions: a
vector right shift (G_VASHR) and a signed-to-unsigned saturating truncate
(G_TRUNC_SSAT_U), with the result of the G_VASHR feeding the G_TRUNC_SSAT_U.
---
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    | 11 +++
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll     | 91 +++++++++----------
 3 files changed, 54 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index d69e17fac6ba3..9e05fc3d60b98 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1868,6 +1868,17 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     }
     break;
   }
+  case Intrinsic::aarch64_neon_sqshrun: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_VASHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_U, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
+    }
+    break;
+  }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 04b3affb234a3..32588aaa0f510 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -490,6 +490,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_sqrshl:
   case Intrinsic::aarch64_neon_uqrshl:
   case Intrinsic::aarch64_neon_sqshrn:
+  case Intrinsic::aarch64_neon_sqshrun:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
   case Intrinsic::aarch64_crypto_sha1m:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 292546ab66e1a..fd6926b1f1ddb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,55 +2,48 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sqshrun1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqshrun4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn1s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
uqshrn4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_vscalar_constant_shift
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_scalar_constant_shift
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_vscalar_constant_shift
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_scalar_constant_shift
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_scalar_constant_shift_m1
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli1d
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sli1d_imm0
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sli16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli2d
+; CHECK-GI:    warning: Instruction selection used fallback path for sqrshrn1s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn2s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn16b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun1s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun2s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun16b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn1s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn2s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn16b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn1s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn8b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn4h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn2s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn16b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_vscalar_constant_shift
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_scalar_constant_shift
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_vscalar_constant_shift
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_scalar_constant_shift
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_scalar_constant_shift_m1
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli8b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli4h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli2s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli1d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sli1d_imm0
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sli16b
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for sli2d
 
 define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqshl8b:

>From ba37ca1786d8e718e8d06ccaf7526e74bd01d03e Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 4 Dec 2025 11:09:04 +0000
Subject: [PATCH 4/8] [AArch64][GlobalISel] Removed fallback for sqrshrn
 intrinsic

GISel now legalises sqrshrn into G_TRUNC_SSAT_S(G_SRSHR(vec, shift)).
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td      |  7 +++++++
 .../Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 15 +++++++++++++--
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp     |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll         |  9 +--------
 4 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 7469a081d9787..0c8cbc3b5b864 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -258,6 +258,12 @@ def G_SQSHLU : AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
+def G_SRSHR: AArch64GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
 // Generic instruction for the BSP pseudo. It is expanded into BSP, which
 // expands into BSL/BIT/BIF after register allocation.
 def G_BSP : AArch64GenericInstruction {
@@ -307,6 +313,7 @@ def : GINodeEquiv<G_SDOT, AArch64sdot>;
 def : GINodeEquiv<G_USDOT, AArch64usdot>;
 
 def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
+def : GINodeEquiv<G_SRSHR, AArch64srshri>;
 
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 9e05fc3d60b98..b572e07793559 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1863,8 +1863,7 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
       // Create right shift instruction. Get v. register the output is written to
       auto Shr = MIB.buildInstr(AArch64::G_VASHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
-      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
-      MI.eraseFromParent();
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr}); MI.eraseFromParent();
     }
     break;
   }
@@ -1879,6 +1878,18 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     }
     break;
   }
+  case Intrinsic::aarch64_neon_sqrshrn: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_SRSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
+    }
+    break;
+  }
+
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 32588aaa0f510..881a4010c4f45 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -491,6 +491,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_uqrshl:
   case Intrinsic::aarch64_neon_sqshrn:
   case Intrinsic::aarch64_neon_sqshrun:
+  case Intrinsic::aarch64_neon_sqrshrn:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
   case Intrinsic::aarch64_crypto_sha1m:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index fd6926b1f1ddb..fd7e42a312bc3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,14 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sqrshrn1s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn2s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn16b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrn4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun1s
+; CHECK-GI:    warning: Instruction selection used fallback path for sqrshrun1s
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8b
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4h
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun2s

>From ee2a1f02e8422ccb82008f5e3b6b6ede0fd945f6 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 4 Dec 2025 11:36:46 +0000
Subject: [PATCH 5/8] [AArch64][GlobalISel] Removed fallback for sqrshrun
 intrinsic

GlobalISel now legalises sqrshrun to G_TRUNC_SSAT_U(G_SRSHR(vec, shift)).
---
 .../Target/AArch64/GISel/AArch64LegalizerInfo.cpp    | 12 +++++++++++-
 .../Target/AArch64/GISel/AArch64RegisterBankInfo.cpp |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll            |  9 +--------
 3 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index b572e07793559..a7bc58e5db8db 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1889,7 +1889,17 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     }
     break;
   }
-
+  case Intrinsic::aarch64_neon_sqrshrun: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_SRSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_U, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
+    }
+    break;
+  }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 881a4010c4f45..ba7ff4db35997 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -492,6 +492,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_sqshrn:
   case Intrinsic::aarch64_neon_sqshrun:
   case Intrinsic::aarch64_neon_sqrshrn:
+  case Intrinsic::aarch64_neon_sqrshrun:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
   case Intrinsic::aarch64_crypto_sha1m:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index fd7e42a312bc3..41a03293e36b8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,14 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sqrshrun1s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun2s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun16b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
sqrshrun4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn1s
+; CHECK-GI:    warning: Instruction selection used fallback path for uqrshrn1s
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8b
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4h
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn2s

>From ef9c6465f5b30d6114f8ca4fa2e71013262c5713 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 4 Dec 2025 14:27:47 +0000
Subject: [PATCH 6/8] [AArch64][GlobalISel] Removed fallback for uqrshrn
 intrinsic

GlobalISel now lowers uqrshrn to G_TRUNC_USAT_U(G_URSHR(vec, shift)).
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td          |  7 +++++++
 .../lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 11 +++++++++++
 .../Target/AArch64/GISel/AArch64RegisterBankInfo.cpp  |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll             |  9 +--------
 4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 0c8cbc3b5b864..75354e4098fb4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -264,6 +264,12 @@ def G_SRSHR: AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
+def G_URSHR: AArch64GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
 // Generic instruction for the BSP pseudo. It is expanded into BSP, which
 // expands into BSL/BIT/BIF after register allocation.
 def G_BSP : AArch64GenericInstruction {
@@ -314,6 +320,7 @@ def : GINodeEquiv<G_USDOT, AArch64usdot>;
 
 def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
 def : GINodeEquiv<G_SRSHR, AArch64srshri>;
+def : GINodeEquiv<G_URSHR, AArch64urshri>;
 
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index a7bc58e5db8db..d73c47ce833d9 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1900,6 +1900,17 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     }
     break;
   }
+  case Intrinsic::aarch64_neon_uqrshrn: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_URSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
+    }
+    break;
+  }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index ba7ff4db35997..4f9e8195b656d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -493,6 +493,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_sqshrun:
   case Intrinsic::aarch64_neon_sqrshrn:
   case Intrinsic::aarch64_neon_sqrshrun:
+  case Intrinsic::aarch64_neon_uqrshrn:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
   case Intrinsic::aarch64_crypto_sha1m:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 41a03293e36b8..0042bed85b4f5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,14 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for uqrshrn1s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn2s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn16b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqrshrn4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn1s
+; CHECK-GI:    warning: Instruction selection used fallback path for uqshrn1s
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn8b
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn4h
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn2s

>From 6a89b7b7c9f271a6faf1e19f73075f763ae3ba60 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 4 Dec 2025 15:19:35 +0000
Subject: [PATCH 7/8] [AArch64][GlobalISel] Removed fallback for uqshrn
 intrinsic

GlobalISel now lowers uqshrn to G_TRUNC_USAT_U(G_VLSHR(vec, shift)).
---
 llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 10 ++++++++++
 .../Target/AArch64/GISel/AArch64RegisterBankInfo.cpp   |  1 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll              |  9 +--------
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index d73c47ce833d9..0c8472b759132 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1911,6 +1911,16 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     }
     break;
   }
+  case Intrinsic::aarch64_neon_uqshrn: {
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
+    {
+      // Create right shift instruction. Get v. register the output is written to
+      auto Shr = MIB.buildInstr(AArch64::G_VLSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+      // Build the narrow intrinsic, taking in the v. register of the shift
+      MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr}); MI.eraseFromParent();
+    }
+    break;
+  }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
     auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 4f9e8195b656d..173d4d9b47b1e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -493,6 +493,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_sqshrun:
   case Intrinsic::aarch64_neon_sqrshrn:
   case Intrinsic::aarch64_neon_sqrshrun:
+  case Intrinsic::aarch64_neon_uqshrn:
   case Intrinsic::aarch64_neon_uqrshrn:
   case Intrinsic::aarch64_crypto_sha1c:
   case Intrinsic::aarch64_crypto_sha1p:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 0042bed85b4f5..67054f60cc362 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,14 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for uqshrn1s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn8b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn4h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn2s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn16b
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
uqshrn4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_vscalar_constant_shift
+; CHECK-GI:    warning: Instruction selection used fallback path for 
neon_ushl_vscalar_constant_shift
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_ushl_scalar_constant_shift
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_vscalar_constant_shift
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for 
neon_sshll_scalar_constant_shift

>From 566d54e3985186d1d2a769d0140e944182d1dd2f Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Thu, 4 Dec 2025 16:36:19 +0000
Subject: [PATCH 8/8] [AArch64][GlobalISel] Updated test checks

---
 llvm/test/CodeGen/AArch64/arm64-int-neon.ll |  13 +--
 llvm/test/CodeGen/AArch64/arm64-vshift.ll   | 107 ++++++--------------
 2 files changed, 36 insertions(+), 84 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-int-neon.ll 
b/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
index f33d41b0dd6ef..eb86728e6d22f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
@@ -3,20 +3,11 @@
 ; RUN: llc < %s -mtriple aarch64-unknown-unknown -global-isel 
-global-isel-abort=2 -mattr=+fprcvt,+fullfp16 2>&1 | FileCheck %s 
--check-prefixes=CHECK,CHECK-GI
 
 
-; CHECK-GI:       warning: Instruction selection used fallback path for 
test_sqrshl_s32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_sqrshl_s64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_sqshl_s32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_sqshl_s64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqrshl_s32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqrshl_s64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqshl_s32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqshl_s64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqadd_s32
+; CHECK-GI:  warning: Instruction selection used fallback path for 
test_uqadd_s32
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqadd_s64
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqsub_s32
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_uqsub_s64
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for 
test_sqdmulls_scalar
-
 define i32 @test_sqrshl_s32(float noundef %a){
 ; CHECK-LABEL: test_sqrshl_s32:
 ; CHECK:       // %bb.0: // %entry
@@ -236,3 +227,5 @@ define i64 @test_sqdmulls_scalar(float %A){
   %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32  %cvt, i32  %cvt)
   ret i64 %prod
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
+; CHECK-GI: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 67054f60cc362..df3ca03ddcb62 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -89,23 +89,13 @@ define <1 x i64> @sqshl1d_constant(ptr %A) nounwind {
 }
 
 define i64 @sqshl_scalar(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: sqshl_scalar:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x8, [x0]
-; CHECK-SD-NEXT:    ldr x9, [x1]
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    fmov d1, x9
-; CHECK-SD-NEXT:    sqshl d0, d0, d1
-; CHECK-SD-NEXT:    fmov x0, d0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: sqshl_scalar:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1]
-; CHECK-GI-NEXT:    sqshl d0, d0, d1
-; CHECK-GI-NEXT:    fmov x0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: sqshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i64, ptr %A
   %tmp2 = load i64, ptr %B
   %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 %tmp2)
@@ -309,23 +299,13 @@ define <1 x i64> @uqshl1d_constant(ptr %A) nounwind {
 }
 
 define i64 @uqshl_scalar(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: uqshl_scalar:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x8, [x0]
-; CHECK-SD-NEXT:    ldr x9, [x1]
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    fmov d1, x9
-; CHECK-SD-NEXT:    uqshl d0, d0, d1
-; CHECK-SD-NEXT:    fmov x0, d0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: uqshl_scalar:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1]
-; CHECK-GI-NEXT:    uqshl d0, d0, d1
-; CHECK-GI-NEXT:    fmov x0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: uqshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i64, ptr %A
   %tmp2 = load i64, ptr %B
   %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 %tmp2)
@@ -885,23 +865,13 @@ define <1 x i64> @sqrshl1d_constant(ptr %A) nounwind {
 }
 
 define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: sqrshl_scalar:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x8, [x0]
-; CHECK-SD-NEXT:    ldr x9, [x1]
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    fmov d1, x9
-; CHECK-SD-NEXT:    sqrshl d0, d0, d1
-; CHECK-SD-NEXT:    fmov x0, d0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: sqrshl_scalar:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1]
-; CHECK-GI-NEXT:    sqrshl d0, d0, d1
-; CHECK-GI-NEXT:    fmov x0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: sqrshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i64, ptr %A
   %tmp2 = load i64, ptr %B
   %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 %tmp2)
@@ -911,10 +881,9 @@ define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind {
 define i64 @sqrshl_scalar_constant(ptr %A) nounwind {
 ; CHECK-SD-LABEL: sqrshl_scalar_constant:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x9, [x0]
-; CHECK-SD-NEXT:    mov w8, #1 // =0x1
+; CHECK-SD-NEXT:    mov x8, #1 // =0x1
+; CHECK-SD-NEXT:    ldr d0, [x0]
 ; CHECK-SD-NEXT:    fmov d1, x8
-; CHECK-SD-NEXT:    fmov d0, x9
 ; CHECK-SD-NEXT:    sqrshl d0, d0, d1
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
@@ -1011,23 +980,13 @@ define <1 x i64> @uqrshl1d_constant(ptr %A) nounwind {
 }
 
 define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: uqrshl_scalar:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x8, [x0]
-; CHECK-SD-NEXT:    ldr x9, [x1]
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    fmov d1, x9
-; CHECK-SD-NEXT:    uqrshl d0, d0, d1
-; CHECK-SD-NEXT:    fmov x0, d0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: uqrshl_scalar:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1]
-; CHECK-GI-NEXT:    uqrshl d0, d0, d1
-; CHECK-GI-NEXT:    fmov x0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: uqrshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i64, ptr %A
   %tmp2 = load i64, ptr %B
   %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 %tmp2)
@@ -1037,10 +996,9 @@ define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind {
 define i64 @uqrshl_scalar_constant(ptr %A) nounwind {
 ; CHECK-SD-LABEL: uqrshl_scalar_constant:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr x9, [x0]
-; CHECK-SD-NEXT:    mov w8, #1 // =0x1
+; CHECK-SD-NEXT:    mov x8, #1 // =0x1
+; CHECK-SD-NEXT:    ldr d0, [x0]
 ; CHECK-SD-NEXT:    fmov d1, x8
-; CHECK-SD-NEXT:    fmov d0, x9
 ; CHECK-SD-NEXT:    uqrshl d0, d0, d1
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
@@ -2708,6 +2666,7 @@ define <4 x i32> @neon_sshl4s_wrong_ext_constant_shift(ptr %A) nounwind {
 ; CHECK-GI-NEXT:    ret
   %tmp1 = load <4 x i8>, ptr %A
   %tmp2 = sext <4 x i8> %tmp1 to <4 x i32>
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   ret <4 x i32> %tmp3
 }
 
