Author: Jim Lin
Date: 2025-05-12T10:41:56+08:00
New Revision: 07bd6454806aa8149809c49833b6e7c165a2eb51

URL: https://github.com/llvm/llvm-project/commit/07bd6454806aa8149809c49833b6e7c165a2eb51
DIFF: https://github.com/llvm/llvm-project/commit/07bd6454806aa8149809c49833b6e7c165a2eb51.diff

LOG: Revert "[RISCV] Implement codegen for XAndesPerf lea instructions (#137925)"

This reverts commit a788a1abd9c881aa113f5932d100e1a2e3898e14.
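
For context: as the diff below shows, the reverted change had hooked the
XAndesPerf scaled index-add instructions (nds.lea.h/.w/.d, plus the RV64-only
.ze forms) into the shared Zba shift-add selection patterns, so shift-and-add
arithmetic lowered to nds.lea.* under -mattr=+xandesperf. A minimal sketch of
the effect, reconstructed from the addmul6 FileCheck lines this revert
deletes (the operand comments reflect my reading of the TableGen operand
order, i.e. nds.lea.h rd, rs1, rs2 computes rd = rs1 + (rs2 << 1)):

  define i32 @addmul6(i32 %a, i32 %b) {
    %c = mul i32 %a, 6
    %d = add i32 %c, %b
    ret i32 %d
  }

  ; Before the revert, llc -mtriple=riscv32 -mattr=+m,+xandesperf selected:
  ; RV32XANDESPERF:       # %bb.0:
  ; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
  ; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
  ; RV32XANDESPERF-NEXT:    ret
  ; i.e. a0 = a0 + (a0 << 1) = 3*a, then a0 = a1 + (3*a << 1) = b + 6*a.

After the revert the nds.lea.* instruction definitions remain (as plain
NDSRVInstRR encodings), but no ISel patterns target them, so such inputs
presumably fall back to the base mul/shift-add lowering.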

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rv64zba.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 158a3afdb864c..134d82d84b237 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14516,8 +14516,8 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
-  // Perform this optimization only in the zba/xandesperf extension.
-  if (!Subtarget.hasStdExtZba() && !Subtarget.hasVendorXAndesPerf())
+  // Perform this optimization only in the zba extension.
+  if (!Subtarget.hasStdExtZba())
     return SDValue();
 
   // Skip for vector types and larger types.
@@ -15448,9 +15448,8 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
   if (VT != Subtarget.getXLenVT())
     return SDValue();
 
-  const bool HasShlAdd = Subtarget.hasStdExtZba() ||
-                         Subtarget.hasVendorXTHeadBa() ||
-                         Subtarget.hasVendorXAndesPerf();
+  const bool HasShlAdd =
+      Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
 
   ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (!CNode)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index 4e01b93d76e80..2ec768435259c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -135,16 +135,6 @@ class NDSRVInstRR<bits<7> funct7, string opcodestr>
   let mayStore = 0;
 }
 
-class NDSRVInstLEA<bits<7> funct7, string opcodestr>
-    : RVInstR<funct7, 0b000, OPC_CUSTOM_2,
-              (outs GPR:$rd), (ins GPR:$rs2, GPR:$rs1),
-              opcodestr, "$rd, $rs1, $rs2">,
-      Sched<[WriteIALU, ReadIALU, ReadIALU]> {
-  let hasSideEffects = 0;
-  let mayLoad = 0;
-  let mayStore = 0;
-}
-
 // GP: ADDI, LB, LBU
 class NDSRVInstLBGP<bits<2> funct2, string opcodestr>
     : RVInst<(outs GPR:$rd), (ins simm18:$imm18),
@@ -331,9 +321,9 @@ def NDS_BNEC : NDSRVInstBC<0b110, "nds.bnec">;
 def NDS_BFOS : NDSRVInstBFO<0b011, "nds.bfos">;
 def NDS_BFOZ : NDSRVInstBFO<0b010, "nds.bfoz">;
 
-def NDS_LEA_H : NDSRVInstLEA<0b0000101, "nds.lea.h">;
-def NDS_LEA_W : NDSRVInstLEA<0b0000110, "nds.lea.w">;
-def NDS_LEA_D : NDSRVInstLEA<0b0000111, "nds.lea.d">;
+def NDS_LEA_H : NDSRVInstRR<0b0000101, "nds.lea.h">;
+def NDS_LEA_W : NDSRVInstRR<0b0000110, "nds.lea.w">;
+def NDS_LEA_D : NDSRVInstRR<0b0000111, "nds.lea.d">;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 def NDS_ADDIGP : NDSRVInstLBGP<0b01, "nds.addigp">;
@@ -355,10 +345,10 @@ def NDS_FLMISM  : NDSRVInstRR<0b0010011, "nds.flmism">;
 } // Predicates = [HasVendorXAndesPerf]
 
 let Predicates = [HasVendorXAndesPerf, IsRV64] in {
-def NDS_LEA_B_ZE : NDSRVInstLEA<0b0001000, "nds.lea.b.ze">;
-def NDS_LEA_H_ZE : NDSRVInstLEA<0b0001001, "nds.lea.h.ze">;
-def NDS_LEA_W_ZE : NDSRVInstLEA<0b0001010, "nds.lea.w.ze">;
-def NDS_LEA_D_ZE : NDSRVInstLEA<0b0001011, "nds.lea.d.ze">;
+def NDS_LEA_B_ZE : NDSRVInstRR<0b0001000, "nds.lea.b.ze">;
+def NDS_LEA_H_ZE : NDSRVInstRR<0b0001001, "nds.lea.h.ze">;
+def NDS_LEA_W_ZE : NDSRVInstRR<0b0001010, "nds.lea.w.ze">;
+def NDS_LEA_D_ZE : NDSRVInstRR<0b0001011, "nds.lea.d.ze">;
 
 def NDS_LWUGP : NDSRVInstLWGP<0b110, "nds.lwugp">;
 def NDS_LDGP  : NDSRVInstLDGP<0b011, "nds.ldgp">;
@@ -366,32 +356,3 @@ def NDS_LDGP  : NDSRVInstLDGP<0b011, "nds.ldgp">;
 def NDS_SDGP  : NDSRVInstSDGP<0b111, "nds.sdgp">;
 } // Predicates = [HasVendorXAndesPerf, IsRV64]
 } // DecoderNamespace = "XAndes"
-
-// Patterns
-
-let Predicates = [HasVendorXAndesPerf] in {
-
-defm : ShxAddPat<1, NDS_LEA_H>;
-defm : ShxAddPat<2, NDS_LEA_W>;
-defm : ShxAddPat<3, NDS_LEA_D>;
-
-def : CSImm12MulBy4Pat<NDS_LEA_W>;
-def : CSImm12MulBy8Pat<NDS_LEA_D>;
-} // Predicates = [HasVendorXAndesPerf]
-
-let Predicates = [HasVendorXAndesPerf, IsRV64] in {
-
-defm : ADD_UWPat<NDS_LEA_B_ZE>;
-
-defm : ShxAdd_UWPat<1, NDS_LEA_H_ZE>;
-defm : ShxAdd_UWPat<2, NDS_LEA_W_ZE>;
-defm : ShxAdd_UWPat<3, NDS_LEA_D_ZE>;
-
-defm : Sh1Add_UWPat<NDS_LEA_H_ZE>;
-defm : Sh2Add_UWPat<NDS_LEA_W_ZE>;
-defm : Sh3Add_UWPat<NDS_LEA_D_ZE>;
-
-def : Sh1AddPat<NDS_LEA_H_ZE>;
-def : Sh2AddPat<NDS_LEA_W_ZE>;
-def : Sh3AddPat<NDS_LEA_D_ZE>;
-} // Predicates = [HasVendorXAndesPerf, IsRV64]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 4353e94bdb1d0..9227c1b1fc18c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -672,7 +672,10 @@ def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (PACK GPR:$rs, (XLenVT X0))>;
 let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in
 def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (PACKW GPR:$rs, (XLenVT X0))>;
 
-multiclass ShxAddPat<int i, Instruction shxadd> {
+let Predicates = [HasStdExtZba] in {
+
+foreach i = {1,2,3} in {
+  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
   def : Pat<(XLenVT (add_like_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
             (shxadd GPR:$rs1, GPR:$rs2)>;
   def : Pat<(XLenVT (riscv_shl_add GPR:$rs1, (XLenVT i), GPR:$rs2)),
@@ -684,90 +687,15 @@ multiclass ShxAddPat<int i, Instruction shxadd> {
             (shxadd pat:$rs1, GPR:$rs2)>;
 }
 
-class CSImm12MulBy4Pat<Instruction sh2add>
-    : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
-          (sh2add (XLenVT (ADDI (XLenVT X0), CSImm12MulBy4:$i)),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
+          (SH2ADD (XLenVT (ADDI (XLenVT X0), CSImm12MulBy4:$i)),
                   GPR:$r)>;
-
-class CSImm12MulBy8Pat<Instruction sh3add>
-    : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
-          (sh3add (XLenVT (ADDI (XLenVT X0), CSImm12MulBy8:$i)),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
+          (SH3ADD (XLenVT (ADDI (XLenVT X0), CSImm12MulBy8:$i)),
                   GPR:$r)>;
 
-let Predicates = [HasStdExtZba] in {
-foreach i = {1,2,3} in {
-  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
-  defm : ShxAddPat<i, shxadd>;
-}
-
-def : CSImm12MulBy4Pat<SH2ADD>;
-def : CSImm12MulBy8Pat<SH3ADD>;
 } // Predicates = [HasStdExtZba]
 
-multiclass ADD_UWPat<Instruction add_uw> {
-  def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
-            (add_uw GPR:$rs1, GPR:$rs2)>;
-  def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (add_uw GPR:$rs, (XLenVT X0))>;
-}
-
-multiclass ShxAdd_UWPat<int i, Instruction shxadd_uw> {
-  def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)),
-                                     (XLenVT GPR:$rs2))),
-            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
-  def : Pat<(i64 (riscv_shl_add (and GPR:$rs1, 0xFFFFFFFF), (i64 i), GPR:$rs2)),
-            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
-
-  defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
-  // More complex cases use a ComplexPattern.
-  def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
-            (shxadd_uw pat:$rs1, GPR:$rs2)>;
-}
-
-multiclass Sh1Add_UWPat<Instruction sh1add_uw> {
-  def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF),
-                                     (XLenVT GPR:$rs2))),
-            (sh1add_uw GPR:$rs1, GPR:$rs2)>;
-  // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-  def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE),
-                                     (XLenVT GPR:$rs2))),
-            (sh1add_uw (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
-}
-
-multiclass Sh2Add_UWPat<Instruction sh2add_uw> {
-  def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF),
-                                     (XLenVT GPR:$rs2))),
-            (sh2add_uw GPR:$rs1, GPR:$rs2)>;
-  // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-  def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC),
-                                     (XLenVT GPR:$rs2))),
-            (sh2add_uw (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
-}
-
-multiclass Sh3Add_UWPat<Instruction sh3add_uw> {
-  def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8),
-                                     (XLenVT GPR:$rs2))),
-            (sh3add_uw (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
-  // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-  def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8),
-                                     (XLenVT GPR:$rs2))),
-            (sh3add_uw (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
-}
-
-class Sh1AddPat<Instruction sh1add>
-    : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE),
-                                   (XLenVT GPR:$rs2))),
-          (sh1add (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
-
-class Sh2AddPat<Instruction sh2add>
-    : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC),
-                                   (XLenVT GPR:$rs2))),
-          (sh2add (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
-
-class Sh3AddPat<Instruction sh3add>
-    : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8),
-                                   (XLenVT GPR:$rs2))),
-          (sh3add (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
-
 let Predicates = [HasStdExtZba, IsRV64] in {
 def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
           (SLLI_UW GPR:$rs1, uimm5:$shamt)>;
@@ -776,21 +704,47 @@ def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
 def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
           (SLLI_UW (XLenVT (SRLI GPR:$rs1, Shifted32OnesMask:$mask)),
                    Shifted32OnesMask:$mask)>;
-
-defm : ADD_UWPat<ADD_UW>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
+          (ADD_UW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
 
 foreach i = {1,2,3} in {
   defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
-  defm : ShxAdd_UWPat<i, shxadd_uw>;
+  def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
+            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
+  def : Pat<(i64 (riscv_shl_add (and GPR:$rs1, 0xFFFFFFFF), (i64 i), GPR:$rs2)),
+            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
+}
+
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
+          (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
+          (SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
+          (SH3ADD_UW GPR:$rs1, GPR:$rs2)>;
+
+// More complex cases use a ComplexPattern.
+foreach i = {1,2,3} in {
+  defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
+  def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
+            (!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
 }
 
-defm : Sh1Add_UWPat<SH1ADD_UW>;
-defm : Sh2Add_UWPat<SH2ADD_UW>;
-defm : Sh3Add_UWPat<SH3ADD_UW>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
+          (SH1ADD (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
+          (SH2ADD (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
+          (SH3ADD (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
+
+// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
+          (SH1ADD_UW (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
+          (SH2ADD_UW (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
+          (SH3ADD_UW (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
 
-def : Sh1AddPat<SH1ADD>;
-def : Sh2AddPat<SH2ADD>;
-def : Sh3AddPat<SH3ADD>;
 } // Predicates = [HasStdExtZba, IsRV64]
 
 let Predicates = [HasStdExtZbcOrZbkc] in {

diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index ab099103b4216..f8ca41782c6e1 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -3,8 +3,6 @@
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32I
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+zba -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBA
-; RUN: llc -mtriple=riscv32 -mattr=+m,+xandesperf -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32XANDESPERF
 
 define signext i16 @sh1add(i64 %0, ptr %1) {
 ; RV32I-LABEL: sh1add:
@@ -19,12 +17,6 @@ define signext i16 @sh1add(i64 %0, ptr %1) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a2
 ; RV32ZBA-NEXT:    lh a0, 0(a0)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: sh1add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a2, a0
-; RV32XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV32XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i16, ptr %1, i64 %0
   %4 = load i16, ptr %3
   ret i16 %4
@@ -43,12 +35,6 @@ define i32 @sh2add(i64 %0, ptr %1) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a2
 ; RV32ZBA-NEXT:    lw a0, 0(a0)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: sh2add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a2, a0
-; RV32XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV32XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i32, ptr %1, i64 %0
   %4 = load i32, ptr %3
   ret i32 %4
@@ -69,13 +55,6 @@ define i64 @sh3add(i64 %0, ptr %1) {
 ; RV32ZBA-NEXT:    lw a0, 0(a1)
 ; RV32ZBA-NEXT:    lw a1, 4(a1)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: sh3add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a2, a0
-; RV32XANDESPERF-NEXT:    lw a0, 0(a1)
-; RV32XANDESPERF-NEXT:    lw a1, 4(a1)
-; RV32XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i64, ptr %1, i64 %0
   %4 = load i64, ptr %3
   ret i64 %4
@@ -95,12 +74,6 @@ define i32 @addmul6(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    sh1add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul6:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 6
   %d = add i32 %c, %b
   ret i32 %d
@@ -120,12 +93,6 @@ define i32 @addmul10(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    sh1add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul10:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 10
   %d = add i32 %c, %b
   ret i32 %d
@@ -145,12 +112,6 @@ define i32 @addmul12(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul12:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 12
   %d = add i32 %c, %b
   ret i32 %d
@@ -170,12 +131,6 @@ define i32 @addmul18(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    sh1add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul18:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 18
   %d = add i32 %c, %b
   ret i32 %d
@@ -195,12 +150,6 @@ define i32 @addmul20(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul20:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 20
   %d = add i32 %c, %b
   ret i32 %d
@@ -220,12 +169,6 @@ define i32 @addmul24(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul24:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 24
   %d = add i32 %c, %b
   ret i32 %d
@@ -245,12 +188,6 @@ define i32 @addmul36(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul36:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 36
   %d = add i32 %c, %b
   ret i32 %d
@@ -270,12 +207,6 @@ define i32 @addmul40(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul40:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 40
   %d = add i32 %c, %b
   ret i32 %d
@@ -295,12 +226,6 @@ define i32 @addmul72(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addmul72:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 72
   %d = add i32 %c, %b
   ret i32 %d
@@ -319,12 +244,6 @@ define i32 @mul96(i32 %a) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul96:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 96
   ret i32 %c
 }
@@ -342,12 +261,6 @@ define i32 @mul160(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul160:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 160
   ret i32 %c
 }
@@ -365,12 +278,6 @@ define i32 @mul288(i32 %a) {
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul288:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 288
   ret i32 %c
 }
@@ -388,12 +295,6 @@ define i32 @mul258(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 8
 ; RV32ZBA-NEXT:    sh1add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul258:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 8
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 258
   ret i32 %c
 }
@@ -411,12 +312,6 @@ define i32 @mul260(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 8
 ; RV32ZBA-NEXT:    sh2add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul260:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 8
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 260
   ret i32 %c
 }
@@ -434,12 +329,6 @@ define i32 @mul264(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 8
 ; RV32ZBA-NEXT:    sh3add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul264:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 8
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 264
   ret i32 %c
 }
@@ -456,12 +345,6 @@ define i32 @mul11(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a1, a0, a0
 ; RV32ZBA-NEXT:    sh1add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul11:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 11
   ret i32 %c
 }
@@ -478,12 +361,6 @@ define i32 @mul19(i32 %a) {
 ; RV32ZBA-NEXT:    sh3add a1, a0, a0
 ; RV32ZBA-NEXT:    sh1add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul19:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 19
   ret i32 %c
 }
@@ -500,12 +377,6 @@ define i32 @mul13(i32 %a) {
 ; RV32ZBA-NEXT:    sh1add a1, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul13:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 13
   ret i32 %c
 }
@@ -522,12 +393,6 @@ define i32 @mul21(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a1, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul21:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 21
   ret i32 %c
 }
@@ -544,12 +409,6 @@ define i32 @mul37(i32 %a) {
 ; RV32ZBA-NEXT:    sh3add a1, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul37:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 37
   ret i32 %c
 }
@@ -566,12 +425,6 @@ define i32 @mul25(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul25:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 25
   ret i32 %c
 }
@@ -588,12 +441,6 @@ define i32 @mul41(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a1, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul41:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 41
   ret i32 %c
 }
@@ -610,12 +457,6 @@ define i32 @mul73(i32 %a) {
 ; RV32ZBA-NEXT:    sh3add a1, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul73:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 73
   ret i32 %c
 }
@@ -632,12 +473,6 @@ define i32 @mul27(i32 %a) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul27:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 27
   ret i32 %c
 }
@@ -654,12 +489,6 @@ define i32 @mul45(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul45:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 45
   ret i32 %c
 }
@@ -676,12 +505,6 @@ define i32 @mul81(i32 %a) {
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    sh3add a0, a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul81:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 81
   ret i32 %c
 }
@@ -699,12 +522,6 @@ define i32 @mul4098(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 12
 ; RV32ZBA-NEXT:    sh1add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul4098:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 12
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 4098
   ret i32 %c
 }
@@ -722,12 +539,6 @@ define i32 @mul4100(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 12
 ; RV32ZBA-NEXT:    sh2add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul4100:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 12
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 4100
   ret i32 %c
 }
@@ -745,12 +556,6 @@ define i32 @mul4104(i32 %a) {
 ; RV32ZBA-NEXT:    slli a1, a0, 12
 ; RV32ZBA-NEXT:    sh3add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul4104:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    slli a1, a0, 12
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 4104
   ret i32 %c
 }
@@ -768,12 +573,6 @@ define i32 @add4104(i32 %a) {
 ; RV32ZBA-NEXT:    li a1, 1026
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: add4104:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    li a1, 1026
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = add i32 %a, 4104
   ret i32 %c
 }
@@ -791,12 +590,6 @@ define i32 @add8208(i32 %a) {
 ; RV32ZBA-NEXT:    li a1, 1026
 ; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: add8208:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    li a1, 1026
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV32XANDESPERF-NEXT:    ret
   %c = add i32 %a, 8208
   ret i32 %c
 }
@@ -824,12 +617,6 @@ define i32 @addshl_5_6(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh1add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addshl_5_6:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 6
   %e = add i32 %c, %d
@@ -849,12 +636,6 @@ define i32 @addshl_5_7(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addshl_5_7:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 7
   %e = add i32 %c, %d
@@ -874,12 +655,6 @@ define i32 @addshl_5_8(i32 %a, i32 %b) {
 ; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: addshl_5_8:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV32XANDESPERF-NEXT:    slli a0, a0, 5
-; RV32XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 8
   %e = add i32 %c, %d
@@ -901,13 +676,6 @@ define i32 @srli_1_sh2add(ptr %0, i32 %1) {
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    lw a0, 0(a0)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: srli_1_sh2add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    srli a1, a1, 1
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV32XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 1
   %4 = getelementptr inbounds i32, ptr %0, i32 %3
   %5 = load i32, ptr %4, align 4
@@ -931,14 +699,6 @@ define i64 @srli_2_sh3add(ptr %0, i32 %1) {
 ; RV32ZBA-NEXT:    lw a0, 0(a1)
 ; RV32ZBA-NEXT:    lw a1, 4(a1)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: srli_2_sh3add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    srli a1, a1, 2
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a0, a1
-; RV32XANDESPERF-NEXT:    lw a0, 0(a1)
-; RV32XANDESPERF-NEXT:    lw a1, 4(a1)
-; RV32XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 2
   %4 = getelementptr inbounds i64, ptr %0, i32 %3
   %5 = load i64, ptr %4, align 8
@@ -960,13 +720,6 @@ define signext i16 @srli_2_sh1add(ptr %0, i32 %1) {
 ; RV32ZBA-NEXT:    sh1add a0, a1, a0
 ; RV32ZBA-NEXT:    lh a0, 0(a0)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: srli_2_sh1add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    srli a1, a1, 2
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV32XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV32XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 2
   %4 = getelementptr inbounds i16, ptr %0, i32 %3
   %5 = load i16, ptr %4, align 2
@@ -988,13 +741,6 @@ define i32 @srli_3_sh2add(ptr %0, i32 %1) {
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    lw a0, 0(a0)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: srli_3_sh2add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    srli a1, a1, 3
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV32XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV32XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 3
   %4 = getelementptr inbounds i32, ptr %0, i32 %3
   %5 = load i32, ptr %4, align 4
@@ -1018,14 +764,6 @@ define i64 @srli_4_sh3add(ptr %0, i32 %1) {
 ; RV32ZBA-NEXT:    lw a0, 0(a1)
 ; RV32ZBA-NEXT:    lw a1, 4(a1)
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: srli_4_sh3add:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    srli a1, a1, 4
-; RV32XANDESPERF-NEXT:    nds.lea.d a1, a0, a1
-; RV32XANDESPERF-NEXT:    lw a0, 0(a1)
-; RV32XANDESPERF-NEXT:    lw a1, 4(a1)
-; RV32XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 4
   %4 = getelementptr inbounds i64, ptr %0, i32 %3
   %5 = load i64, ptr %4, align 8
@@ -1064,12 +802,6 @@ define i32 @mul_neg3(i32 %a) {
 ; RV32ZBA-NEXT:    sh1add a0, a0, a0
 ; RV32ZBA-NEXT:    neg a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul_neg3:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV32XANDESPERF-NEXT:    neg a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, -3
   ret i32 %c
 }
@@ -1097,12 +829,6 @@ define i32 @mul_neg5(i32 %a) {
 ; RV32ZBA-NEXT:    sh2add a0, a0, a0
 ; RV32ZBA-NEXT:    neg a0, a0
 ; RV32ZBA-NEXT:    ret
-;
-; RV32XANDESPERF-LABEL: mul_neg5:
-; RV32XANDESPERF:       # %bb.0:
-; RV32XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV32XANDESPERF-NEXT:    neg a0, a0
-; RV32XANDESPERF-NEXT:    ret
   %c = mul i32 %a, -5
   ret i32 %c
 }

diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index a0238458aff81..e362e5ebd8192 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -7,8 +7,6 @@
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBNOZBS
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb,+zbs -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBZBS
-; RUN: llc -mtriple=riscv64 -mattr=+m,+xandesperf -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64XANDESPERF
 
 define i64 @slliuw(i64 %a) nounwind {
 ; RV64I-LABEL: slliuw:
@@ -21,12 +19,6 @@ define i64 @slliuw(i64 %a) nounwind {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: slliuw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 31
-; RV64XANDESPERF-NEXT:    ret
   %conv1 = shl i64 %a, 1
   %shl = and i64 %conv1, 8589934590
   ret i64 %shl
@@ -49,15 +41,6 @@ define i128 @slliuw_2(i32 signext %0, ptr %1) {
 ; RV64ZBA-NEXT:    ld a0, 0(a1)
 ; RV64ZBA-NEXT:    ld a1, 8(a1)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: slliuw_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 28
-; RV64XANDESPERF-NEXT:    add a1, a1, a0
-; RV64XANDESPERF-NEXT:    ld a0, 0(a1)
-; RV64XANDESPERF-NEXT:    ld a1, 8(a1)
-; RV64XANDESPERF-NEXT:    ret
   %3 = zext i32 %0 to i64
   %4 = getelementptr inbounds i128, ptr %1, i64 %3
   %5 = load i128, ptr %4
@@ -76,11 +59,6 @@ define i64 @adduw(i64 %a, i64 %b) nounwind {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    add.uw a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %and = and i64 %b, 4294967295
   %add = add i64 %and, %a
   ret i64 %add
@@ -100,12 +78,6 @@ define signext i8 @adduw_2(i32 signext %0, ptr %1) {
 ; RV64ZBA-NEXT:    add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    lb a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: adduw_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    lb a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = zext i32 %0 to i64
   %4 = getelementptr inbounds i8, ptr %1, i64 %3
   %5 = load i8, ptr %4
@@ -123,11 +95,6 @@ define i64 @zextw_i64(i64 %a) nounwind {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zextw_i64:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, zero, a0
-; RV64XANDESPERF-NEXT:    ret
   %and = and i64 %a, 4294967295
   ret i64 %and
 }
@@ -147,12 +114,6 @@ define i64 @zextw_demandedbits_i64(i64 %0) {
 ; RV64ZBA-NEXT:    ori a0, a0, 1
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zextw_demandedbits_i64:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    ori a0, a0, 1
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, zero, a0
-; RV64XANDESPERF-NEXT:    ret
   %2 = and i64 %0, 4294967294
   %3 = or i64 %2, 1
   ret i64 %3
@@ -171,12 +132,6 @@ define signext i16 @sh1add(i64 %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i16, ptr %1, i64 %0
   %4 = load i16, ptr %3
   ret i16 %4
@@ -195,12 +150,6 @@ define signext i32 @sh2add(i64 %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i32, ptr %1, i64 %0
   %4 = load i32, ptr %3
   ret i32 %4
@@ -219,12 +168,6 @@ define i64 @sh3add(i64 %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = getelementptr inbounds i64, ptr %1, i64 %0
   %4 = load i64, ptr %3
   ret i64 %4
@@ -244,12 +187,6 @@ define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = zext i32 %0 to i64
   %4 = getelementptr inbounds i16, ptr %1, i64 %3
   %5 = load i16, ptr %4
@@ -268,11 +205,6 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1adduw_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 1
   %4 = and i64 %3, 8589934590
   %5 = add i64 %4, %1
@@ -291,11 +223,6 @@ define i64 @sh1adduw_3(i64 %0, i64 %1) {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1adduw_3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 1
   %4 = and i64 %3, 8589934590
   %5 = or disjoint i64 %4, %1
@@ -316,12 +243,6 @@ define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = zext i32 %0 to i64
   %4 = getelementptr inbounds i32, ptr %1, i64 %3
   %5 = load i32, ptr %4
@@ -340,11 +261,6 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2adduw_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 2
   %4 = and i64 %3, 17179869180
   %5 = add i64 %4, %1
@@ -363,11 +279,6 @@ define i64 @sh2adduw_3(i64 %0, i64 %1) {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2adduw_3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 2
   %4 = and i64 %3, 17179869180
   %5 = or disjoint i64 %4, %1
@@ -388,12 +299,6 @@ define i64 @sh3adduw(i32 signext %0, ptr %1) {
 ; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = zext i32 %0 to i64
   %4 = getelementptr inbounds i64, ptr %1, i64 %3
   %5 = load i64, ptr %4
@@ -410,17 +315,8 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
 ;
 ; RV64ZBA-LABEL: sh3adduw_2:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    slli a0, a0, 3
-; RV64ZBA-NEXT:    srli a0, a0, 3
 ; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3adduw_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 3
-; RV64XANDESPERF-NEXT:    srli a0, a0, 3
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 3
   %4 = and i64 %3, 34359738360
   %5 = add i64 %4, %1
@@ -437,17 +333,8 @@ define i64 @sh3adduw_3(i64 %0, i64 %1) {
 ;
 ; RV64ZBA-LABEL: sh3adduw_3:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    slli a0, a0, 3
-; RV64ZBA-NEXT:    srli a0, a0, 3
 ; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3adduw_3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 3
-; RV64XANDESPERF-NEXT:    srli a0, a0, 3
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i64 %0, 3
   %4 = and i64 %3, 34359738360
   %5 = or disjoint i64 %4, %1
@@ -476,14 +363,6 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
 ; RV64ZBA-NEXT:    sraiw a0, a0, 2
 ; RV64ZBA-NEXT:    mul a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2add_extra_sext:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    sllw a1, a2, a0
-; RV64XANDESPERF-NEXT:    sraiw a0, a0, 2
-; RV64XANDESPERF-NEXT:    mul a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %a = shl i32 %x, 2
   %b = add i32 %a, %y
   %c = shl i32 %z, %b
@@ -508,12 +387,6 @@ define i64 @addmul6(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul6:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 6
   %d = add i64 %c, %b
   ret i64 %d
@@ -533,12 +406,6 @@ define i64 @disjointormul6(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: disjointormul6:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 6
   %d = or disjoint i64 %c, %b
   ret i64 %d
@@ -558,12 +425,6 @@ define i64 @addmul10(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul10:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 10
   %d = add i64 %c, %b
   ret i64 %d
@@ -583,12 +444,6 @@ define i64 @addmul12(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul12:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 12
   %d = add i64 %c, %b
   ret i64 %d
@@ -608,12 +463,6 @@ define i64 @addmul18(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul18:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 18
   %d = add i64 %c, %b
   ret i64 %d
@@ -633,12 +482,6 @@ define i64 @addmul20(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul20:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 20
   %d = add i64 %c, %b
   ret i64 %d
@@ -670,12 +513,6 @@ define i64 @addmul24(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul24:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 24
   %d = add i64 %c, %b
   ret i64 %d
@@ -695,12 +532,6 @@ define i64 @addmul36(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul36:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 36
   %d = add i64 %c, %b
   ret i64 %d
@@ -720,12 +551,6 @@ define i64 @addmul40(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul40:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 40
   %d = add i64 %c, %b
   ret i64 %d
@@ -745,12 +570,6 @@ define i64 @addmul72(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul72:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 72
   %d = add i64 %c, %b
   ret i64 %d
@@ -769,13 +588,6 @@ define i64 @mul50(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul50:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 50
   ret i64 %c
 }
@@ -794,13 +606,6 @@ define i64 @addmul50(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul50:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 50
   %d = add i64 %c, %b
   ret i64 %d
@@ -819,13 +624,6 @@ define i64 @mul100(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 2
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul100:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 100
   ret i64 %c
 }
@@ -844,13 +642,6 @@ define i64 @addmul100(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul100:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 100
   %d = add i64 %c, %b
   ret i64 %d
@@ -869,13 +660,6 @@ define i64 @mul162(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul162:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 162
   ret i64 %c
 }
@@ -894,13 +678,6 @@ define i64 @addmul162(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul162:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 162
   %d = add i64 %c, %b
   ret i64 %d
@@ -919,13 +696,6 @@ define i64 @mul180(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 2
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul180:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 180
   ret i64 %c
 }
@@ -944,13 +714,6 @@ define i64 @addmul180(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul180:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 180
   %d = add i64 %c, %b
   ret i64 %d
@@ -971,14 +734,6 @@ define i64 @add255mul180(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 2
 ; RV64ZBA-NEXT:    addi a0, a0, 255
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: add255mul180:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    addi a0, a0, 255
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 180
   %d = add i64 %c, 255
   ret i64 %d
@@ -997,13 +752,6 @@ define i64 @mul200(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 3
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul200:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 3
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 200
   ret i64 %c
 }
@@ -1022,13 +770,6 @@ define i64 @addmul200(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addmul200:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 200
   %d = add i64 %c, %b
   ret i64 %d
@@ -1071,12 +812,6 @@ define i64 @mul96(i64 %a) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul96:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 96
   ret i64 %c
 }
@@ -1094,13 +829,6 @@ define i64 @mul119(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    sub a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul119:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    sub a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 119
   ret i64 %c
 }
@@ -1118,13 +846,6 @@ define i64 @mul123(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    sub a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul123:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    sub a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 123
   ret i64 %c
 }
@@ -1142,13 +863,6 @@ define i64 @mul125(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    sub a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul125:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    sub a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 125
   ret i64 %c
 }
@@ -1166,13 +880,6 @@ define i64 @mul131(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul131:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 131
   ret i64 %c
 }
@@ -1190,13 +897,6 @@ define i64 @mul133(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul133:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 133
   ret i64 %c
 }
@@ -1214,13 +914,6 @@ define i64 @mul137(i64 %a) {
 ; RV64ZBA-NEXT:    slli a0, a0, 7
 ; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul137:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 7
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 137
   ret i64 %c
 }
@@ -1238,12 +931,6 @@ define i64 @mul160(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul160:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 160
   ret i64 %c
 }
@@ -1261,12 +948,6 @@ define i64 @mul288(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul288:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 288
   ret i64 %c
 }
@@ -1285,13 +966,6 @@ define i64 @zext_mul68(i32 signext %a) {
 ; RV64ZBA-NEXT:    slli.uw a1, a0, 6
 ; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul68:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 32
-; RV64XANDESPERF-NEXT:    srli a1, a1, 26
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 68
   ret i64 %c
@@ -1311,13 +985,6 @@ define i64 @zext_mul96(i32 signext %a) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 5
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul96:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 27
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 96
   ret i64 %c
@@ -1337,13 +1004,6 @@ define i64 @zext_mul160(i32 signext %a) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 5
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul160:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 27
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 160
   ret i64 %c
@@ -1363,13 +1023,6 @@ define i64 @zext_mul288(i32 signext %a) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 5
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul288:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 27
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 288
   ret i64 %c
@@ -1389,12 +1042,6 @@ define i64 @zext_mul12884901888(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 32
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul12884901888:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 12884901888
   ret i64 %c
@@ -1414,12 +1061,6 @@ define i64 @zext_mul21474836480(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 32
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul21474836480:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 21474836480
   ret i64 %c
@@ -1439,12 +1080,6 @@ define i64 @zext_mul38654705664(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 32
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: zext_mul38654705664:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    ret
   %b = zext i32 %a to i64
   %c = mul i64 %b, 38654705664
   ret i64 %c
@@ -1496,13 +1131,6 @@ define i64 @sh1adduw_imm(i32 signext %0) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 1
 ; RV64ZBA-NEXT:    addi a0, a0, 11
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1adduw_imm:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 31
-; RV64XANDESPERF-NEXT:    addi a0, a0, 11
-; RV64XANDESPERF-NEXT:    ret
   %a = zext i32 %0 to i64
   %b = shl i64 %a, 1
   %c = add i64 %b, 11
@@ -1522,13 +1150,6 @@ define i64 @sh2adduw_imm(i32 signext %0) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 2
 ; RV64ZBA-NEXT:    addi a0, a0, -12
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2adduw_imm:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 30
-; RV64XANDESPERF-NEXT:    addi a0, a0, -12
-; RV64XANDESPERF-NEXT:    ret
   %a = zext i32 %0 to i64
   %b = shl i64 %a, 2
   %c = add i64 %b, -12
@@ -1548,13 +1169,6 @@ define i64 @sh3adduw_imm(i32 signext %0) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 3
 ; RV64ZBA-NEXT:    addi a0, a0, 13
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3adduw_imm:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 29
-; RV64XANDESPERF-NEXT:    addi a0, a0, 13
-; RV64XANDESPERF-NEXT:    ret
   %a = zext i32 %0 to i64
   %b = shl i64 %a, 3
   %c = add i64 %b, 13
@@ -1574,12 +1188,6 @@ define i64 @adduw_imm(i32 signext %0) nounwind {
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    addi a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: adduw_imm:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, zero, a0
-; RV64XANDESPERF-NEXT:    addi a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %a = zext i32 %0 to i64
   %b = add i64 %a, 5
   ret i64 %b
@@ -1598,12 +1206,6 @@ define i64 @mul258(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 8
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul258:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 8
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 258
   ret i64 %c
 }
@@ -1621,12 +1223,6 @@ define i64 @mul260(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 8
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul260:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 8
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 260
   ret i64 %c
 }
@@ -1644,12 +1240,6 @@ define i64 @mul264(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 8
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul264:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 8
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 264
   ret i64 %c
 }
@@ -1667,13 +1257,6 @@ define i64 @imm_zextw() nounwind {
 ; RV64ZBA-NEXT:    li a0, -2
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: imm_zextw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    li a0, 1
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    addi a0, a0, -2
-; RV64XANDESPERF-NEXT:    ret
   ret i64 4294967294 ; -2 in 32 bits.
 }
 
@@ -1689,12 +1272,6 @@ define i64 @mul11(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a1, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul11:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 11
   ret i64 %c
 }
@@ -1711,12 +1288,6 @@ define i64 @mul19(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a1, a0, a0
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul19:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 19
   ret i64 %c
 }
@@ -1733,12 +1304,6 @@ define i64 @mul13(i64 %a) {
 ; RV64ZBA-NEXT:    sh1add a1, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul13:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 13
   ret i64 %c
 }
@@ -1755,12 +1320,6 @@ define i64 @mul21(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a1, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul21:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 21
   ret i64 %c
 }
@@ -1777,12 +1336,6 @@ define i64 @mul37(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a1, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul37:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 37
   ret i64 %c
 }
@@ -1799,12 +1352,6 @@ define i64 @mul25(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul25:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 25
   ret i64 %c
 }
@@ -1821,12 +1368,6 @@ define i64 @mul41(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a1, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul41:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 41
   ret i64 %c
 }
@@ -1843,12 +1384,6 @@ define i64 @mul73(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a1, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul73:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 73
   ret i64 %c
 }
@@ -1865,12 +1400,6 @@ define i64 @mul27(i64 %a) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul27:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 27
   ret i64 %c
 }
@@ -1887,12 +1416,6 @@ define i64 @mul45(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul45:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 45
   ret i64 %c
 }
@@ -1909,12 +1432,6 @@ define i64 @mul81(i64 %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul81:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 81
   ret i64 %c
 }
@@ -1932,12 +1449,6 @@ define i64 @mul4098(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 12
 ; RV64ZBA-NEXT:    sh1add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul4098:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 12
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 4098
   ret i64 %c
 }
@@ -1955,12 +1466,6 @@ define i64 @mul4100(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 12
 ; RV64ZBA-NEXT:    sh2add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul4100:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 12
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 4100
   ret i64 %c
 }
@@ -1978,12 +1483,6 @@ define i64 @mul4104(i64 %a) {
 ; RV64ZBA-NEXT:    slli a1, a0, 12
 ; RV64ZBA-NEXT:    sh3add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul4104:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a0, 12
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, 4104
   ret i64 %c
 }
@@ -2001,12 +1500,6 @@ define signext i32 @mulw192(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 6
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mulw192:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 6
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 192
   ret i32 %c
 }
@@ -2024,12 +1517,6 @@ define signext i32 @mulw320(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 6
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mulw320:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 6
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 320
   ret i32 %c
 }
@@ -2047,12 +1534,6 @@ define signext i32 @mulw576(i32 signext %a) {
 ; RV64ZBA-NEXT:    sh3add a0, a0, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 6
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mulw576:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 6
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i32 %a, 576
   ret i32 %c
 }
@@ -2070,12 +1551,6 @@ define i64 @add4104(i64 %a) {
 ; RV64ZBA-NEXT:    li a1, 1026
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: add4104:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    li a1, 1026
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = add i64 %a, 4104
   ret i64 %c
 }
@@ -2093,12 +1568,6 @@ define i64 @add4104_2(i64 %a) {
 ; RV64ZBA-NEXT:    li a1, 1026
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: add4104_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    li a1, 1026
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = or disjoint i64 %a, 4104
   ret i64 %c
 }
@@ -2116,12 +1585,6 @@ define i64 @add8208(i64 %a) {
 ; RV64ZBA-NEXT:    li a1, 1026
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: add8208:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    li a1, 1026
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %c = add i64 %a, 8208
   ret i64 %c
 }
@@ -2161,12 +1624,6 @@ define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl32_5_6:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 6
   %e = add i32 %c, %d
@@ -2186,12 +1643,6 @@ define i64 @addshl64_5_6(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl64_5_6:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 6
   %e = add i64 %c, %d
@@ -2211,12 +1662,6 @@ define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl32_5_7:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 7
   %e = add i32 %c, %d
@@ -2236,12 +1681,6 @@ define i64 @addshl64_5_7(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl64_5_7:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 7
   %e = add i64 %c, %d
@@ -2261,12 +1700,6 @@ define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl32_5_8:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    slliw a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 8
   %e = add i32 %c, %d
@@ -2286,12 +1719,6 @@ define i64 @addshl64_5_8(i64 %a, i64 %b) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: addshl64_5_8:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    slli a0, a0, 5
-; RV64XANDESPERF-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 8
   %e = add i64 %c, %d
@@ -2320,13 +1747,6 @@ define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
 ; RV64ZBAZBB-NEXT:    slli a0, a0, 23
 ; RV64ZBAZBB-NEXT:    srli a0, a0, 32
 ; RV64ZBAZBB-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sext_ashr_zext_i8:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 56
-; RV64XANDESPERF-NEXT:    srai a0, a0, 31
-; RV64XANDESPERF-NEXT:    srli a0, a0, 32
-; RV64XANDESPERF-NEXT:    ret
   %ext = sext i8 %a to i32
   %1 = ashr i32 %ext, 9
   ret i32 %1
@@ -2346,12 +1766,6 @@ define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
 ; RV64ZBA-NEXT:    sh3add a1, a1, a2
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh6_sh3_add1:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a2, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %shl = shl i64 %z, 3
   %shl1 = shl i64 %y, 6
@@ -2374,13 +1788,6 @@ define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
 ; RV64ZBA-NEXT:    sh3add a1, a1, a2
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh6_sh3_add2:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    slli a1, a1, 6
-; RV64XANDESPERF-NEXT:    add a0, a1, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %shl = shl i64 %z, 3
   %shl1 = shl i64 %y, 6
@@ -2403,12 +1810,6 @@ define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
 ; RV64ZBA-NEXT:    sh3add a1, a1, a2
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh6_sh3_add3:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    nds.lea.d a1, a2, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %shl = shl i64 %z, 3
   %shl1 = shl i64 %y, 6
@@ -2432,13 +1833,6 @@ define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
 ; RV64ZBA-NEXT:    sh3add a0, a2, a0
 ; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh6_sh3_add4:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    slli a1, a1, 6
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %shl = shl i64 %z, 3
   %shl1 = shl i64 %y, 6
@@ -2469,13 +1863,6 @@ define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
 ; RV64ZBAZBB-NEXT:    slli a0, a0, 23
 ; RV64ZBAZBB-NEXT:    srli a0, a0, 32
 ; RV64ZBAZBB-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sext_ashr_zext_i16:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a0, a0, 48
-; RV64XANDESPERF-NEXT:    srai a0, a0, 25
-; RV64XANDESPERF-NEXT:    srli a0, a0, 32
-; RV64XANDESPERF-NEXT:    ret
   %ext = sext i16 %a to i32
   %1 = ashr i32 %ext, 9
   ret i32 %1
@@ -2500,13 +1887,6 @@ define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
 ; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh1adduw_ptrdiff:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a0, a0, 1
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %ptrdiff = lshr exact i64 %diff, 1
   %cast = and i64 %ptrdiff, 4294967295
   %ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
@@ -2531,13 +1911,6 @@ define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
 ; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh2adduw_ptrdiff:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a0, a0, 2
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %ptrdiff = lshr exact i64 %diff, 2
   %cast = and i64 %ptrdiff, 4294967295
   %ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
@@ -2562,13 +1935,6 @@ define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
 ; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: sh3adduw_ptrdiff:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a0, a0, 3
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %ptrdiff = lshr exact i64 %diff, 3
   %cast = and i64 %ptrdiff, 4294967295
   %ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
@@ -2591,13 +1957,6 @@ define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_1_sh1add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 1
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 1
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -2625,17 +1984,6 @@ define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
 ; RV64ZBA-NEXT:    ld a0, 0(a1)
 ; RV64ZBA-NEXT:    ld a1, 8(a1)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: slliuw_ptrdiff:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    li a2, 1
-; RV64XANDESPERF-NEXT:    slli a2, a2, 36
-; RV64XANDESPERF-NEXT:    addi a2, a2, -16
-; RV64XANDESPERF-NEXT:    and a0, a0, a2
-; RV64XANDESPERF-NEXT:    add a1, a1, a0
-; RV64XANDESPERF-NEXT:    ld a0, 0(a1)
-; RV64XANDESPERF-NEXT:    ld a1, 8(a1)
-; RV64XANDESPERF-NEXT:    ret
   %ptrdiff = lshr exact i64 %diff, 4
   %cast = and i64 %ptrdiff, 4294967295
   %ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
@@ -2658,13 +2006,6 @@ define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_2_sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 2
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2684,16 +2025,9 @@ define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
 ; RV64ZBA-LABEL: srliw_3_sh3add:
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    srliw a1, a1, 3
-; RV64ZBA-NEXT:    sh3add.uw a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_3_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 3
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 3
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2716,13 +2050,6 @@ define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_1_sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 1
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2745,13 +2072,6 @@ define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_1_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 1
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2774,13 +2094,6 @@ define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_2_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 2
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2803,13 +2116,6 @@ define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_2_sh1add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 2
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -2833,13 +2139,6 @@ define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_3_sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 3
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 3
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2862,13 +2161,6 @@ define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srliw_4_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a1, a1, 4
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i32 %1, 4
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2891,13 +2183,6 @@ define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_1_sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i64 %1, 1
   %4 = getelementptr inbounds i32, ptr %0, i64 %3
   %5 = load i32, ptr %4, align 4
@@ -2919,13 +2204,6 @@ define i64 @srli_2_sh3add(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_2_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i64 %1, 2
   %4 = getelementptr inbounds i64, ptr %0, i64 %3
   %5 = load i64, ptr %4, align 8
@@ -2947,13 +2225,6 @@ define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_2_sh1add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i64 %1, 2
   %4 = getelementptr inbounds i16, ptr %0, i64 %3
   %5 = load i16, ptr %4, align 2
@@ -2975,13 +2246,6 @@ define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_3_sh2add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 3
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i64 %1, 3
   %4 = getelementptr inbounds i32, ptr %0, i64 %3
   %5 = load i32, ptr %4, align 4
@@ -3003,13 +2267,6 @@ define i64 @srli_4_sh3add(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_4_sh3add:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 4
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = lshr i64 %1, 4
   %4 = getelementptr inbounds i64, ptr %0, i64 %3
   %5 = load i64, ptr %4, align 8
@@ -3031,13 +2288,6 @@ define signext i16 @shl_2_sh1adduw(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh1add.uw a0, a1, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: shl_2_sh1adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 2
-; RV64XANDESPERF-NEXT:    nds.lea.h.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i32 %1, 2
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -3060,13 +2310,6 @@ define signext i32 @shl_16_sh2adduw(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh2add.uw a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: shl_16_sh2adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 16
-; RV64XANDESPERF-NEXT:    nds.lea.w.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i32 %1, 16
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -3089,13 +2332,6 @@ define i64 @shl_31_sh3adduw(ptr %0, i32 signext %1) {
 ; RV64ZBA-NEXT:    sh3add.uw a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: shl_31_sh3adduw:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 31
-; RV64XANDESPERF-NEXT:    nds.lea.d.ze a0, a0, a1
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %3 = shl i32 %1, 31
   %4 = zext i32 %3 to i64
   %5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -3117,12 +2353,6 @@ define i64 @pack_i64(i64 %a, i64 %b) nounwind {
 ; RV64ZBA-NEXT:    slli a1, a1, 32
 ; RV64ZBA-NEXT:    add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: pack_i64:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 32
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %shl = and i64 %a, 4294967295
   %shl1 = shl i64 %b, 32
   %or = or i64 %shl1, %shl
@@ -3143,12 +2373,6 @@ define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
 ; RV64ZBA-NEXT:    slli a1, a1, 32
 ; RV64ZBA-NEXT:    add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: pack_i64_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 32
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %zexta = zext i32 %a to i64
   %zextb = zext i32 %b to i64
   %shl1 = shl i64 %zextb, 32
@@ -3168,11 +2392,6 @@ define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: pack_i64_disjoint:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %shl = and i64 %a, 4294967295
   %or = or disjoint i64 %b, %shl
   ret i64 %or
@@ -3190,11 +2409,6 @@ define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
 ; RV64ZBA:       # %bb.0:
 ; RV64ZBA-NEXT:    add.uw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: pack_i64_disjoint_2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.b.ze a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
   %zexta = zext i32 %a to i64
   %or = or disjoint i64 %b, %zexta
   ret i64 %or
@@ -3215,13 +2429,6 @@ define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    add a0, a0, a2
 ; RV64ZBA-NEXT:    lbu a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh1_sh0:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a1
-; RV64XANDESPERF-NEXT:    add a0, a0, a2
-; RV64XANDESPERF-NEXT:    lbu a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
   %b = load i8, ptr %a, align 1
   ret i8 %b
@@ -3243,13 +2450,6 @@ define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh1add a0, a2, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh1_sh1:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a2
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
   %b = load i16, ptr %a, align 2
   ret i16 %b
@@ -3271,13 +2471,6 @@ define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh2add a0, a2, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh1_sh2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a2
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
   %b = load i32, ptr %a, align 4
   ret i32 %b
@@ -3299,14 +2492,6 @@ define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh1_sh3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 4
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
   %b = load i64, ptr %a, align 8
   ret i64 %b
@@ -3327,13 +2512,6 @@ define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    add a0, a0, a2
 ; RV64ZBA-NEXT:    lbu a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh2_sh0:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a1
-; RV64XANDESPERF-NEXT:    add a0, a0, a2
-; RV64XANDESPERF-NEXT:    lbu a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
   %b = load i8, ptr %a, align 1
   ret i8 %b
@@ -3355,13 +2533,6 @@ define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh1add a0, a2, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh2_sh1:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a2
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
   %b = load i16, ptr %a, align 2
   ret i16 %b
@@ -3383,14 +2554,6 @@ define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh2_sh2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 4
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a2
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
   %b = load i32, ptr %a, align 4
   ret i32 %b
@@ -3412,14 +2575,6 @@ define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh2_sh3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 5
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
   %b = load i64, ptr %a, align 8
   ret i64 %b
@@ -3440,13 +2595,6 @@ define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    add a0, a0, a2
 ; RV64ZBA-NEXT:    lbu a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh3_sh0:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    add a0, a0, a2
-; RV64XANDESPERF-NEXT:    lbu a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
   %b = load i8, ptr %a, align 1
   ret i8 %b
@@ -3468,14 +2616,6 @@ define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh3_sh1:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 4
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a2
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
   %b = load i16, ptr %a, align 2
   ret i16 %b
@@ -3497,14 +2637,6 @@ define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh3_sh2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 5
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a2
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
   %b = load i32, ptr %a, align 4
   ret i32 %b
@@ -3526,14 +2658,6 @@ define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh3_sh3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 6
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
   %b = load i64, ptr %a, align 8
   ret i64 %b
@@ -3559,15 +2683,6 @@ define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_lshr_sh3_sh3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srli a1, a1, 58
-; RV64XANDESPERF-NEXT:    slli a1, a1, 6
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %shr = lshr i64 %idx1, 58
   %a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
   %b = load i64, ptr %a, align 8
@@ -3604,14 +2719,6 @@ define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh1add a0, a2, a0
 ; RV64ZBA-NEXT:    lh a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh4_sh1:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 5
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a2
-; RV64XANDESPERF-NEXT:    lh a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
   %b = load i16, ptr %a, align 2
   ret i16 %b
@@ -3634,14 +2741,6 @@ define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh2add a0, a2, a0
 ; RV64ZBA-NEXT:    lw a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh4_sh2:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 6
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a2
-; RV64XANDESPERF-NEXT:    lw a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
   %b = load i32, ptr %a, align 4
   ret i32 %b
@@ -3664,14 +2763,6 @@ define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
 ; RV64ZBA-NEXT:    sh3add a0, a2, a0
 ; RV64ZBA-NEXT:    ld a0, 0(a0)
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: array_index_sh4_sh3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    slli a1, a1, 7
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a2
-; RV64XANDESPERF-NEXT:    ld a0, 0(a0)
-; RV64XANDESPERF-NEXT:    ret
   %a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
   %b = load i64, ptr %a, align 8
   ret i64 %b
@@ -3693,14 +2784,6 @@ define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
 ; RV64ZBA-NEXT:    add a1, a2, a1
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: test_gep_gep_dont_crash:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    srliw a2, a2, 6
-; RV64XANDESPERF-NEXT:    slli a2, a2, 3
-; RV64XANDESPERF-NEXT:    add a0, a0, a2
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
   %lshr = lshr i64 %a2, 6
   %and = and i64 %lshr, 67108863
   %gep1 = getelementptr i64, ptr %p, i64 %and
@@ -3724,14 +2807,6 @@ define i64 @regression(i32 signext %x, i32 signext %y) {
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 3
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: regression:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    subw a0, a0, a1
-; RV64XANDESPERF-NEXT:    slli a0, a0, 32
-; RV64XANDESPERF-NEXT:    srli a0, a0, 29
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %sub = sub i32 %x, %y
   %ext = zext i32 %sub to i64
   %res = mul nuw nsw i64 %ext, 24
@@ -3770,12 +2845,6 @@ define i64 @mul_neg3(i64 %a) {
 ; RV64ZBA-NEXT:    sh1add a0, a0, a0
 ; RV64ZBA-NEXT:    neg a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul_neg3:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    neg a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, -3
   ret i64 %c
 }
@@ -3803,12 +2872,6 @@ define i64 @mul_neg5(i64 %a) {
 ; RV64ZBA-NEXT:    sh2add a0, a0, a0
 ; RV64ZBA-NEXT:    neg a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: mul_neg5:
-; RV64XANDESPERF:       # %bb.0:
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    neg a0, a0
-; RV64XANDESPERF-NEXT:    ret
   %c = mul i64 %a, -5
   ret i64 %c
 }
@@ -3875,14 +2938,6 @@ define i64 @bext_mul12(i32 %1, i32 %2) {
 ; RV64ZBAZBBZBS-NEXT:    sh1add a0, a0, a0
 ; RV64ZBAZBBZBS-NEXT:    slli a0, a0, 2
 ; RV64ZBAZBBZBS-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: bext_mul12:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srlw a0, a0, a1
-; RV64XANDESPERF-NEXT:    andi a0, a0, 1
-; RV64XANDESPERF-NEXT:    nds.lea.h a0, a0, a0
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %3 = lshr i32 %1, %2
   %4 = and i32 %3, 1
@@ -3922,14 +2977,6 @@ define i64 @bext_mul45(i32 %1, i32 %2) {
 ; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a0
 ; RV64ZBAZBBZBS-NEXT:    sh3add a0, a0, a0
 ; RV64ZBAZBBZBS-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: bext_mul45:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srlw a0, a0, a1
-; RV64XANDESPERF-NEXT:    andi a0, a0, 1
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a0, a0
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a0
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %3 = lshr i32 %1, %2
   %4 = and i32 %3, 1
@@ -3970,14 +3017,6 @@ define i64 @bext_mul132(i32 %1, i32 %2) {
 ; RV64ZBAZBBZBS-NEXT:    slli a1, a0, 7
 ; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a1
 ; RV64ZBAZBBZBS-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: bext_mul132:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srlw a0, a0, a1
-; RV64XANDESPERF-NEXT:    andi a0, a0, 1
-; RV64XANDESPERF-NEXT:    slli a1, a0, 7
-; RV64XANDESPERF-NEXT:    nds.lea.w a0, a1, a0
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %3 = lshr i32 %1, %2
   %4 = and i32 %3, 1
@@ -4004,17 +3043,6 @@ define ptr @gep_lshr_i32(ptr %0, i64 %1) {
 ; RV64ZBA-NEXT:    sh2add a1, a1, a1
 ; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: gep_lshr_i32:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    slli a1, a1, 2
-; RV64XANDESPERF-NEXT:    li a2, 1
-; RV64XANDESPERF-NEXT:    slli a2, a2, 36
-; RV64XANDESPERF-NEXT:    addi a2, a2, -16
-; RV64XANDESPERF-NEXT:    and a1, a1, a2
-; RV64XANDESPERF-NEXT:    nds.lea.w a1, a1, a1
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %2 = lshr exact i64 %1, 2
   %3 = and i64 %2, 4294967295
@@ -4037,15 +3065,6 @@ define i64 @srli_slliuw(i64 %1) {
 ; RV64ZBA-NEXT:    srli a0, a0, 2
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 4
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_slliuw:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    li a1, 1
-; RV64XANDESPERF-NEXT:    slli a1, a1, 36
-; RV64XANDESPERF-NEXT:    addi a1, a1, -16
-; RV64XANDESPERF-NEXT:    and a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %2 = lshr exact i64 %1, 2
   %3 = and i64 %2, 4294967295
@@ -4068,15 +3087,6 @@ define i64 @srli_slliuw_canonical(i64 %0) {
 ; RV64ZBA-NEXT:    srli a0, a0, 2
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 4
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_slliuw_canonical:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    slli a0, a0, 2
-; RV64XANDESPERF-NEXT:    li a1, 1
-; RV64XANDESPERF-NEXT:    slli a1, a1, 36
-; RV64XANDESPERF-NEXT:    addi a1, a1, -16
-; RV64XANDESPERF-NEXT:    and a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %1 = shl i64 %0, 2
   %2 = and i64 %1, 68719476720
@@ -4126,15 +3136,6 @@ define i64 @srli_slliuw_2(i64 %1) {
 ; RV64ZBA-NEXT:    srli a0, a0, 18
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 3
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_slliuw_2:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srli a0, a0, 15
-; RV64XANDESPERF-NEXT:    li a1, 1
-; RV64XANDESPERF-NEXT:    slli a1, a1, 35
-; RV64XANDESPERF-NEXT:    addi a1, a1, -8
-; RV64XANDESPERF-NEXT:    and a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %2 = lshr i64 %1, 18
   %3 = and i64 %2, 4294967295
@@ -4157,15 +3158,6 @@ define i64 @srli_slliuw_canonical_2(i64 %0) {
 ; RV64ZBA-NEXT:    srli a0, a0, 18
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 3
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srli_slliuw_canonical_2:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srli a0, a0, 15
-; RV64XANDESPERF-NEXT:    li a1, 1
-; RV64XANDESPERF-NEXT:    slli a1, a1, 35
-; RV64XANDESPERF-NEXT:    addi a1, a1, -8
-; RV64XANDESPERF-NEXT:    and a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %1 = lshr i64 %0, 15
   %2 = and i64 %1, 34359738360
@@ -4187,13 +3179,6 @@ define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
 ; RV64ZBA-NEXT:    srli a1, a1, 6
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: srai_srli_sh3add:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    srai a1, a1, 32
-; RV64XANDESPERF-NEXT:    srli a1, a1, 6
-; RV64XANDESPERF-NEXT:    nds.lea.d a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %2 = ashr i64 %1, 32
   %3 = lshr i64 %2, 6
@@ -4261,16 +3246,6 @@ define i64 @add_u32simm32_zextw(i64 %x) nounwind {
 ; RV64ZBA-NEXT:    addi a0, a0, -2
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    ret
-;
-; RV64XANDESPERF-LABEL: add_u32simm32_zextw:
-; RV64XANDESPERF:       # %bb.0: # %entry
-; RV64XANDESPERF-NEXT:    li a1, 1
-; RV64XANDESPERF-NEXT:    slli a1, a1, 32
-; RV64XANDESPERF-NEXT:    addi a1, a1, -2
-; RV64XANDESPERF-NEXT:    add a0, a0, a1
-; RV64XANDESPERF-NEXT:    addi a1, a1, 1
-; RV64XANDESPERF-NEXT:    and a0, a0, a1
-; RV64XANDESPERF-NEXT:    ret
 entry:
   %add = add i64 %x, 4294967294
   %and = and i64 %add, 4294967295

