https://github.com/JivanH created https://github.com/llvm/llvm-project/pull/74824

This implements experimental support for the Zimop extension as specified here:
https://github.com/riscv/riscv-isa-manual/blob/main/src/zimop.adoc.

This change adds intrinsics for the mop.r.[n] and mop.rr.[n] instructions of the
Zimop extension, based on
https://github.com/riscv-non-isa/riscv-c-api-doc/blob/master/riscv-c-api.md.
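
For reference, a minimal usage sketch of the new builtins (mirroring the
clang/test/CodeGen/RISCV/rvb-intrinsics/zimop.c test added in this patch; the
trailing constant selects which mop.r.[n] / mop.rr.[n] instruction is emitted
and must be an immediate in [0, 31] or [0, 7], respectively):

  #include <stdint.h>

  // mop.r.[n]: one source register, n must be a constant in [0, 31].
  uint32_t mopr_0_32(uint32_t a) {
    return __builtin_riscv_mopr_32(a, 0);     // lowers to mop.r.0
  }

  // mop.rr.[n]: two source registers, n must be a constant in [0, 7].
  // The *_64 variants additionally require a 64-bit target.
  uint64_t moprr_7_64(uint64_t a, uint64_t b) {
    return __builtin_riscv_moprr_64(a, b, 7); // lowers to mop.rr.7
  }

As with other experimental extensions, this needs the extension enabled at
compile time (for clang, -menable-experimental-extensions together with the
exact zimop version in -march; see the RISCVUsage.rst change below).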


>From 2940a49eed5214668d4235ddaf3c82d076202b94 Mon Sep 17 00:00:00 2001
From: ln8-8 <lyut.nersis...@gmail.com>
Date: Fri, 8 Dec 2023 12:25:49 +0400
Subject: [PATCH] [RISCV] Add support for experimental Zimop extension

This implements experimental support for the Zimop extension as specified here:
https://github.com/riscv/riscv-isa-manual/blob/main/src/zimop.adoc.

This change adds IR intrinsics for the mop.r.[n] and mop.rr.[n] instructions of
the Zimop extension, based on
https://github.com/riscv-non-isa/riscv-c-api-doc/blob/master/riscv-c-api.md.
It also adds assembly support.
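
For reference, the assembler syntax exercised by the new
llvm/test/MC/RISCV/rvzimop-valid.s test looks like this (rd is the destination,
rs1/rs2 the sources; n is encoded in the mnemonic):

  # mop.r.[n] rd, rs1        (n in [0, 31])
  mop.r.0  a2, a1
  mop.r.31 a2, a1

  # mop.rr.[n] rd, rs1, rs2  (n in [0, 7])
  mop.rr.0 a3, a2, a1
  mop.rr.7 a3, a2, a1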
---
 clang/include/clang/Basic/BuiltinsRISCV.def   |   5 +
 clang/lib/CodeGen/CGBuiltin.cpp               |  34 ++++
 clang/lib/Sema/SemaChecking.cpp               |   8 +
 .../test/CodeGen/RISCV/rvb-intrinsics/zimop.c | 104 +++++++++++
 llvm/docs/RISCVUsage.rst                      |   3 +
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  23 +++
 llvm/lib/Support/RISCVISAInfo.cpp             |   2 +
 llvm/lib/Target/RISCV/RISCVFeatures.td        |   5 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 171 ++++++++++++++++++
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |   6 +
 llvm/lib/Target/RISCV/RISCVInstrFormats.td    |  21 +++
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |  53 ++++++
 llvm/lib/Target/RISCV/RISCVSchedRocket.td     |   1 +
 llvm/lib/Target/RISCV/RISCVSchedSiFive7.td    |   1 +
 .../Target/RISCV/RISCVSchedSyntacoreSCR1.td   |   1 +
 llvm/lib/Target/RISCV/RISCVSchedule.td        |  14 ++
 llvm/test/CodeGen/RISCV/attributes.ll         |   4 +
 .../test/CodeGen/RISCV/rv32zimop-intrinsic.ll |  47 +++++
 .../test/CodeGen/RISCV/rv64zimop-intrinsic.ll |  96 ++++++++++
 llvm/test/MC/RISCV/rv32zimop-invalid.s        |   6 +
 llvm/test/MC/RISCV/rvzimop-valid.s            |  26 +++
 llvm/unittests/Support/RISCVISAInfoTest.cpp   |   1 +
 22 files changed, 632 insertions(+)
 create mode 100644 clang/test/CodeGen/RISCV/rvb-intrinsics/zimop.c
 create mode 100644 llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
 create mode 100644 llvm/test/MC/RISCV/rv32zimop-invalid.s
 create mode 100644 llvm/test/MC/RISCV/rvzimop-valid.s

diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def 
b/clang/include/clang/Basic/BuiltinsRISCV.def
index 1528b18c82ead..6ba5288f9cbd1 100644
--- a/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -89,5 +89,10 @@ TARGET_BUILTIN(__builtin_riscv_sm3p1, "UiUi", "nc", "zksh")
 TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "zihintntl")
 TARGET_BUILTIN(__builtin_riscv_ntl_store, "v.", "t", "zihintntl")
 
+TARGET_BUILTIN(__builtin_riscv_mopr_32, "UiUiUi", "nc", "experimental-zimop")
+TARGET_BUILTIN(__builtin_riscv_mopr_64, "UWiUWiUWi", "nc", 
"experimental-zimop,64bit")
+TARGET_BUILTIN(__builtin_riscv_moprr_32, "UiUiUiUi", "nc", 
"experimental-zimop")
+TARGET_BUILTIN(__builtin_riscv_moprr_64, "UWiUWiUWiUWi", "nc", 
"experimental-zimop,64bit")
+
 #undef BUILTIN
 #undef TARGET_BUILTIN
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 0d8b3e4aaad47..11ba665dda938 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -20808,6 +20808,10 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned 
BuiltinID,
   case RISCV::BI__builtin_riscv_clz_64:
   case RISCV::BI__builtin_riscv_ctz_32:
   case RISCV::BI__builtin_riscv_ctz_64:
+  case RISCV::BI__builtin_riscv_mopr_32:
+  case RISCV::BI__builtin_riscv_mopr_64:
+  case RISCV::BI__builtin_riscv_moprr_32:
+  case RISCV::BI__builtin_riscv_moprr_64:
   case RISCV::BI__builtin_riscv_clmul_32:
   case RISCV::BI__builtin_riscv_clmul_64:
   case RISCV::BI__builtin_riscv_clmulh_32:
@@ -20848,6 +20852,36 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned 
BuiltinID,
       return Result;
     }
 
+    // Zimop
+    case RISCV::BI__builtin_riscv_mopr_32:
+    case RISCV::BI__builtin_riscv_mopr_64: {
+      unsigned N = cast<ConstantInt>(Ops[1])->getZExtValue();
+      Function *F = nullptr;
+      if (N <= 1) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr0 + N, {ResultType});
+      } else if (N >= 10 && N <= 19) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr10 + N - 10, {ResultType});
+      } else if (N == 2) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr2, {ResultType});
+      } else if (N >= 20 && N <= 29) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr20 + N - 20, {ResultType});
+      } else if (N == 3) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr3, {ResultType});
+      } else if (N >= 30 && N <= 31) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr30 + N - 30, {ResultType});
+      } else if (N >= 4 && N <= 9) {
+        F = CGM.getIntrinsic(Intrinsic::riscv_mopr4 + N - 4, {ResultType});
+      } else {
+        llvm_unreachable("unexpected builtin ID");
+      }
+      return Builder.CreateCall(F, {Ops[0]}, "");
+    }
+    case RISCV::BI__builtin_riscv_moprr_32:
+    case RISCV::BI__builtin_riscv_moprr_64: {
+      unsigned N = cast<ConstantInt>(Ops[2])->getZExtValue();
+      Function *F = CGM.getIntrinsic(Intrinsic::riscv_moprr0 + N, 
{ResultType});
+      return Builder.CreateCall(F, {Ops[0], Ops[1]}, "");
+    }
     // Zbc
     case RISCV::BI__builtin_riscv_clmul_32:
     case RISCV::BI__builtin_riscv_clmul_64:
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index fc6ee6b2c5ab4..80ca886bda6ec 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -5468,6 +5468,14 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const 
TargetInfo &TI,
   // Check if rnum is in [0, 10]
   case RISCV::BI__builtin_riscv_aes64ks1i:
     return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
+  // Check if n of mop.r.[n] is in [0, 31]
+  case RISCV::BI__builtin_riscv_mopr_32:
+  case RISCV::BI__builtin_riscv_mopr_64:
+    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+  // Check if n of mop.rr.[n] is in [0, 7]
+  case RISCV::BI__builtin_riscv_moprr_32:
+  case RISCV::BI__builtin_riscv_moprr_64:
+    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
   // Check if value range for vxrm is in [0, 3]
   case RISCVVector::BI__builtin_rvv_vaaddu_vv:
   case RISCVVector::BI__builtin_rvv_vaaddu_vx:
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/zimop.c 
b/clang/test/CodeGen/RISCV/rvb-intrinsics/zimop.c
new file mode 100644
index 0000000000000..790c746f84606
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/zimop.c
@@ -0,0 +1,104 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zimop 
-emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
+// RUN:     | FileCheck %s  -check-prefix=RV32ZIMOP
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zimop 
-emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
+// RUN:     | FileCheck %s  -check-prefix=RV64ZIMOP
+
+#include <stdint.h>
+
+#if __riscv_xlen == 64
+// RV64ZIMOP-LABEL: @mopr_0_64(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.mopr0.i64(i64 
[[A:%.*]])
+// RV64ZIMOP-NEXT:    ret i64 [[TMP0]]
+//
+uint64_t mopr_0_64(uint64_t a) {
+  return __builtin_riscv_mopr_64(a, 0);
+}
+
+// RV64ZIMOP-LABEL: @mopr_31_64(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.mopr31.i64(i64 
[[A:%.*]])
+// RV64ZIMOP-NEXT:    ret i64 [[TMP0]]
+//
+uint64_t mopr_31_64(uint64_t a) {
+  return __builtin_riscv_mopr_64(a, 31);
+}
+
+// RV64ZIMOP-LABEL: @moprr_0_64(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.moprr0.i64(i64 
[[A:%.*]], i64 [[B:%.*]])
+// RV64ZIMOP-NEXT:    ret i64 [[TMP0]]
+//
+uint64_t moprr_0_64(uint64_t a, uint64_t b) {
+  return __builtin_riscv_moprr_64(a, b, 0);
+}
+
+// RV64ZIMOP-LABEL: @moprr_7_64(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.moprr7.i64(i64 
[[A:%.*]], i64 [[B:%.*]])
+// RV64ZIMOP-NEXT:    ret i64 [[TMP0]]
+//
+uint64_t moprr_7_64(uint64_t a, uint64_t b) {
+  return __builtin_riscv_moprr_64(a, b, 7);
+}
+
+#endif
+
+// RV32ZIMOP-LABEL: @mopr_0_32(
+// RV32ZIMOP-NEXT:  entry:
+// RV32ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.mopr0.i32(i32 
[[A:%.*]])
+// RV32ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+// RV64ZIMOP-LABEL: @mopr_0_32(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.mopr0.i32(i32 
[[A:%.*]])
+// RV64ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+uint32_t mopr_0_32(uint32_t a) {
+  return __builtin_riscv_mopr_32(a, 0);
+}
+
+// RV32ZIMOP-LABEL: @mopr_31_32(
+// RV32ZIMOP-NEXT:  entry:
+// RV32ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.mopr31.i32(i32 
[[A:%.*]])
+// RV32ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+// RV64ZIMOP-LABEL: @mopr_31_32(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.mopr31.i32(i32 
[[A:%.*]])
+// RV64ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+uint32_t mopr_31_32(uint32_t a) {
+  return __builtin_riscv_mopr_32(a, 31);
+}
+
+// RV32ZIMOP-LABEL: @moprr_0_32(
+// RV32ZIMOP-NEXT:  entry:
+// RV32ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.moprr0.i32(i32 
[[A:%.*]], i32 [[B:%.*]])
+// RV32ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+// RV64ZIMOP-LABEL: @moprr_0_32(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.moprr0.i32(i32 
[[A:%.*]], i32 [[B:%.*]])
+// RV64ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+uint32_t moprr_0_32(uint32_t a, uint32_t b) {
+  return __builtin_riscv_moprr_32(a, b, 0);
+}
+
+// RV32ZIMOP-LABEL: @moprr_7_32(
+// RV32ZIMOP-NEXT:  entry:
+// RV32ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.moprr7.i32(i32 
[[A:%.*]], i32 [[B:%.*]])
+// RV32ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+// RV64ZIMOP-LABEL: @moprr_7_32(
+// RV64ZIMOP-NEXT:  entry:
+// RV64ZIMOP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.moprr7.i32(i32 
[[A:%.*]], i32 [[B:%.*]])
+// RV64ZIMOP-NEXT:    ret i32 [[TMP0]]
+//
+uint32_t moprr_7_32(uint32_t a, uint32_t b) {
+  return __builtin_riscv_moprr_32(a, b, 7);
+}
\ No newline at end of file
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index 65dd0d83448ed..bd2f81fba186d 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -208,6 +208,9 @@ The primary goal of experimental support is to assist in 
the process of ratifica
 ``experimental-zvbb``, ``experimental-zvbc``, ``experimental-zvkb``, 
``experimental-zvkg``, ``experimental-zvkn``, ``experimental-zvknc``, 
``experimental-zvkned``, ``experimental-zvkng``, ``experimental-zvknha``, 
``experimental-zvknhb``, ``experimental-zvks``, ``experimental-zvksc``, 
``experimental-zvksed``, ``experimental-zvksg``, ``experimental-zvksh``, 
``experimental-zvkt``
   LLVM implements the `1.0.0-rc2 specification 
<https://github.com/riscv/riscv-crypto/releases/download/v/riscv-crypto-spec-vector.pdf>`__.
 Note that current vector crypto extension version can be found in: 
<https://github.com/riscv/riscv-crypto>.
 
+``experimental-zimop``
+  LLVM implements the `v0.1 proposed specification 
<https://github.com/riscv/riscv-isa-manual/blob/main/src/zimop.adoc>`__.
+
 To use an experimental extension from `clang`, you must add 
`-menable-experimental-extensions` to the command line, and specify the exact 
version of the experimental extension you are using.  To use an experimental 
extension with LLVM's internal developer tools (e.g. `llc`, `llvm-objdump`, 
`llvm-mc`), you must prefix the extension name with `experimental-`.  Note that 
you don't need to specify the version with internal tools, and shouldn't 
include the `experimental-` prefix with `clang`.
 
 Vendor Extensions
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 20c6a525a86ba..fcb11c8c51398 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -108,6 +108,29 @@ let TargetPrefix = "riscv" in {
   def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// May-Be-Operations
+
+let TargetPrefix = "riscv" in {
+
+  class MOPGPRIntrinsics
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable]>;
+  class MOPGPRGPRIntrinsics
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable]>;
+
+  // Zimop
+   foreach i = 0...31 in {
+    def int_riscv_mopr#i : MOPGPRIntrinsics;
+   }
+  foreach i = 0...7 in {
+    def int_riscv_moprr#i : MOPGPRGPRIntrinsics;
+  }
+} // TargetPrefix = "riscv"
+
 
//===----------------------------------------------------------------------===//
 // Vectors
 
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp 
b/llvm/lib/Support/RISCVISAInfo.cpp
index 6322748430063..1b303ba1e9431 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -177,6 +177,8 @@ static const RISCVSupportedExtension 
SupportedExperimentalExtensions[] = {
     {"zicfilp", RISCVExtensionVersion{0, 2}},
     {"zicond", RISCVExtensionVersion{1, 0}},
 
+    {"zimop", RISCVExtensionVersion{0, 1}},
+
     {"ztso", RISCVExtensionVersion{0, 1}},
 
     {"zvbb", RISCVExtensionVersion{1, 0}},
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td 
b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 7d142d38d0f9a..eddb7c33627f0 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -687,6 +687,11 @@ def HasStdExtZicond : 
Predicate<"Subtarget->hasStdExtZicond()">,
                                 AssemblerPredicate<(all_of 
FeatureStdExtZicond),
                                 "'Zicond' (Integer Conditional Operations)">;
 
+def FeatureStdExtZimop : SubtargetFeature<"experimental-zimop", 
"HasStdExtZimop", "true",
+                                          "'Zimop' (May-Be-Operations)">;
+def HasStdExtZimop : Predicate<"Subtarget->hasStdExtZimop()">,
+                               AssemblerPredicate<(all_of FeatureStdExtZimop),
+                               "'Zimop' (May-Be-Operations)">;
 def FeatureStdExtSmaia
     : SubtargetFeature<"smaia", "HasStdExtSmaia", "true",
                        "'Smaia' (Smaia encompasses all added CSRs and all "
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp 
b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f2ec422b54a92..45fbea2088559 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8367,6 +8367,73 @@ SDValue 
RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
+#define RISCV_MOPR_64_CASE(NAME, OPCODE)                                       
\
+  case Intrinsic::riscv_##NAME: {                                              
\
+    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 
\
+        Op.getValueType() == MVT::i32) {                                       
\
+      SDValue NewOp =                                                          
\
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        
\
+      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                  
\
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    
\
+    }                                                                          
\
+    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1));                  
\
+  }
+    RISCV_MOPR_64_CASE(mopr0, RISCVISD::MOPR0)
+    RISCV_MOPR_64_CASE(mopr1, RISCVISD::MOPR1)
+    RISCV_MOPR_64_CASE(mopr2, RISCVISD::MOPR2)
+    RISCV_MOPR_64_CASE(mopr3, RISCVISD::MOPR3)
+    RISCV_MOPR_64_CASE(mopr4, RISCVISD::MOPR4)
+    RISCV_MOPR_64_CASE(mopr5, RISCVISD::MOPR5)
+    RISCV_MOPR_64_CASE(mopr6, RISCVISD::MOPR6)
+    RISCV_MOPR_64_CASE(mopr7, RISCVISD::MOPR7)
+    RISCV_MOPR_64_CASE(mopr8, RISCVISD::MOPR8)
+    RISCV_MOPR_64_CASE(mopr9, RISCVISD::MOPR9)
+    RISCV_MOPR_64_CASE(mopr10, RISCVISD::MOPR10)
+    RISCV_MOPR_64_CASE(mopr11, RISCVISD::MOPR11)
+    RISCV_MOPR_64_CASE(mopr12, RISCVISD::MOPR12)
+    RISCV_MOPR_64_CASE(mopr13, RISCVISD::MOPR13)
+    RISCV_MOPR_64_CASE(mopr14, RISCVISD::MOPR14)
+    RISCV_MOPR_64_CASE(mopr15, RISCVISD::MOPR15)
+    RISCV_MOPR_64_CASE(mopr16, RISCVISD::MOPR16)
+    RISCV_MOPR_64_CASE(mopr17, RISCVISD::MOPR17)
+    RISCV_MOPR_64_CASE(mopr18, RISCVISD::MOPR18)
+    RISCV_MOPR_64_CASE(mopr19, RISCVISD::MOPR19)
+    RISCV_MOPR_64_CASE(mopr20, RISCVISD::MOPR20)
+    RISCV_MOPR_64_CASE(mopr21, RISCVISD::MOPR21)
+    RISCV_MOPR_64_CASE(mopr22, RISCVISD::MOPR22)
+    RISCV_MOPR_64_CASE(mopr23, RISCVISD::MOPR23)
+    RISCV_MOPR_64_CASE(mopr24, RISCVISD::MOPR24)
+    RISCV_MOPR_64_CASE(mopr25, RISCVISD::MOPR25)
+    RISCV_MOPR_64_CASE(mopr26, RISCVISD::MOPR26)
+    RISCV_MOPR_64_CASE(mopr27, RISCVISD::MOPR27)
+    RISCV_MOPR_64_CASE(mopr28, RISCVISD::MOPR28)
+    RISCV_MOPR_64_CASE(mopr29, RISCVISD::MOPR29)
+    RISCV_MOPR_64_CASE(mopr30, RISCVISD::MOPR30)
+    RISCV_MOPR_64_CASE(mopr31, RISCVISD::MOPR31)
+#undef RISCV_MOPR_64_CASE
+#define RISCV_MOPRR_64_CASE(NAME, OPCODE)                                      
\
+  case Intrinsic::riscv_##NAME: {                                              
\
+    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 
\
+        Op.getValueType() == MVT::i32) {                                       
\
+      SDValue NewOp0 =                                                         
\
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        
\
+      SDValue NewOp1 =                                                         
\
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));        
\
+      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);         
\
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    
\
+    }                                                                          
\
+    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1),                   
\
+                       Op.getOperand(2));                                      
\
+  }
+    RISCV_MOPRR_64_CASE(moprr0, RISCVISD::MOPRR0)
+    RISCV_MOPRR_64_CASE(moprr1, RISCVISD::MOPRR1)
+    RISCV_MOPRR_64_CASE(moprr2, RISCVISD::MOPRR2)
+    RISCV_MOPRR_64_CASE(moprr3, RISCVISD::MOPRR3)
+    RISCV_MOPRR_64_CASE(moprr4, RISCVISD::MOPRR4)
+    RISCV_MOPRR_64_CASE(moprr5, RISCVISD::MOPRR5)
+    RISCV_MOPRR_64_CASE(moprr6, RISCVISD::MOPRR6)
+    RISCV_MOPRR_64_CASE(moprr7, RISCVISD::MOPRR7)
+#undef RISCV_MOPRR_64_CASE
   case Intrinsic::riscv_clmul:
     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
       SDValue NewOp0 =
@@ -11633,6 +11700,70 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
+#define RISCV_MOPR_CASE(NAME, OPCODE)                                          
\
+  case Intrinsic::riscv_##NAME: {                                              
\
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                
\
+      return;                                                                  
\
+    SDValue NewOp =                                                            
\
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          
\
+    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                    
\
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          
\
+    return;                                                                    
\
+  }
+      RISCV_MOPR_CASE(mopr0, RISCVISD::MOPR0)
+      RISCV_MOPR_CASE(mopr1, RISCVISD::MOPR1)
+      RISCV_MOPR_CASE(mopr2, RISCVISD::MOPR2)
+      RISCV_MOPR_CASE(mopr3, RISCVISD::MOPR3)
+      RISCV_MOPR_CASE(mopr4, RISCVISD::MOPR4)
+      RISCV_MOPR_CASE(mopr5, RISCVISD::MOPR5)
+      RISCV_MOPR_CASE(mopr6, RISCVISD::MOPR6)
+      RISCV_MOPR_CASE(mopr7, RISCVISD::MOPR7)
+      RISCV_MOPR_CASE(mopr8, RISCVISD::MOPR8)
+      RISCV_MOPR_CASE(mopr9, RISCVISD::MOPR9)
+      RISCV_MOPR_CASE(mopr10, RISCVISD::MOPR10)
+      RISCV_MOPR_CASE(mopr11, RISCVISD::MOPR11)
+      RISCV_MOPR_CASE(mopr12, RISCVISD::MOPR12)
+      RISCV_MOPR_CASE(mopr13, RISCVISD::MOPR13)
+      RISCV_MOPR_CASE(mopr14, RISCVISD::MOPR14)
+      RISCV_MOPR_CASE(mopr15, RISCVISD::MOPR15)
+      RISCV_MOPR_CASE(mopr16, RISCVISD::MOPR16)
+      RISCV_MOPR_CASE(mopr17, RISCVISD::MOPR17)
+      RISCV_MOPR_CASE(mopr18, RISCVISD::MOPR18)
+      RISCV_MOPR_CASE(mopr19, RISCVISD::MOPR19)
+      RISCV_MOPR_CASE(mopr20, RISCVISD::MOPR20)
+      RISCV_MOPR_CASE(mopr21, RISCVISD::MOPR21)
+      RISCV_MOPR_CASE(mopr22, RISCVISD::MOPR22)
+      RISCV_MOPR_CASE(mopr23, RISCVISD::MOPR23)
+      RISCV_MOPR_CASE(mopr24, RISCVISD::MOPR24)
+      RISCV_MOPR_CASE(mopr25, RISCVISD::MOPR25)
+      RISCV_MOPR_CASE(mopr26, RISCVISD::MOPR26)
+      RISCV_MOPR_CASE(mopr27, RISCVISD::MOPR27)
+      RISCV_MOPR_CASE(mopr28, RISCVISD::MOPR28)
+      RISCV_MOPR_CASE(mopr29, RISCVISD::MOPR29)
+      RISCV_MOPR_CASE(mopr30, RISCVISD::MOPR30)
+      RISCV_MOPR_CASE(mopr31, RISCVISD::MOPR31)
+#undef RISCV_MOPR_CASE
+#define RISCV_MOPRR_CASE(NAME, OPCODE)                                         
\
+  case Intrinsic::riscv_##NAME: {                                              
\
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                
\
+      return;                                                                  
\
+    SDValue NewOp0 =                                                           
\
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          
\
+    SDValue NewOp1 =                                                           
\
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));          
\
+    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);           
\
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          
\
+    return;                                                                    
\
+  }
+      RISCV_MOPRR_CASE(moprr0, RISCVISD::MOPRR0)
+      RISCV_MOPRR_CASE(moprr1, RISCVISD::MOPRR1)
+      RISCV_MOPRR_CASE(moprr2, RISCVISD::MOPRR2)
+      RISCV_MOPRR_CASE(moprr3, RISCVISD::MOPRR3)
+      RISCV_MOPRR_CASE(moprr4, RISCVISD::MOPRR4)
+      RISCV_MOPRR_CASE(moprr5, RISCVISD::MOPRR5)
+      RISCV_MOPRR_CASE(moprr6, RISCVISD::MOPRR6)
+      RISCV_MOPRR_CASE(moprr7, RISCVISD::MOPRR7)
+#undef RISCV_MOPRR_CASE
     case Intrinsic::riscv_clmul: {
       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
         return;
@@ -18371,6 +18502,46 @@ const char 
*RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(CLMUL)
   NODE_NAME_CASE(CLMULH)
   NODE_NAME_CASE(CLMULR)
+  NODE_NAME_CASE(MOPR0)
+  NODE_NAME_CASE(MOPR1)
+  NODE_NAME_CASE(MOPR2)
+  NODE_NAME_CASE(MOPR3)
+  NODE_NAME_CASE(MOPR4)
+  NODE_NAME_CASE(MOPR5)
+  NODE_NAME_CASE(MOPR6)
+  NODE_NAME_CASE(MOPR7)
+  NODE_NAME_CASE(MOPR8)
+  NODE_NAME_CASE(MOPR9)
+  NODE_NAME_CASE(MOPR10)
+  NODE_NAME_CASE(MOPR11)
+  NODE_NAME_CASE(MOPR12)
+  NODE_NAME_CASE(MOPR13)
+  NODE_NAME_CASE(MOPR14)
+  NODE_NAME_CASE(MOPR15)
+  NODE_NAME_CASE(MOPR16)
+  NODE_NAME_CASE(MOPR17)
+  NODE_NAME_CASE(MOPR18)
+  NODE_NAME_CASE(MOPR19)
+  NODE_NAME_CASE(MOPR20)
+  NODE_NAME_CASE(MOPR21)
+  NODE_NAME_CASE(MOPR22)
+  NODE_NAME_CASE(MOPR23)
+  NODE_NAME_CASE(MOPR24)
+  NODE_NAME_CASE(MOPR25)
+  NODE_NAME_CASE(MOPR26)
+  NODE_NAME_CASE(MOPR27)
+  NODE_NAME_CASE(MOPR28)
+  NODE_NAME_CASE(MOPR29)
+  NODE_NAME_CASE(MOPR30)
+  NODE_NAME_CASE(MOPR31)
+  NODE_NAME_CASE(MOPRR0)
+  NODE_NAME_CASE(MOPRR1)
+  NODE_NAME_CASE(MOPRR2)
+  NODE_NAME_CASE(MOPRR3)
+  NODE_NAME_CASE(MOPRR4)
+  NODE_NAME_CASE(MOPRR5)
+  NODE_NAME_CASE(MOPRR6)
+  NODE_NAME_CASE(MOPRR7)
   NODE_NAME_CASE(SHA256SIG0)
   NODE_NAME_CASE(SHA256SIG1)
   NODE_NAME_CASE(SHA256SUM0)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h 
b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index ae798cc47bf83..5dd66f402a324 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -141,6 +141,12 @@ enum NodeType : unsigned {
   SHA256SIG0, SHA256SIG1, SHA256SUM0, SHA256SUM1,
   SM4KS, SM4ED,
   SM3P0, SM3P1,
+  // May-Be-Operations
+  MOPR0, MOPR1, MOPR2, MOPR3, MOPR4, MOPR5, MOPR6, MOPR7, MOPR8, MOPR9, MOPR10,
+  MOPR11, MOPR12, MOPR13, MOPR14, MOPR15, MOPR16, MOPR17, MOPR18, MOPR19,
+  MOPR20, MOPR21, MOPR22, MOPR23, MOPR24, MOPR25, MOPR26, MOPR27, MOPR28,
+  MOPR29, MOPR30, MOPR31, MOPRR0, MOPRR1, MOPRR2, MOPRR3, MOPRR4, MOPRR5,
+  MOPRR6, MOPRR7,
 
   // Vector Extension
   FIRST_VL_VECTOR_OP,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td 
b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index e80ba26800a13..cb68752210d54 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -401,6 +401,27 @@ class RVInstIUnary<bits<12> imm12, bits<3> funct3, 
RISCVOpcode opcode,
   let Inst{31-20} = imm12;
 }
 
+class RVInstIMopr<bits<7> imm7, bits<5> imm5, bits<3> funct3, RISCVOpcode 
opcode,
+                   dag outs, dag ins, string opcodestr, string argstr>
+    : RVInstIBase<funct3, opcode, outs, ins, opcodestr, argstr> {
+  let Inst{31} = imm7{6};
+  let Inst{30} = imm5{4};
+  let Inst{29-28} = imm7{5-4};
+  let Inst{27-26} = imm5{3-2};
+  let Inst{25-22} = imm7{3-0};
+  let Inst{21-20} = imm5{1-0};
+}
+
+class RVInstRMoprr<bits<4> imm4, bits<3> imm3, bits<3> funct3, RISCVOpcode 
opcode,
+                   dag outs, dag ins, string opcodestr, string argstr>
+    : RVInstRBase<funct3, opcode, outs, ins, opcodestr, argstr> {
+  let Inst{31} = imm4{3};
+  let Inst{30} = imm3{2};
+  let Inst{29-28} = imm4{2-1};
+  let Inst{27-26} = imm3{1-0};
+  let Inst{25} = imm4{0};
+}
+
 class RVInstS<bits<3> funct3, RISCVOpcode opcode, dag outs, dag ins,
               string opcodestr, string argstr>
     : RVInst<outs, ins, opcodestr, argstr, [], InstFormatS> {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td 
b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index edc08187d8f77..6e82fea0339d4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -90,6 +90,14 @@ def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL",
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisInt<0>]>>;
 
+foreach i = 0...31 in {
+  defvar riscvisd_moprx = "RISCVISD::MOPR"#i;
+  def riscv_mopr#i : SDNode<riscvisd_moprx,  SDTIntUnaryOp>;
+}
+foreach i = 0...7 in {
+  defvar riscvisd_moprrx = "RISCVISD::MOPRR"#i;
+  def riscv_moprr#i : SDNode<riscvisd_moprrx,  SDTIntBinOp>;
+}
 
//===----------------------------------------------------------------------===//
 // Operand and SDNode transformation definitions.
 
//===----------------------------------------------------------------------===//
@@ -597,6 +605,16 @@ class Priv_rr<string opcodestr, bits<7> funct7>
   let rd = 0;
 }
 
+class RVMopr<bits<7> imm7, bits<5> imm5, bits<3> funct3,
+             RISCVOpcode opcode, string opcodestr>
+    : RVInstIMopr<imm7, imm5, funct3, opcode, (outs GPR:$rd), (ins GPR:$rs1),
+                   opcodestr, "$rd, $rs1">;
+
+class RVMoprr<bits<4> imm4, bits<3> imm3, bits<3> funct3,
+             RISCVOpcode opcode, string opcodestr>
+    : RVInstRMoprr<imm4, imm3, funct3, opcode, (outs GPR:$rd), (ins GPR:$rs1, 
GPR:$rs2),
+                   opcodestr, "$rd, $rs1, $rs2">;
+
 
//===----------------------------------------------------------------------===//
 // Instructions
 
//===----------------------------------------------------------------------===//
@@ -786,6 +804,22 @@ def SRAW  : ALUW_rr<0b0100000, 0b101, "sraw">,
 } // IsSignExtendingOpW = 1
 } // Predicates = [IsRV64]
 
+// Zimop instructions
+
+foreach i = 0...31 in {
+    let Predicates = [HasStdExtZimop] in {
+    def MOPR#i : RVMopr<0b1000111, i, 0b100, OPC_SYSTEM, "mop.r."#i>,
+                Sched<[WriteMOPR, ReadMOPR]>;
+    } // Predicates = [HasStdExtZimop]
+}
+
+foreach i = 0...7 in {
+    let Predicates = [HasStdExtZimop] in {
+    def MOPRR#i : RVMoprr<0b1001, i, 0b100, OPC_SYSTEM, "mop.rr."#i>,
+                Sched<[WriteMOPR, ReadMOPR, ReadMOPR]>;
+    } // Predicates = [HasStdExtZimop]
+}
+
 
//===----------------------------------------------------------------------===//
 // Privileged instructions
 
//===----------------------------------------------------------------------===//
@@ -1551,6 +1585,25 @@ let Predicates = [HasStdExtC, OptForMinSize] in {
   def : SelectCompressOpt<SETNE>;
 }
 
+// Zimop instructions
+foreach i = 0...31 in {
+    defvar moprx = !cast<Instruction>("MOPR"#i);
+    defvar riscv_moprx = !cast<SDNode>("riscv_mopr"#i);
+    let Predicates = [HasStdExtZimop] in {
+    def : Pat<(XLenVT (riscv_moprx (XLenVT GPR:$rs1))),
+              (moprx GPR:$rs1)>;
+    } // Predicates = [HasStdExtZimop]
+}
+
+foreach i = 0...7 in {
+    defvar moprrx = !cast<Instruction>("MOPRR"#i);
+    defvar riscv_moprrx = !cast<SDNode>("riscv_moprr"#i);
+    let Predicates = [HasStdExtZimop] in {
+    def : Pat<(XLenVT (riscv_moprrx (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+              (moprrx GPR:$rs1, GPR:$rs2)>;
+    } // Predicates = [HasStdExtZimop]
+}
+
 /// Branches and jumps
 
 // Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
diff --git a/llvm/lib/Target/RISCV/RISCVSchedRocket.td 
b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
index bb9dfe5d01240..d2fe83606eab6 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedRocket.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
@@ -248,4 +248,5 @@ defm : UnsupportedSchedZbkx;
 defm : UnsupportedSchedZfa;
 defm : UnsupportedSchedZfh;
 defm : UnsupportedSchedSFB;
+defm : UnsupportedSchedZimop;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td 
b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 45783d482f3bd..082710a7b1484 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -1212,4 +1212,5 @@ defm : UnsupportedSchedZbc;
 defm : UnsupportedSchedZbkb;
 defm : UnsupportedSchedZbkx;
 defm : UnsupportedSchedZfa;
+defm : UnsupportedSchedZimop;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td 
b/llvm/lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td
index 06ad2075b0736..858f1ca89e151 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td
@@ -207,4 +207,5 @@ defm : UnsupportedSchedZbkb;
 defm : UnsupportedSchedZbkx;
 defm : UnsupportedSchedZfa;
 defm : UnsupportedSchedZfh;
+defm : UnsupportedSchedZimop;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVSchedule.td 
b/llvm/lib/Target/RISCV/RISCVSchedule.td
index f6c1b096ad90c..8bf3481b0d007 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedule.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedule.td
@@ -112,6 +112,9 @@ def WriteFST16        : SchedWrite;    // Floating point sp 
store
 def WriteFST32        : SchedWrite;    // Floating point sp store
 def WriteFST64        : SchedWrite;    // Floating point dp store
 
+// Zimop extension.
+def WriteMOPR     : SchedWrite;
+
 // short forward branch for Bullet
 def WriteSFB        : SchedWrite;
 def ReadSFBJmp      : SchedRead;
@@ -201,6 +204,9 @@ def ReadFClass16         : SchedRead;
 def ReadFClass32         : SchedRead;
 def ReadFClass64         : SchedRead;
 
+// Zimop extension.
+def ReadMOPR     : SchedRead;
+
 multiclass UnsupportedSchedZfh {
 let Unsupported = true in {
 def : WriteRes<WriteFAdd16, []>;
@@ -271,6 +277,14 @@ def : ReadAdvance<ReadFRoundF16, 0>;
 } // Unsupported = true
 }
 
+multiclass UnsupportedSchedZimop {
+let Unsupported = true in {
+def : WriteRes<WriteMOPR, []>;
+
+def : ReadAdvance<ReadMOPR, 0>;
+}
+}
+
 // Include the scheduler resources for other instruction extensions.
 include "RISCVScheduleZb.td"
 include "RISCVScheduleV.td"
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll 
b/llvm/test/CodeGen/RISCV/attributes.ll
index 030ae06af6d28..0afe9b9f916d8 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -83,6 +83,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvksh %s -o - 
| FileCheck --check-prefix=RV32ZVKSH %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvkt %s -o - | 
FileCheck --check-prefix=RV32ZVKT %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond %s -o - | FileCheck 
--check-prefix=RV32ZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zimop %s -o - | FileCheck 
--check-prefix=RV32ZIMOP %s
 ; RUN: llc -mtriple=riscv32 -mattr=+smaia %s -o - | FileCheck 
--check-prefixes=CHECK,RV32SMAIA %s
 ; RUN: llc -mtriple=riscv32 -mattr=+ssaia %s -o - | FileCheck 
--check-prefixes=CHECK,RV32SSAIA %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfbfmin %s -o - | FileCheck 
--check-prefixes=CHECK,RV32ZFBFMIN %s
@@ -173,6 +174,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvksh %s -o - 
| FileCheck --check-prefix=RV64ZVKSH %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvkt %s -o - | 
FileCheck --check-prefix=RV64ZVKT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond %s -o - | FileCheck 
--check-prefix=RV64ZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zimop %s -o - | FileCheck 
--check-prefix=RV64ZIMOP %s
 ; RUN: llc -mtriple=riscv64 -mattr=+smaia %s -o - | FileCheck 
--check-prefixes=CHECK,RV64SMAIA %s
 ; RUN: llc -mtriple=riscv64 -mattr=+ssaia %s -o - | FileCheck 
--check-prefixes=CHECK,RV64SSAIA %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfbfmin %s -o - | FileCheck 
--check-prefixes=CHECK,RV64ZFBFMIN %s
@@ -265,6 +267,7 @@
 ; RV32ZVKSH: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvksh1p0_zvl32b1p0"
 ; RV32ZVKT: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV32ZICOND: .attribute 5, "rv32i2p1_zicond1p0"
+; RV32ZIMOP: .attribute 5, "rv32i2p1_zimop0p1"
 ; RV32SMAIA: .attribute 5, "rv32i2p1_smaia1p0"
 ; RV32SSAIA: .attribute 5, "rv32i2p1_ssaia1p0"
 ; RV32ZFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin0p8"
@@ -354,6 +357,7 @@
 ; RV64ZVKSH: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvksh1p0_zvl32b1p0"
 ; RV64ZVKT: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV64ZICOND: .attribute 5, "rv64i2p1_zicond1p0"
+; RV64ZIMOP: .attribute 5, "rv64i2p1_zimop0p1"
 ; RV64SMAIA: .attribute 5, "rv64i2p1_smaia1p0"
 ; RV64SSAIA: .attribute 5, "rv64i2p1_ssaia1p0"
 ; RV64ZFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin0p8"
diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll 
b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
new file mode 100644
index 0000000000000..477c4abc126e1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zimop -verify-machineinstrs < 
%s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZIMOP
+
+declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+
+define i32 @mopr0_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.mopr31.i32(i32 %a)
+
+define i32 @mopr31_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr31_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+
+define i32 @moprr0_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+
+define i32 @moprr7_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr7_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll 
b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
new file mode 100644
index 0000000000000..3faa0ca2f78ea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zimop -verify-machineinstrs < 
%s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZIMOP
+
+declare i64 @llvm.riscv.mopr0.i64(i64 %a)
+
+define i64 @mopr0_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr0.i64(i64 %a)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.mopr31.i64(i64 %a)
+
+define i64 @mopr31_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr31.i64(i64 %a)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+
+define i64 @moprr0_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
+
+define i64 @moprr7_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+
+declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+
+define signext i32 @mopr0_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.mopr31.i32(i32 %a)
+
+define signext i32 @mopr31_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+
+define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+
+define signext i32 @moprr7_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
\ No newline at end of file
diff --git a/llvm/test/MC/RISCV/rv32zimop-invalid.s 
b/llvm/test/MC/RISCV/rv32zimop-invalid.s
new file mode 100644
index 0000000000000..e6c3adc4cd309
--- /dev/null
+++ b/llvm/test/MC/RISCV/rv32zimop-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zimop < %s 2>&1 | 
FileCheck %s
+
+# Too few operands
+mop.r.0 t0 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+mop.rr.0 t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
diff --git a/llvm/test/MC/RISCV/rvzimop-valid.s 
b/llvm/test/MC/RISCV/rvzimop-valid.s
new file mode 100644
index 0000000000000..1552936629902
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvzimop-valid.s
@@ -0,0 +1,26 @@
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zimop -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zimop -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zimop < %s \
+# RUN:     | llvm-objdump --mattr=+experimental-zimop -d -r - \
+# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zimop < %s \
+# RUN:     | llvm-objdump --mattr=+experimental-zimop -d -r - \
+# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
+
+# CHECK-ASM-AND-OBJ: mop.r.0 a2, a1
+# CHECK-ASM: encoding: [0x73,0xc6,0xc5,0x81]
+mop.r.0 a2, a1
+
+# CHECK-ASM-AND-OBJ: mop.r.31 a2, a1
+# CHECK-ASM: encoding: [0x73,0xc6,0xf5,0xcd]
+mop.r.31 a2, a1
+
+# CHECK-ASM-AND-OBJ: mop.rr.0 a3, a2, a1
+# CHECK-ASM: encoding: [0xf3,0x46,0xb6,0x82]
+mop.rr.0 a3, a2, a1
+
+# CHECK-ASM-AND-OBJ: mop.rr.7 a3, a2, a1
+# CHECK-ASM: encoding: [0xf3,0x46,0xb6,0xce]
+mop.rr.7 a3, a2, a1
\ No newline at end of file
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp 
b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index 549964eed5551..bd41fa2338739 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -738,6 +738,7 @@ R"(All available -march extensions for RISC-V
 Experimental extensions
     zicfilp             0.2       This is a long dummy description
     zicond              1.0
+    zimop               0.1
     zacas               1.0
     zfbfmin             0.8
     ztso                0.1
