eopXD created this revision.
eopXD added reviewers: aaron.ballman, craig.topper.
Herald added subscribers: jobnoorman, luke, VincentWu, vkmr, frasercrmck, 
luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, 
PkmX, the_o, brucehoult, MartinMosbeck, rogfer01, edward-jones, zzheng, jrtc27, 
shiva0217, kito-cheng, niosHD, sabuasal, simoncook, johnrusso, rbar, asb, 
arichardson.
Herald added a project: All.
eopXD requested review of this revision.
Herald added subscribers: cfe-commits, wangpc, MaskRay.
Herald added a project: clang.

The full-multiply intrinsics (vmulh, vmulhu, vmulhsu, and vsmul) are not included
for EEW=64 in Zve64*; they require the V extension to be enabled.

This commit improves the diagnostic message from

  <source>:4:10: error: call to undeclared function '__riscv_vsmul_vv_i64m1';
      4 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);

to

  test.c:5:10: error: builtin requires: v
      5 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
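
For reference, here is a minimal stand-alone reproducer sketch. The file name and
the invocation are illustrative; the cc1 flags mirror the RUN line of the new Sema
test added below.

  // repro.c - hypothetical reproducer, mirroring clang/test/Sema/riscv-vector-v-check.c.
  // Illustrative invocation (following the test's RUN line):
  //   clang -cc1 -triple riscv64 -target-feature +zve64x -fsyntax-only repro.c
  #include <riscv_vector.h>

  // EEW=64 fixed-point multiply: under Zve64x without V, this now reports
  // "builtin requires: v" instead of an implicit-declaration error.
  vint64m1_t f(vint64m1_t op1, vint64m1_t op2, size_t vl) {
    return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
  }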


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D155416

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/include/clang/Support/RISCVVIntrinsicUtils.h
  clang/lib/Sema/SemaChecking.cpp
  clang/lib/Sema/SemaRISCVVectorLookup.cpp
  clang/test/Sema/riscv-vector-v-check.c
  clang/utils/TableGen/RISCVVEmitter.cpp

Index: clang/utils/TableGen/RISCVVEmitter.cpp
===================================================================
--- clang/utils/TableGen/RISCVVEmitter.cpp
+++ clang/utils/TableGen/RISCVVEmitter.cpp
@@ -655,7 +655,6 @@
     for (auto RequiredFeature : RequiredFeatures) {
       RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
                                   .Case("RV64", RVV_REQ_RV64)
-                                  .Case("FullMultiply", RVV_REQ_FullMultiply)
                                   .Case("Xsfvcp", RVV_REQ_Xsfvcp)
                                   .Default(RVV_REQ_None);
       assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
Index: clang/test/Sema/riscv-vector-v-check.c
===================================================================
--- /dev/null
+++ clang/test/Sema/riscv-vector-v-check.c
@@ -0,0 +1,197 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -disable-O0-optnone -o - -fsyntax-only %s -verify 
+// REQUIRES: riscv-registered-target
+#include <riscv_vector.h>
+
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
Index: clang/lib/Sema/SemaRISCVVectorLookup.cpp
===================================================================
--- clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -202,7 +202,6 @@
     ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
   const TargetInfo &TI = Context.getTargetInfo();
   bool HasRV64 = TI.hasFeature("64bit");
-  bool HasFullMultiply = TI.hasFeature("v");
   // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
   // in RISCVVEmitter.cpp.
   for (auto &Record : Recs) {
@@ -256,12 +255,6 @@
           !HasRV64)
         continue;
 
-      if ((BaseType == BasicType::Int64) &&
-          ((Record.RequiredExtensions & RVV_REQ_FullMultiply) ==
-           RVV_REQ_FullMultiply) &&
-          !HasFullMultiply)
-        continue;
-
       // Expanded with different LMUL.
       for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
         if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
Index: clang/lib/Sema/SemaChecking.cpp
===================================================================
--- clang/lib/Sema/SemaChecking.cpp
+++ clang/lib/Sema/SemaChecking.cpp
@@ -4527,6 +4527,73 @@
   if (FeatureMissing)
     return true;
 
+  // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+  // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+  switch (BuiltinID) {
+  default:
+    break;
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+    bool RequireV = false;
+    for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum)
+      RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType(
+          /* Bitwidth */ 64, /* IsFloat */ false);
+
+    if (RequireV && !TI.hasFeature("v"))
+      return Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_builtin_requires_extension)
+             << /* IsExtension */ false << TheCall->getSourceRange() << "v";
+
+    break;
+  }
+  }
+
   switch (BuiltinID) {
   case RISCVVector::BI__builtin_rvv_vsetvli:
     return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
Index: clang/include/clang/Support/RISCVVIntrinsicUtils.h
===================================================================
--- clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -475,8 +475,7 @@
 enum RVVRequire : uint8_t {
   RVV_REQ_None = 0,
   RVV_REQ_RV64 = 1 << 0,
-  RVV_REQ_FullMultiply = 1 << 1,
-  RVV_REQ_Xsfvcp = 1 << 2,
+  RVV_REQ_Xsfvcp = 1 << 1,
 
   LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Xsfvcp)
 };
Index: clang/include/clang/Basic/riscv_vector.td
===================================================================
--- clang/include/clang/Basic/riscv_vector.td
+++ clang/include/clang/Basic/riscv_vector.td
@@ -1713,13 +1713,11 @@
 
 // 12.10. Vector Single-Width Integer Multiply Instructions
 defm vmul : RVVIntBinBuiltinSet;
-let RequiredFeatures = ["FullMultiply"] in {
 defm vmulh : RVVSignedBinBuiltinSet;
 defm vmulhu : RVVUnsignedBinBuiltinSet;
 defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
                                    [["vv", "v", "vvUv"],
                                     ["vx", "v", "vvUe"]]>;
-}
 
 // 12.11. Vector Integer Divide Instructions
 defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1859,9 +1857,7 @@
   defm vasub : RVVSignedBinBuiltinSetRoundingMode;
 
   // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-  let RequiredFeatures = ["FullMultiply"] in {
   defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
-  }
 
   // 13.4. Vector Single-Width Scaling Shift Instructions
   defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;