craig.topper updated this revision to Diff 541759.
craig.topper added a comment.

Fix names of the zip/unzip builtins
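
For reference, a minimal usage sketch of the renamed Zbkb bit-permutation intrinsics (illustrative only; it mirrors the names exercised in riscv32-zbkb.c below and assumes an RV32 target with Zbkb enabled, as in that test):

  #include <stdint.h>
  #include <riscv_crypto.h>

  // zip interleaves the bits of the low and high halves of a 32-bit value;
  // unzip is the inverse permutation, so this function round-trips x.
  uint32_t zip_roundtrip(uint32_t x) {
    uint32_t z = __riscv_zip_32(x);   // previously __builtin_riscv_zip_32
    return __riscv_unzip_32(z);       // previously __builtin_riscv_unzip_32
  }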


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D155647/new/

https://reviews.llvm.org/D155647

Files:
  clang/lib/Headers/CMakeLists.txt
  clang/lib/Headers/riscv_crypto.h
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c

Index: clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/zksh.c
@@ -4,45 +4,57 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zksh -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKSH
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZKSH-LABEL: @sm3p0(
 // RV32ZKSH-NEXT:  entry:
+// RV32ZKSH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV32ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKSH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKSH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKSH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP1]])
+// RV32ZKSH-NEXT:    ret i32 [[TMP2]]
 //
 // RV64ZKSH-LABEL: @sm3p0(
 // RV64ZKSH-NEXT:  entry:
+// RV64ZKSH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKSH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKSH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKSH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP1]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sm3p0(uint32_t rs1) {
-  return __builtin_riscv_sm3p0(rs1);
+  return __riscv_sm3p0(rs1);
 }
 
 
 // RV32ZKSH-LABEL: @sm3p1(
 // RV32ZKSH-NEXT:  entry:
+// RV32ZKSH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV32ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKSH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKSH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKSH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP1]])
+// RV32ZKSH-NEXT:    ret i32 [[TMP2]]
 //
 // RV64ZKSH-LABEL: @sm3p1(
 // RV64ZKSH-NEXT:  entry:
+// RV64ZKSH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKSH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKSH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKSH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP1]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sm3p1(uint32_t rs1) {
-  return __builtin_riscv_sm3p1(rs1);
+  return __riscv_sm3p1(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/zksed.c
@@ -4,7 +4,7 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zksed -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKSED
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZKSED-LABEL: @sm4ks(
 // RV32ZKSED-NEXT:  entry:
@@ -29,7 +29,7 @@
 // RV64ZKSED-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sm4ks(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sm4ks(rs1, rs2, 0);
+  return __riscv_sm4ks(rs1, rs2, 0);
 }
 
 // RV32ZKSED-LABEL: @sm4ed(
@@ -55,5 +55,5 @@
 // RV64ZKSED-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sm4ed(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sm4ed(rs1, rs2, 0);
+  return __riscv_sm4ed(rs1, rs2, 0);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
@@ -2,105 +2,129 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zknh -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKNH
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZKNH-LABEL: @sha512sig0(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t sha512sig0(uint64_t rs1) {
-  return __builtin_riscv_sha512sig0(rs1);
+  return __riscv_sha512sig0(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha512sig1(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t sha512sig1(uint64_t rs1) {
-  return __builtin_riscv_sha512sig1(rs1);
+  return __riscv_sha512sig1(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha512sum0(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t sha512sum0(uint64_t rs1) {
-  return __builtin_riscv_sha512sum0(rs1);
+  return __riscv_sha512sum0(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha512sum1(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t sha512sum1(uint64_t rs1) {
-  return __builtin_riscv_sha512sum1(rs1);
+  return __riscv_sha512sum1(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha256sig0(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sig0(uint32_t rs1) {
-  return __builtin_riscv_sha256sig0(rs1);
+  return __riscv_sha256sig0(rs1);
 }
 
 // RV64ZKNH-LABEL: @sha256sig1(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sig1(uint32_t rs1) {
-  return __builtin_riscv_sha256sig1(rs1);
+  return __riscv_sha256sig1(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha256sum0(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sum0(uint32_t rs1) {
-  return __builtin_riscv_sha256sum0(rs1);
+  return __riscv_sha256sum0(rs1);
 }
 
 // RV64ZKNH-LABEL: @sha256sum1(
 // RV64ZKNH-NEXT:  entry:
+// RV64ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV64ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP1]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sum1(uint32_t rs1) {
-  return __builtin_riscv_sha256sum1(rs1);
+  return __riscv_sha256sum1(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c
@@ -2,35 +2,47 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKNE
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZKNE-LABEL: @aes64es(
 // RV64ZKNE-NEXT:  entry:
+// RV64ZKNE-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKNE-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZKNE-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t aes64es(uint64_t rs1, uint64_t rs2) {
-  return __builtin_riscv_aes64es(rs1, rs2);
+  return __riscv_aes64es(rs1, rs2);
 }
 
 
 // RV64ZKNE-LABEL: @aes64esm(
 // RV64ZKNE-NEXT:  entry:
+// RV64ZKNE-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKNE-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZKNE-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZKNE-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t aes64esm(uint64_t rs1, uint64_t rs2) {
-  return __builtin_riscv_aes64esm(rs1, rs2);
+  return __riscv_aes64esm(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c
@@ -2,48 +2,63 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKND
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZKND-LABEL: @aes64dsm(
 // RV64ZKND-NEXT:  entry:
+// RV64ZKND-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZKND-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t aes64dsm(uint64_t rs1, uint64_t rs2) {
-  return __builtin_riscv_aes64dsm(rs1, rs2);
+  return __riscv_aes64dsm(rs1, rs2);
 }
 
 
 // RV64ZKND-LABEL: @aes64ds(
 // RV64ZKND-NEXT:  entry:
+// RV64ZKND-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZKND-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t aes64ds(uint64_t rs1, uint64_t rs2) {
-  return __builtin_riscv_aes64ds(rs1, rs2);
+  return __riscv_aes64ds(rs1, rs2);
 }
 
 
 // RV64ZKND-LABEL: @aes64im(
 // RV64ZKND-NEXT:  entry:
+// RV64ZKND-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
-// RV64ZKND-NEXT:    ret i64 [[TMP1]]
+// RV64ZKND-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP1]])
+// RV64ZKND-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t aes64im(uint64_t rs1) {
-  return __builtin_riscv_aes64im(rs1);
+  return __riscv_aes64im(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c
@@ -4,7 +4,7 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZKND-ZKNE
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks1i(
 // RV64ZKND-ZKNE-NEXT:  entry:
@@ -15,20 +15,26 @@
 // RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t aes64ks1i(uint64_t rs1) {
-  return __builtin_riscv_aes64ks1i(rs1, 0);
+  return __riscv_aes64ks1i(rs1, 0);
 }
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks2(
 // RV64ZKND-ZKNE-NEXT:  entry:
+// RV64ZKND-ZKNE-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZKND-ZKNE-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-ZKNE-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZKND-ZKNE-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZKND-ZKNE-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKND-ZKNE-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKND-ZKNE-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP2]]
+// RV64ZKND-ZKNE-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-ZKNE-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-ZKNE-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZKND-ZKNE-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZKND-ZKNE-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZKND-ZKNE-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t aes64ks2(uint64_t rs1, uint64_t rs2) {
-  return __builtin_riscv_aes64ks2(rs1, rs2);
+  return __riscv_aes64ks2(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
@@ -2,142 +2,190 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zknh -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKNH
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZKNH-LABEL: @sha256sig0(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP1]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sig0(uint32_t rs1) {
-  return __builtin_riscv_sha256sig0(rs1);
+  return __riscv_sha256sig0(rs1);
 }
 
 // RV32ZKNH-LABEL: @sha256sig1(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP1]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sig1(uint32_t rs1) {
-  return __builtin_riscv_sha256sig1(rs1);
+  return __riscv_sha256sig1(rs1);
 }
 
 // RV32ZKNH-LABEL: @sha256sum0(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP1]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sum0(uint32_t rs1) {
-  return __builtin_riscv_sha256sum0(rs1);
+  return __riscv_sha256sum0(rs1);
 }
 
 // RV32ZKNH-LABEL: @sha256sum1(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP1]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP1]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t sha256sum1(uint32_t rs1) {
-  return __builtin_riscv_sha256sum1(rs1);
+  return __riscv_sha256sum1(rs1);
 }
 
 // RV32ZKNH-LABEL: @sha512sig0h(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sig0h(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sig0h(rs1, rs2);
+  return __riscv_sha512sig0h(rs1, rs2);
 }
 
 // RV32ZKNH-LABEL: @sha512sig0l(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sig0l(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sig0l(rs1, rs2);
+  return __riscv_sha512sig0l(rs1, rs2);
 }
 
 // RV32ZKNH-LABEL: @sha512sig1h(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sig1h(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sig1h(rs1, rs2);
+  return __riscv_sha512sig1h(rs1, rs2);
 }
 
 // RV32ZKNH-LABEL: @sha512sig1l(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sig1l(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sig1l(rs1, rs2);
+  return __riscv_sha512sig1l(rs1, rs2);
 }
 
 // RV32ZKNH-LABEL: @sha512sum0r(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sum0r(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sum0r(rs1, rs2);
+  return __riscv_sha512sum0r(rs1, rs2);
 }
 
 // RV32ZKNH-LABEL: @sha512sum1r(
 // RV32ZKNH-NEXT:  entry:
+// RV32ZKNH-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZKNH-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT:    ret i32 [[TMP2]]
+// RV32ZKNH-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZKNH-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZKNH-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t sha512sum1r(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_sha512sum1r(rs1, rs2);
+  return __riscv_sha512sum1r(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
@@ -2,7 +2,7 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zkne -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKNE
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZKNE-LABEL: @aes32esi(
 // RV32ZKNE-NEXT:  entry:
@@ -16,7 +16,7 @@
 // RV32ZKNE-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t aes32esi(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_aes32esi(rs1, rs2, 3);
+  return __riscv_aes32esi(rs1, rs2, 3);
 }
 
 // RV32ZKNE-LABEL: @aes32esmi(
@@ -31,5 +31,5 @@
 // RV32ZKNE-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t aes32esmi(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_aes32esmi(rs1, rs2, 3);
+  return __riscv_aes32esmi(rs1, rs2, 3);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
@@ -2,7 +2,7 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zknd -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZKND
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZKND-LABEL: @aes32dsi(
 // RV32ZKND-NEXT:  entry:
@@ -16,7 +16,7 @@
 // RV32ZKND-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t aes32dsi(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_aes32dsi(rs1, rs2, 3);
+  return __riscv_aes32dsi(rs1, rs2, 3);
 }
 
 // RV32ZKND-LABEL: @aes32dsmi(
@@ -31,5 +31,5 @@
 // RV32ZKND-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t aes32dsmi(uint32_t rs1, uint32_t rs2) {
-  return __builtin_riscv_aes32dsmi(rs1, rs2, 3);
+  return __riscv_aes32dsmi(rs1, rs2, 3);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c
@@ -2,36 +2,48 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkx -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKX
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZBKX-LABEL: @xperm8(
 // RV64ZBKX-NEXT:  entry:
+// RV64ZBKX-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKX-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZBKX-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZBKX-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZBKX-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKX-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZBKX-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t xperm8(uint64_t rs1, uint64_t rs2)
 {
-  return __builtin_riscv_xperm8_64(rs1, rs2);
+  return __riscv_xperm8_64(rs1, rs2);
 }
 
 // RV64ZBKX-LABEL: @xperm4(
 // RV64ZBKX-NEXT:  entry:
+// RV64ZBKX-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKX-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKX-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZBKX-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZBKX-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZBKX-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKX-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKX-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZBKX-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t xperm4(uint64_t rs1, uint64_t rs2)
 {
-  return __builtin_riscv_xperm4_64(rs1, rs2);
+  return __riscv_xperm4_64(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkc.c
@@ -2,49 +2,67 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkc -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKC
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZBKC-LABEL: @clmul_64(
 // RV64ZBKC-NEXT:  entry:
+// RV64ZBKC-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKC-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
 // RV64ZBKC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
 // RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
 // RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKC-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZBKC-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t clmul_64(uint64_t a, uint64_t b) {
-  return __builtin_riscv_clmul_64(a, b);
+  return __riscv_clmul_64(a, b);
 }
 
 // RV64ZBKC-LABEL: @clmulh_64(
 // RV64ZBKC-NEXT:  entry:
+// RV64ZBKC-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKC-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
 // RV64ZBKC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
 // RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
 // RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBKC-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    store i64 [[TMP1]], ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP3:%.*]] = load i64, ptr [[__Y_ADDR_I]], align 8
+// RV64ZBKC-NEXT:    [[TMP4:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP2]], i64 [[TMP3]])
+// RV64ZBKC-NEXT:    ret i64 [[TMP4]]
 //
 uint64_t clmulh_64(uint64_t a, uint64_t b) {
-  return __builtin_riscv_clmulh_64(a, b);
+  return __riscv_clmulh_64(a, b);
 }
 
 // RV64ZBKC-LABEL: @clmul_32(
 // RV64ZBKC-NEXT:  entry:
+// RV64ZBKC-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKC-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
 // RV64ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // RV64ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
 // RV64ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
 // RV64ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
 // RV64ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV64ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV64ZBKC-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZBKC-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZBKC-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKC-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP2]], i32 [[TMP3]])
+// RV64ZBKC-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
-  return __builtin_riscv_clmul_32(a, b);
+  return __riscv_clmul_32(a, b);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c
@@ -2,30 +2,158 @@
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV64ZBKB
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV64ZBKB-LABEL: @brev8_32(
 // RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV64ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV64ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV64ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP1]])
+// RV64ZBKB-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t brev8_32(uint32_t rs1)
 {
-  return __builtin_riscv_brev8_32(rs1);
+  return __riscv_brev8_32(rs1);
 }
 
 // RV64ZBKB-LABEL: @brev8_64(
 // RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
 // RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
 // RV64ZBKB-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
 // RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKB-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[TMP0]])
-// RV64ZBKB-NEXT:    ret i64 [[TMP1]]
+// RV64ZBKB-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[TMP1]])
+// RV64ZBKB-NEXT:    ret i64 [[TMP2]]
 //
 uint64_t brev8_64(uint64_t rs1)
 {
-  return __builtin_riscv_brev8_64(rs1);
+  return __riscv_brev8_64(rs1);
+}
+
+// RV64ZBKB-LABEL: @rev8_32(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+// RV64ZBKB-NEXT:    ret i32 [[TMP2]]
+//
+uint32_t rev8_32(uint32_t rs1)
+{
+  return __riscv_rev8_32(rs1);
+}
+
+// RV64ZBKB-LABEL: @rev8_64(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+// RV64ZBKB-NEXT:    ret i64 [[TMP2]]
+//
+uint64_t rev8_64(uint64_t rs1)
+{
+  return __riscv_rev8_64(rs1);
+}
+
+// RV64ZBKB-LABEL: @rol_32(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP4:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP2]], i32 [[TMP2]], i32 [[TMP3]])
+// RV64ZBKB-NEXT:    ret i32 [[TMP4]]
+//
+uint32_t rol_32(uint32_t rs1, uint32_t rs2)
+{
+  return __riscv_rol_32(rs1, rs2);
+}
+
+// RV64ZBKB-LABEL: @rol_64(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[CONV_I:%.*]] = zext i32 [[TMP3]] to i64
+// RV64ZBKB-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV_I]])
+// RV64ZBKB-NEXT:    ret i64 [[TMP4]]
+//
+uint64_t rol_64(uint64_t rs1, uint32_t rs2)
+{
+  return __riscv_rol_64(rs1, rs2);
+}
+
+// RV64ZBKB-LABEL: @ror_32(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP4:%.*]] = call i32 @llvm.fshr.i32(i32 [[TMP2]], i32 [[TMP2]], i32 [[TMP3]])
+// RV64ZBKB-NEXT:    ret i32 [[TMP4]]
+//
+uint32_t ror_32(uint32_t rs1, uint32_t rs2)
+{
+  return __riscv_ror_32(rs1, rs2);
+}
+
+// RV64ZBKB-LABEL: @ror_64(
+// RV64ZBKB-NEXT:  entry:
+// RV64ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBKB-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZBKB-NEXT:    store i64 [[TMP0]], ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[TMP2:%.*]] = load i64, ptr [[__X_ADDR_I]], align 8
+// RV64ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV64ZBKB-NEXT:    [[CONV_I:%.*]] = zext i32 [[TMP3]] to i64
+// RV64ZBKB-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshr.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV_I]])
+// RV64ZBKB-NEXT:    ret i64 [[TMP4]]
+//
+uint64_t ror_64(uint64_t rs1, uint32_t rs2)
+{
+  return __riscv_ror_64(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c
@@ -2,36 +2,48 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkx -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKX
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZBKX-LABEL: @xperm8(
 // RV32ZBKX-NEXT:  entry:
+// RV32ZBKX-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKX-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZBKX-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZBKX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZBKX-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKX-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKX-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t xperm8(uint32_t rs1, uint32_t rs2)
 {
-  return __builtin_riscv_xperm8_32(rs1, rs2);
+  return __riscv_xperm8_32(rs1, rs2);
 }
 
 // RV32ZBKX-LABEL: @xperm4(
 // RV32ZBKX-NEXT:  entry:
+// RV32ZBKX-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKX-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKX-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZBKX-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZBKX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZBKX-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKX-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKX-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKX-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t xperm4(uint32_t rs1, uint32_t rs2)
 {
-  return __builtin_riscv_xperm4_32(rs1, rs2);
+  return __riscv_xperm4_32(rs1, rs2);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkc.c
@@ -2,34 +2,46 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkc -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKC
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZBKC-LABEL: @clmul_32(
 // RV32ZBKC-NEXT:  entry:
+// RV32ZBKC-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKC-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
 // RV32ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
 // RV32ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
 // RV32ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKC-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKC-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
-  return __builtin_riscv_clmul_32(a, b);
+  return __riscv_clmul_32(a, b);
 }
 
 // RV32ZBKC-LABEL: @clmulh_32(
 // RV32ZBKC-NEXT:  entry:
+// RV32ZBKC-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKC-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
 // RV32ZBKC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
 // RV32ZBKC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
 // RV32ZBKC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBKC-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKC-NEXT:    [[TMP4:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKC-NEXT:    ret i32 [[TMP4]]
 //
 uint32_t clmulh_32(uint32_t a, uint32_t b) {
-  return __builtin_riscv_clmulh_32(a, b);
+  return __riscv_clmulh_32(a, b);
 }
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
===================================================================
--- clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c
@@ -2,43 +2,112 @@
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkb -emit-llvm %s -o - \
 // RUN:     | FileCheck %s  -check-prefix=RV32ZBKB
 
-#include <stdint.h>
+#include <riscv_crypto.h>
 
 // RV32ZBKB-LABEL: @brev8(
 // RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP1]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t brev8(uint32_t rs1)
 {
-  return __builtin_riscv_brev8_32(rs1);
+  return __riscv_brev8_32(rs1);
 }
 
 // RV32ZBKB-LABEL: @zip(
 // RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[TMP1]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t zip(uint32_t rs1)
 {
-  return __builtin_riscv_zip_32(rs1);
+  return __riscv_zip_32(rs1);
 }
 
 // RV32ZBKB-LABEL: @unzip(
 // RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT:    ret i32 [[TMP1]]
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[TMP1]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP2]]
 //
 uint32_t unzip(uint32_t rs1)
 {
-  return __builtin_riscv_unzip_32(rs1);
+  return __riscv_unzip_32(rs1);
+}
+
+// RV32ZBKB-LABEL: @rev8(
+// RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP2]]
+//
+uint32_t rev8(uint32_t rs1)
+{
+  return __riscv_rev8_32(rs1);
+}
+
+// RV32ZBKB-LABEL: @rol(
+// RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP4:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP2]], i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP4]]
+//
+uint32_t rol(uint32_t rs1, uint32_t rs2)
+{
+  return __riscv_rol_32(rs1, rs2);
+}
+
+// RV32ZBKB-LABEL: @ror(
+// RV32ZBKB-NEXT:  entry:
+// RV32ZBKB-NEXT:    [[__X_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[__Y_ADDR_I:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBKB-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV32ZBKB-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV32ZBKB-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV32ZBKB-NEXT:    store i32 [[TMP0]], ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    store i32 [[TMP1]], ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP2:%.*]] = load i32, ptr [[__X_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP3:%.*]] = load i32, ptr [[__Y_ADDR_I]], align 4
+// RV32ZBKB-NEXT:    [[TMP4:%.*]] = call i32 @llvm.fshr.i32(i32 [[TMP2]], i32 [[TMP2]], i32 [[TMP3]])
+// RV32ZBKB-NEXT:    ret i32 [[TMP4]]
+//
+uint32_t ror(uint32_t rs1, uint32_t rs2)
+{
+  return __riscv_ror_32(rs1, rs2);
 }
Index: clang/lib/Headers/riscv_crypto.h
===================================================================
--- /dev/null
+++ clang/lib/Headers/riscv_crypto.h
@@ -0,0 +1,288 @@
+/*===---- riscv_crypto.h - RISC-V Zk* and Zbk* intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_CRYPTO_H
+#define __RISCV_CRYPTO_H
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
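+// Zbkb extension: bit-manipulation instructions for cryptography. The rev8,
+// rol, and ror wrappers use generic Clang builtins that lower to the
+// corresponding Zbkb instructions.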
+#if defined(__riscv_zbkb)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_brev8_32(uint32_t __x) {
+  return __builtin_riscv_brev8_32(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rev8_32(uint32_t __x) {
+  return __builtin_bswap32(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rol_32(uint32_t __x, uint32_t __y) {
+  return __builtin_rotateleft32(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_ror_32(uint32_t __x, uint32_t __y) {
+  return __builtin_rotateright32(__x, __y);
+}
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_brev8_64(uint64_t __x) {
+  return __builtin_riscv_brev8_64(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rev8_64(uint64_t __x) {
+  return __builtin_bswap64(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rol_64(uint64_t __x, uint32_t __y) {
+  return __builtin_rotateleft64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_ror_64(uint64_t __x, uint32_t __y) {
+  return __builtin_rotateright64(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_unzip_32(uint32_t __x) {
+  return __builtin_riscv_unzip_32(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_zip_32(uint32_t __x) {
+  return __builtin_riscv_zip_32(__x);
+}
+#endif
+#endif // defined(__riscv_zbkb)
+
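+// Zbkc extension: carry-less multiplication. clmul returns the low half of
+// the carry-less product and clmulh returns the high half.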
+#if defined(__riscv_zbkc)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmul_32(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_clmul_32(__x, __y);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulh_32(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_clmulh_32(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmul_64(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_clmul_64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulh_64(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_clmulh_64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbkc)
+
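+// Zbkx extension: crossbar permutation instructions operating on nibbles
+// (xperm4) and bytes (xperm8).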
+#if defined(__riscv_zbkx)
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm4_32(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_xperm4_32(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm8_32(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_xperm8_32(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm4_64(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_xperm4_64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm8_64(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_xperm8_64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbkx)
+
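+// Zknd extension: AES decryption instructions. The RV32 forms are macros
+// because the byte-select operand 'bs' must be a constant expression.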
+#if defined(__riscv_zknd)
+#if __riscv_xlen == 32
+#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
+#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64ds(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_aes64ds(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64dsm(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_aes64dsm(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64im(uint64_t __x) {
+  return __builtin_riscv_aes64im(__x);
+}
+#endif
+#endif // defined(__riscv_zknd)
+
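+// Zkne extension: AES encryption instructions. As with Zknd, the RV32 forms
+// are macros because 'bs' must be a constant expression.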
+#if defined(__riscv_zkne)
+#if __riscv_xlen == 32
+#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
+#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64es(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_aes64es(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64esm(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_aes64esm(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zkne)
+
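+// AES key-schedule instructions shared between Zknd and Zkne. aes64ks1i is a
+// macro because the round-number operand must be a constant expression.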
+#if defined(__riscv_zknd) || defined(__riscv_zkne)
+#if __riscv_xlen == 64
+#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64ks2(uint64_t __x, uint64_t __y) {
+  return __builtin_riscv_aes64ks2(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zknd) || defined(__riscv_zkne)
+
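+// Zknh extension: SHA-256 and SHA-512 hash instructions. On RV32 the SHA-512
+// operations work on the high and low 32-bit halves of each 64-bit value.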
+#if defined(__riscv_zknh)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sig0(uint32_t __x) {
+  return __builtin_riscv_sha256sig0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sig1(uint32_t __x) {
+  return __builtin_riscv_sha256sig1(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sum0(uint32_t __x) {
+  return __builtin_riscv_sha256sum0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sum1(uint32_t __x) {
+  return __builtin_riscv_sha256sum1(__x);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0h(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sig0h(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0l(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sig0l(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1h(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sig1h(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1l(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sig1l(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum0r(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sum0r(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum1r(uint32_t __x, uint32_t __y) {
+  return __builtin_riscv_sha512sum1r(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0(uint64_t __x) {
+  return __builtin_riscv_sha512sig0(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1(uint64_t __x) {
+  return __builtin_riscv_sha512sig1(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum0(uint64_t __x) {
+  return __builtin_riscv_sha512sum0(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum1(uint64_t __x) {
+  return __builtin_riscv_sha512sum1(__x);
+}
+#endif
+#endif // defined(__riscv_zknh)
+
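+// Zksh extension: SM3 hash function instructions.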
+#if defined(__riscv_zksh)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sm3p0(uint32_t __x) {
+  return __builtin_riscv_sm3p0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sm3p1(uint32_t __x) {
+  return __builtin_riscv_sm3p1(__x);
+}
+#endif // defined(__riscv_zksh)
+
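+// Zksed extension: SM4 block cipher instructions. These are macros because
+// the byte-select operand 'bs' must be a constant expression.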
+#if defined(__riscv_zksed)
+#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs)
+#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs)
+#endif // defined(__riscv_zksed)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
Index: clang/lib/Headers/CMakeLists.txt
===================================================================
--- clang/lib/Headers/CMakeLists.txt
+++ clang/lib/Headers/CMakeLists.txt
@@ -99,6 +99,7 @@
   )
 
 set(riscv_files
+  riscv_crypto.h
   riscv_ntlh.h
   )
 