Nils Asmussen has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/25705 )
Change subject: arch-riscv,tests: merged changes from the riscv-tests upstream
repo.
......................................................................
arch-riscv,tests: merged changes from the riscv-tests upstream repo.
Change-Id: Ied7c34fedc99657e667bddaf3f3521e5a8f96a6e
---
M tests/test-progs/asmtest/src/riscv/env/encoding.h
M tests/test-progs/asmtest/src/riscv/env/p/riscv_test.h
M tests/test-progs/asmtest/src/riscv/env/v/entry.S
M tests/test-progs/asmtest/src/riscv/env/v/riscv_test.h
M tests/test-progs/asmtest/src/riscv/env/v/vm.c
M tests/test-progs/asmtest/src/riscv/isa/macros/scalar/test_macros.h
M tests/test-progs/asmtest/src/riscv/isa/rv64mi/access.S
M tests/test-progs/asmtest/src/riscv/isa/rv64mi/breakpoint.S
M tests/test-progs/asmtest/src/riscv/isa/rv64mi/illegal.S
M tests/test-progs/asmtest/src/riscv/isa/rv64si/Makefrag
M tests/test-progs/asmtest/src/riscv/isa/rv64si/csr.S
A tests/test-progs/asmtest/src/riscv/isa/rv64si/icache-alias.S
M tests/test-progs/asmtest/src/riscv/isa/rv64si/ma_fetch.S
M tests/test-progs/asmtest/src/riscv/isa/rv64si/scall.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ua/lrsc.S
M tests/test-progs/asmtest/src/riscv/isa/rv64uc/Makefrag
M tests/test-progs/asmtest/src/riscv/isa/rv64ud/structural.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/sll.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/slli.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/slliw.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/sllw.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/sra.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraiw.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraw.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/srl.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/srli.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/srliw.S
M tests/test-progs/asmtest/src/riscv/isa/rv64ui/srlw.S
28 files changed, 453 insertions(+), 122 deletions(-)
diff --git a/tests/test-progs/asmtest/src/riscv/env/encoding.h
b/tests/test-progs/asmtest/src/riscv/env/encoding.h
index c109ce1..e32f958 100644
--- a/tests/test-progs/asmtest/src/riscv/env/encoding.h
+++ b/tests/test-progs/asmtest/src/riscv/env/encoding.h
@@ -22,6 +22,7 @@
#define MSTATUS_TVM 0x00100000
#define MSTATUS_TW 0x00200000
#define MSTATUS_TSR 0x00400000
+#define MSTATUS_VS 0x01800000
#define MSTATUS32_SD 0x80000000
#define MSTATUS_UXL 0x0000000300000000
#define MSTATUS_SXL 0x0000000C00000000
@@ -36,6 +37,7 @@
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000
#define SSTATUS_MXR 0x00080000
+#define SSTATUS_VS 0x01800000
#define SSTATUS32_SD 0x80000000
#define SSTATUS_UXL 0x0000000300000000
#define SSTATUS64_SD 0x8000000000000000
diff --git a/tests/test-progs/asmtest/src/riscv/env/p/riscv_test.h
b/tests/test-progs/asmtest/src/riscv/env/p/riscv_test.h
index 3fbcb50..bb7ced6 100644
--- a/tests/test-progs/asmtest/src/riscv/env/p/riscv_test.h
+++ b/tests/test-progs/asmtest/src/riscv/env/p/riscv_test.h
@@ -18,6 +18,11 @@
RVTEST_FP_ENABLE; \
.endm
+#define RVTEST_RV64UV \
+ .macro init; \
+ RVTEST_VECTOR_ENABLE; \
+ .endm
+
#define RVTEST_RV32U \
.macro init; \
.endm
@@ -27,6 +32,11 @@
RVTEST_FP_ENABLE; \
.endm
+#define RVTEST_RV32UV \
+ .macro init; \
+ RVTEST_VECTOR_ENABLE; \
+ .endm
+
#define RVTEST_RV64M \
.macro init; \
RVTEST_ENABLE_MACHINE; \
@@ -56,7 +66,8 @@
#define INIT_PMP \
la t0, 1f; \
csrw mtvec, t0; \
- li t0, -1; /* Set up a PMP to permit all accesses */ \
+ /* Set up a PMP to permit all accesses */ \
+ li t0, (1 << (31 + (__riscv_xlen / 64) * (53 - 31))) - 1; \
csrw pmpaddr0, t0; \
li t0, PMP_NAPOT | PMP_R | PMP_W | PMP_X; \
csrw pmpcfg0, t0; \
@@ -94,6 +105,12 @@
csrs mstatus, a0; \
csrwi fcsr, 0
+#define RVTEST_VECTOR_ENABLE \
+ li a0, (MSTATUS_VS & (MSTATUS_VS >> 1)) | \
+ (MSTATUS_FS & (MSTATUS_FS >> 1)); \
+ csrs mstatus, a0; \
+ csrwi fcsr, 0
+
#define RISCV_MULTICORE_DISABLE \
csrr a0, mhartid; \
1: bnez a0, 1b
@@ -186,6 +203,8 @@
#define RVTEST_PASS \
fence; \
li TESTNUM, 1; \
+ li a7, 93; \
+ li a0, 0; \
ecall
#define TESTNUM gp
@@ -194,6 +213,8 @@
1: beqz TESTNUM, 1b; \
sll TESTNUM, TESTNUM, 1; \
or TESTNUM, TESTNUM, 1; \
+ li a7, 93; \
+ addi a0, TESTNUM, 0; \
ecall
//-----------------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/env/v/entry.S
b/tests/test-progs/asmtest/src/riscv/env/v/entry.S
index 9719662..fa492e6 100644
--- a/tests/test-progs/asmtest/src/riscv/env/v/entry.S
+++ b/tests/test-progs/asmtest/src/riscv/env/v/entry.S
@@ -14,17 +14,52 @@
.section ".text.init","ax",@progbits
.globl _start
+ .align 2
_start:
j handle_reset
/* NMI vector */
+ .align 2
nmi_vector:
j wtf
+ .align 2
trap_vector:
j wtf
handle_reset:
+ li x1, 0
+ li x2, 0
+ li x3, 0
+ li x4, 0
+ li x5, 0
+ li x6, 0
+ li x7, 0
+ li x8, 0
+ li x9, 0
+ li x10, 0
+ li x11, 0
+ li x12, 0
+ li x13, 0
+ li x14, 0
+ li x15, 0
+ li x16, 0
+ li x17, 0
+ li x18, 0
+ li x19, 0
+ li x20, 0
+ li x21, 0
+ li x22, 0
+ li x23, 0
+ li x24, 0
+ li x25, 0
+ li x26, 0
+ li x27, 0
+ li x28, 0
+ li x29, 0
+ li x30, 0
+ li x31, 0
+
la t0, trap_vector
csrw mtvec, t0
la sp, STACK_TOP - SIZEOF_TRAPFRAME_T
@@ -32,6 +67,7 @@
slli t0, t0, 12
add sp, sp, t0
csrw mscratch, sp
+ call extra_boot
la a0, userstart
j vm_boot
@@ -73,6 +109,7 @@
sret
.global trap_entry
+ .align 2
trap_entry:
csrrw sp, sscratch, sp
diff --git a/tests/test-progs/asmtest/src/riscv/env/v/riscv_test.h
b/tests/test-progs/asmtest/src/riscv/env/v/riscv_test.h
index 8ca9ffd..751e037 100644
--- a/tests/test-progs/asmtest/src/riscv/env/v/riscv_test.h
+++ b/tests/test-progs/asmtest/src/riscv/env/v/riscv_test.h
@@ -15,6 +15,10 @@
#undef RVTEST_CODE_BEGIN
#define RVTEST_CODE_BEGIN \
.text; \
+ .global extra_boot; \
+extra_boot: \
+ EXTRA_INIT \
+ ret; \
.global userstart; \
userstart: \
init
diff --git a/tests/test-progs/asmtest/src/riscv/env/v/vm.c
b/tests/test-progs/asmtest/src/riscv/env/v/vm.c
index a2e5533..df302b4 100644
--- a/tests/test-progs/asmtest/src/riscv/env/v/vm.c
+++ b/tests/test-progs/asmtest/src/riscv/env/v/vm.c
@@ -6,6 +6,14 @@
#include "riscv_test.h"
+#if __riscv_xlen == 32
+# define SATP_MODE_CHOICE SATP_MODE_SV32
+#elif defined(Sv48)
+# define SATP_MODE_CHOICE SATP_MODE_SV48
+#else
+# define SATP_MODE_CHOICE SATP_MODE_SV39
+#endif
+
void trap_entry();
void pop_tf(trapframe_t*);
@@ -62,13 +70,21 @@
#define l1pt pt[0]
#define user_l2pt pt[1]
-#if __riscv_xlen == 64
+#if SATP_MODE_CHOICE == SATP_MODE_SV48
+# define NPT 6
+# define kernel_l2pt pt[2]
+# define kernel_l3pt pt[3]
+# define user_l3pt pt[4]
+# define user_llpt pt[5]
+#elif SATP_MODE_CHOICE == SATP_MODE_SV39
# define NPT 4
-#define kernel_l2pt pt[2]
-# define user_l3pt pt[3]
-#else
+# define kernel_l2pt pt[2]
+# define user_llpt pt[3]
+#elif SATP_MODE_CHOICE == SATP_MODE_SV32
# define NPT 2
-# define user_l3pt user_l2pt
+# define user_llpt user_l2pt
+#else
+# error Unknown SATP_MODE_CHOICE
#endif
pte_t pt[NPT][PTES_PER_PT] __attribute__((aligned(PGSIZE)));
@@ -100,10 +116,10 @@
if (node->addr)
{
// check accessed and dirty bits
- assert(user_l3pt[addr/PGSIZE] & PTE_A);
+ assert(user_llpt[addr/PGSIZE] & PTE_A);
uintptr_t sstatus = set_csr(sstatus, SSTATUS_SUM);
if (memcmp((void*)addr, uva2kva(addr), PGSIZE)) {
- assert(user_l3pt[addr/PGSIZE] & PTE_D);
+ assert(user_llpt[addr/PGSIZE] & PTE_D);
memcpy((void*)addr, uva2kva(addr), PGSIZE);
}
write_csr(sstatus, sstatus);
@@ -125,12 +141,12 @@
assert(addr >= PGSIZE && addr < MAX_TEST_PAGES * PGSIZE);
addr = addr/PGSIZE*PGSIZE;
- if (user_l3pt[addr/PGSIZE]) {
- if (!(user_l3pt[addr/PGSIZE] & PTE_A)) {
- user_l3pt[addr/PGSIZE] |= PTE_A;
+ if (user_llpt[addr/PGSIZE]) {
+ if (!(user_llpt[addr/PGSIZE] & PTE_A)) {
+ user_llpt[addr/PGSIZE] |= PTE_A;
} else {
- assert(!(user_l3pt[addr/PGSIZE] & PTE_D) && cause ==
CAUSE_STORE_PAGE_FAULT);
- user_l3pt[addr/PGSIZE] |= PTE_D;
+ assert(!(user_llpt[addr/PGSIZE] & PTE_D) && cause ==
CAUSE_STORE_PAGE_FAULT);
+ user_llpt[addr/PGSIZE] |= PTE_D;
}
flush_page(addr);
return;
@@ -143,7 +159,7 @@
freelist_tail = 0;
uintptr_t new_pte = (node->addr >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V |
PTE_U | PTE_R | PTE_W | PTE_X;
- user_l3pt[addr/PGSIZE] = new_pte | PTE_A | PTE_D;
+ user_llpt[addr/PGSIZE] = new_pte | PTE_A | PTE_D;
flush_page(addr);
assert(user_mapping[addr/PGSIZE].addr == 0);
@@ -153,7 +169,7 @@
memcpy((void*)addr, uva2kva(addr), PGSIZE);
write_csr(sstatus, sstatus);
- user_l3pt[addr/PGSIZE] = new_pte;
+ user_llpt[addr/PGSIZE] = new_pte;
flush_page(addr);
__builtin___clear_cache(0,0);
@@ -194,7 +210,7 @@
static void coherence_torture()
{
// cause coherence misses without affecting program semantics
- unsigned int random = ENTROPY;
+ uint64_t random = ENTROPY;
while (1) {
uintptr_t paddr = DRAM_BASE + ((random % (2 * (MAX_TEST_PAGES + 1) *
PGSIZE)) & -4);
#ifdef __riscv_atomic
@@ -209,7 +225,7 @@
void vm_boot(uintptr_t test_addr)
{
- unsigned int random = ENTROPY;
+ uint64_t random = ENTROPY;
if (read_csr(mhartid) > 0)
coherence_torture();
@@ -221,27 +237,38 @@
// map user to lowermost megapage
l1pt[0] = ((pte_t)user_l2pt >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
// map kernel to uppermost megapage
-#if __riscv_xlen == 64
+#if SATP_MODE_CHOICE == SATP_MODE_SV48
+ l1pt[PTES_PER_PT-1] = ((pte_t)kernel_l2pt >> PGSHIFT << PTE_PPN_SHIFT) |
PTE_V;
+ kernel_l2pt[PTES_PER_PT-1] = ((pte_t)kernel_l3pt >> PGSHIFT <<
PTE_PPN_SHIFT) | PTE_V;
+ kernel_l3pt[PTES_PER_PT-1] = (DRAM_BASE/RISCV_PGSIZE << PTE_PPN_SHIFT) |
PTE_V | PTE_R | PTE_W | PTE_X | PTE_A | PTE_D;
+ user_l2pt[0] = ((pte_t)user_l3pt >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
+ user_l3pt[0] = ((pte_t)user_llpt >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
+#elif SATP_MODE_CHOICE == SATP_MODE_SV39
l1pt[PTES_PER_PT-1] = ((pte_t)kernel_l2pt >> PGSHIFT << PTE_PPN_SHIFT) |
PTE_V;
kernel_l2pt[PTES_PER_PT-1] = (DRAM_BASE/RISCV_PGSIZE << PTE_PPN_SHIFT) |
PTE_V | PTE_R | PTE_W | PTE_X | PTE_A | PTE_D;
- user_l2pt[0] = ((pte_t)user_l3pt >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
- uintptr_t vm_choice = SATP_MODE_SV39;
-#else
+ user_l2pt[0] = ((pte_t)user_llpt >> PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
+#elif SATP_MODE_CHOICE == SATP_MODE_SV32
l1pt[PTES_PER_PT-1] = (DRAM_BASE/RISCV_PGSIZE << PTE_PPN_SHIFT) | PTE_V
| PTE_R | PTE_W | PTE_X | PTE_A | PTE_D;
- uintptr_t vm_choice = SATP_MODE_SV32;
+#else
+# error
#endif
- write_csr(sptbr, ((uintptr_t)l1pt >> PGSHIFT) |
- (vm_choice * (SATP_MODE & ~(SATP_MODE<<1))));
+ uintptr_t vm_choice = SATP_MODE_CHOICE;
+ uintptr_t sptbr_value = ((uintptr_t)l1pt >> PGSHIFT)
+ | (vm_choice * (SATP_MODE & ~(SATP_MODE<<1)));
+ write_csr(sptbr, sptbr_value);
+ if (read_csr(sptbr) != sptbr_value)
+ assert(!"unsupported satp mode");
// Set up PMPs if present, ignoring illegal instruction trap if not.
uintptr_t pmpc = PMP_NAPOT | PMP_R | PMP_W | PMP_X;
+ uintptr_t pmpa = ((uintptr_t)1 << (__riscv_xlen == 32 ? 31 : 53)) - 1;
asm volatile ("la t0, 1f\n\t"
"csrrw t0, mtvec, t0\n\t"
"csrw pmpaddr0, %1\n\t"
"csrw pmpcfg0, %0\n\t"
".align 2\n\t"
"1:"
- : : "r" (pmpc), "r" (-1UL) : "t0");
+ : : "r" (pmpc), "r" (pmpa) : "t0");
// set up supervisor trap handling
write_csr(stvec, pa2kva(trap_entry));
diff --git
a/tests/test-progs/asmtest/src/riscv/isa/macros/scalar/test_macros.h
b/tests/test-progs/asmtest/src/riscv/isa/macros/scalar/test_macros.h
index 58e389f..7ac9a3f 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/macros/scalar/test_macros.h
+++ b/tests/test-progs/asmtest/src/riscv/isa/macros/scalar/test_macros.h
@@ -44,9 +44,9 @@
#define SEXT_IMM(x) ((x) | (-(((x) >> 11) & 1) << 11))
#define TEST_IMM_OP( testnum, inst, result, val1, imm ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x1, MASK_XLEN(val1); \
- inst x30, x1, SEXT_IMM(imm); \
+ inst x14, x1, SEXT_IMM(imm); \
)
#define TEST_IMM_SRC1_EQ_DEST( testnum, inst, result, val1, imm ) \
@@ -59,20 +59,20 @@
TEST_CASE( testnum, x6, result, \
li x4, 0; \
1: li x1, MASK_XLEN(val1); \
- inst x30, x1, SEXT_IMM(imm); \
+ inst x14, x1, SEXT_IMM(imm); \
TEST_INSERT_NOPS_ ## nop_cycles \
- addi x6, x30, 0; \
+ addi x6, x14, 0; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
)
#define TEST_IMM_SRC1_BYPASS( testnum, nop_cycles, inst, result, val1, imm
) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x4, 0; \
1: li x1, MASK_XLEN(val1); \
TEST_INSERT_NOPS_ ## nop_cycles \
- inst x30, x1, SEXT_IMM(imm); \
+ inst x14, x1, SEXT_IMM(imm); \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -94,9 +94,9 @@
#-----------------------------------------------------------------------
#define TEST_R_OP( testnum, inst, result, val1 ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x1, val1; \
- inst x30, x1; \
+ inst x14, x1; \
)
#define TEST_R_SRC1_EQ_DEST( testnum, inst, result, val1 ) \
@@ -109,9 +109,9 @@
TEST_CASE( testnum, x6, result, \
li x4, 0; \
1: li x1, val1; \
- inst x30, x1; \
+ inst x14, x1; \
TEST_INSERT_NOPS_ ## nop_cycles \
- addi x6, x30, 0; \
+ addi x6, x14, 0; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -122,10 +122,10 @@
#-----------------------------------------------------------------------
#define TEST_RR_OP( testnum, inst, result, val1, val2 ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x1, MASK_XLEN(val1); \
li x2, MASK_XLEN(val2); \
- inst x30, x1, x2; \
+ inst x14, x1, x2; \
)
#define TEST_RR_SRC1_EQ_DEST( testnum, inst, result, val1, val2 ) \
@@ -153,35 +153,35 @@
li x4, 0; \
1: li x1, MASK_XLEN(val1); \
li x2, MASK_XLEN(val2); \
- inst x30, x1, x2; \
+ inst x14, x1, x2; \
TEST_INSERT_NOPS_ ## nop_cycles \
- addi x6, x30, 0; \
+ addi x6, x14, 0; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
)
#define TEST_RR_SRC12_BYPASS( testnum, src1_nops, src2_nops, inst, result,
val1, val2 ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x4, 0; \
1: li x1, MASK_XLEN(val1); \
TEST_INSERT_NOPS_ ## src1_nops \
li x2, MASK_XLEN(val2); \
TEST_INSERT_NOPS_ ## src2_nops \
- inst x30, x1, x2; \
+ inst x14, x1, x2; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
)
#define TEST_RR_SRC21_BYPASS( testnum, src1_nops, src2_nops, inst, result,
val1, val2 ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
li x4, 0; \
1: li x2, MASK_XLEN(val2); \
TEST_INSERT_NOPS_ ## src1_nops \
li x1, MASK_XLEN(val1); \
TEST_INSERT_NOPS_ ## src2_nops \
- inst x30, x1, x2; \
+ inst x14, x1, x2; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -216,17 +216,17 @@
#-----------------------------------------------------------------------
#define TEST_LD_OP( testnum, inst, result, offset, base ) \
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
la x1, base; \
- inst x30, offset(x1); \
+ inst x14, offset(x1); \
)
#define TEST_ST_OP( testnum, load_inst, store_inst, result, offset, base )
\
- TEST_CASE( testnum, x30, result, \
+ TEST_CASE( testnum, x14, result, \
la x1, base; \
li x2, result; \
store_inst x2, offset(x1); \
- load_inst x30, offset(x1); \
+ load_inst x14, offset(x1); \
)
#define TEST_LD_DEST_BYPASS( testnum, nop_cycles, inst, result, offset,
base ) \
@@ -234,9 +234,9 @@
li TESTNUM, testnum; \
li x4, 0; \
1: la x1, base; \
- inst x30, offset(x1); \
+ inst x14, offset(x1); \
TEST_INSERT_NOPS_ ## nop_cycles \
- addi x6, x30, 0; \
+ addi x6, x14, 0; \
li x29, result; \
bne x6, x29, fail; \
addi x4, x4, 1; \
@@ -249,9 +249,9 @@
li x4, 0; \
1: la x1, base; \
TEST_INSERT_NOPS_ ## nop_cycles \
- inst x30, offset(x1); \
+ inst x14, offset(x1); \
li x29, result; \
- bne x30, x29, fail; \
+ bne x14, x29, fail; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -265,9 +265,9 @@
la x2, base; \
TEST_INSERT_NOPS_ ## src2_nops \
store_inst x1, offset(x2); \
- load_inst x30, offset(x2); \
+ load_inst x14, offset(x2); \
li x29, result; \
- bne x30, x29, fail; \
+ bne x14, x29, fail; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -281,9 +281,9 @@
li x1, result; \
TEST_INSERT_NOPS_ ## src2_nops \
store_inst x1, offset(x2); \
- load_inst x30, offset(x2); \
+ load_inst x14, offset(x2); \
li x29, result; \
- bne x30, x29, fail; \
+ bne x14, x29, fail; \
addi x4, x4, 1; \
li x5, 2; \
bne x4, x5, 1b \
@@ -359,7 +359,7 @@
li x4, 0; \
1: la x6, 2f; \
TEST_INSERT_NOPS_ ## nop_cycles \
- inst x19, x6, 0; \
+ inst x13, x6, 0; \
bne x0, TESTNUM, fail; \
2: addi x4, x4, 1; \
li x5, 2; \
@@ -614,19 +614,19 @@
#define TEST_CASE_D32( testnum, testreg1, testreg2, correctval, code... ) \
test_ ## testnum: \
code; \
- la x31, test_ ## testnum ## _data ; \
- lw x29, 0(x31); \
- lw x31, 4(x31); \
+ la x15, test_ ## testnum ## _data ; \
+ lw x29, 0(x15); \
+ lw x15, 4(x15); \
li TESTNUM, testnum; \
bne testreg1, x29, fail;\
- bne testreg2, x31, fail;\
+ bne testreg2, x15, fail;\
.pushsection .data; \
.align 3; \
test_ ## testnum ## _data: \
.dword correctval; \
.popsection
-// ^ x30 is used in some other macros, to avoid issues we use x31 for
upper word
+// ^ x14 is used in some other macros, to avoid issues we use x15 for
upper word
#-----------------------------------------------------------------------
# Pass and fail code (assumes test num is in TESTNUM)
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/access.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/access.S
index 202a364..40a28d3 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/access.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/access.S
@@ -24,7 +24,7 @@
# after the pc is set to rs1, an access exception should be raised.
li TESTNUM, 2
li t1, CAUSE_FETCH_ACCESS
- la t3, 1f
+ la s1, 1f
li t2, 0
jalr t2, t0
1:
@@ -32,8 +32,8 @@
# A load to an illegal address should not commit.
li TESTNUM, 3
li t1, CAUSE_LOAD_ACCESS
- la t3, 1f
- mv t2, t3
+ la s1, 1f
+ mv t2, s1
lb t2, (t0)
j fail
1:
@@ -52,12 +52,12 @@
j fail
2:
- bne t2, t3, fail
+ bne t2, s1, fail
csrr t2, mcause
bne t2, t1, fail
- csrw mepc, t3
+ csrw mepc, s1
mret
RVTEST_CODE_END
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/breakpoint.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/breakpoint.S
index 647430b..252a696 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/breakpoint.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/breakpoint.S
@@ -21,19 +21,12 @@
csrr a1, tselect
bne x0, a1, pass
- # Make sure there's a breakpoint there.
- csrr a0, tdata1
- srli a0, a0, __riscv_xlen - 4
- li a1, 2
- bne a0, a1, pass
-
la a2, 1f
csrw tdata2, a2
- li a0, MCONTROL_M | MCONTROL_EXECUTE
+ li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_EXECUTE
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
- andi a1, a1, 0x7ff
bne a0, a1, 2f
.align 2
1:
@@ -47,11 +40,10 @@
2:
# Set up breakpoint to trap on M-mode reads.
li TESTNUM, 4
- li a0, MCONTROL_M | MCONTROL_LOAD
+ li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_LOAD
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
- andi a1, a1, 0x7ff
bne a0, a1, 2f
la a2, data1
csrw tdata2, a2
@@ -67,11 +59,10 @@
2:
# Set up breakpoint to trap on M-mode stores.
li TESTNUM, 6
- li a0, MCONTROL_M | MCONTROL_STORE
+ li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_STORE
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
- andi a1, a1, 0x7ff
bne a0, a1, 2f
# Trap handler should skip this instruction.
@@ -88,13 +79,7 @@
csrr a1, tselect
bne a0, a1, pass
- # Make sure there's a breakpoint there.
- csrr a0, tdata1
- srli a0, a0, __riscv_xlen - 4
- li a1, 2
- bne a0, a1, pass
-
- li a0, MCONTROL_M | MCONTROL_LOAD
+ li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_LOAD
csrw tdata1, a0
la a3, data2
csrw tdata2, a3
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/illegal.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/illegal.S
index d825c44..5531570 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64mi/illegal.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64mi/illegal.S
@@ -134,11 +134,12 @@
# Make sure mtval contains either 0 or the instruction word.
csrr t2, mbadaddr
beqz t2, 1f
- lhu t3, 0(t0)
- lhu t4, 2(t0)
- slli t4, t4, 16
- or t3, t3, t4
- bne t2, t3, fail
+ lhu t1, 0(t0)
+ xor t2, t2, t1
+ lhu t1, 2(t0)
+ slli t1, t1, 16
+ xor t2, t2, t1
+ bnez t2, fail
1:
la t1, bad2
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64si/Makefrag
b/tests/test-progs/asmtest/src/riscv/isa/rv64si/Makefrag
index f9ca5e8..c4a5564 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64si/Makefrag
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64si/Makefrag
@@ -5,6 +5,7 @@
rv64si_sc_tests = \
csr \
dirty \
+ icache-alias \
ma_fetch \
scall \
wfi \
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64si/csr.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64si/csr.S
index dbe1c05..2860c8d 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64si/csr.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64si/csr.S
@@ -46,6 +46,9 @@
#endif
#endif
+ TEST_CASE(15, a0, 0, csrrwi a0, sscratch, 0; csrrwi a0,
sscratch, 0xF);
+ TEST_CASE(16, a0, 0, csrw sscratch, zero; csrr a0, sscratch);
+
csrwi sscratch, 3
TEST_CASE( 2, a0, 3, csrr a0, sscratch);
TEST_CASE( 3, a1, 3, csrrci a1, sscratch, 1);
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64si/icache-alias.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64si/icache-alias.S
new file mode 100644
index 0000000..dbc934e
--- /dev/null
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64si/icache-alias.S
@@ -0,0 +1,141 @@
+# See LICENSE for license details.
+
+#*****************************************************************************
+# icache-alias.S
+#-----------------------------------------------------------------------------
+#
+# Test that instruction memory appears to be physically addressed, i.e.,
+# that disagreements in the low-order VPN and PPN bits don't cause the
+# wrong instruction to be fetched. It also tests that changing a page
+# mapping takes effect without executing FENCE.I.
+#
+
+#include "riscv_test.h"
+#include "test_macros.h"
+
+RVTEST_RV64M
+RVTEST_CODE_BEGIN
+
+ li TESTNUM, 2
+
+ # Set up intermediate page tables
+
+ la t0, page_table_3
+ srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
+ ori t0, t0, PTE_V
+ sd t0, page_table_2, t1
+
+ la t0, page_table_2
+ srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
+ ori t0, t0, PTE_V
+ sd t0, page_table_1, t1
+
+ # Set up leaf mappings where va[12] != pa[12]
+
+ la t0, code_page_1
+ srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
+ ori t0, t0, PTE_V | PTE_X | PTE_A
+ sd t0, page_table_3 + 8, t1
+
+ la t0, code_page_2
+ srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
+ ori t0, t0, PTE_V | PTE_X | PTE_A
+ sd t0, page_table_3 + 0, t1
+
+ # Turn on VM
+
+ li a0, (SATP_MODE & ~(SATP_MODE<<1)) * SATP_MODE_SV39
+ la a1, page_table_1
+ srl a1, a1, RISCV_PGSHIFT
+ or a1, a1, a0
+ csrw sptbr, a1
+ sfence.vma
+
+ # Enter supervisor mode and make sure correct page is accessed
+
+ la a2, 1f
+ csrwi mepc, 0
+ li a1, ((MSTATUS_MPP & ~(MSTATUS_MPP<<1)) * PRV_S)
+ csrs mstatus, a1
+ mret
+
+1:
+ li TESTNUM, 2
+ addi a0, a0, -321
+ bnez a0, fail
+
+ li TESTNUM, 3
+ la a2, 1f
+ li t0, RISCV_PGSIZE
+ csrw mepc, t0
+ mret
+
+1:
+ addi a0, a0, -123
+ bnez a0, fail
+
+ li TESTNUM, 4
+ la a2, 1f
+ csrwi mepc, 0
+ mret
+
+ .align 2
+1:
+ addi a0, a0, -321
+ bnez a0, fail
+
+ li TESTNUM, 5
+
+ # Change mapping and try again
+
+ la t0, code_page_1
+ srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
+ ori t0, t0, PTE_V | PTE_X | PTE_A
+ sd t0, page_table_3 + 0, t1
+ sfence.vma
+
+ la a2, 1f
+ csrwi mepc, 0
+ mret
+
+ .align 2
+1:
+ addi a0, a0, -123
+ bnez a0, fail
+
+ RVTEST_PASS
+
+ TEST_PASSFAIL
+
+ .align 2
+ .global mtvec_handler
+mtvec_handler:
+ csrr t0, mcause
+ add t0, t0, -CAUSE_STORE_PAGE_FAULT
+ bnez t0, fail
+
+ jr a2
+
+RVTEST_CODE_END
+
+ .data
+RVTEST_DATA_BEGIN
+
+ TEST_DATA
+
+.align 12
+page_table_1: .dword 0
+.align 12
+page_table_2: .dword 0
+.align 12
+page_table_3: .dword 0
+.align 13
+code_page_1:
+ li a0, 123
+ sw x0, (x0)
+.align 12
+code_page_2:
+ li a0, 321
+ sw x0, (x0)
+
+RVTEST_DATA_END
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64si/ma_fetch.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64si/ma_fetch.S
index cd5a22d..7d2adec 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64si/ma_fetch.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64si/ma_fetch.S
@@ -106,6 +106,51 @@
j fail
2:
+#ifdef __MACHINE_MODE
+ # RVC cannot be disabled if doing so would cause a misaligned instruction
+ # exception on the next instruction fetch. (This test assumes no other
+ # extensions that support misalignment are present.)
+ li TESTNUM, 8
+ csrr t2, misa
+ andi t2, t2, 1 << ('c' - 'a')
+ beqz t2, 2f
+
+ .option rvc
+ c.nop
+ csrci misa, 1 << ('c' - 'a')
+1:
+ c.nop
+ .option norvc
+
+ csrr t2, misa
+ andi t2, t2, 1 << ('c' - 'a')
+ beqz t2, fail
+
+ # When RVC is disabled, mret to a misaligned mepc should succeed,
+ # masking off mepc[1].
+ la t0, 1f
+ addi t0, t0, -2
+ csrw mepc, t0
+
+ # Try to disable RVC; if it can't be disabled, skip the test.
+ csrci misa, 1 << ('c' - 'a')
+ csrr t2, misa
+ andi t2, t2, 1 << ('c' - 'a')
+ bnez t2, 2f
+
+ li t2, MSTATUS_MPP
+ csrs mstatus, t2
+ mret
+
+ # mret should transfer control to this branch. Otherwise, it will
+ # transfer control two bytes into the branch, which happens to be the
+ # illegal instruction c.unimp.
+ beqz x0, 1f
+1:
+ csrsi misa, 1 << ('c' - 'a')
+2:
+#endif
+
j pass
TEST_PASSFAIL
@@ -113,7 +158,7 @@
.align 2
.global stvec_handler
stvec_handler:
- # tests 2, 4, 5, and 6 should trap
+ # tests 2, 4, 5, 6, and 8 should trap
li a0, 2
beq TESTNUM, a0, 1f
li a0, 4
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64si/scall.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64si/scall.S
index 0579806..77718f2 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64si/scall.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64si/scall.S
@@ -19,7 +19,6 @@
#define scause mcause
#define sepc mepc
#define sret mret
- #define stvec_handler mtvec_handler
#undef SSTATUS_SPP
#define SSTATUS_SPP MSTATUS_MPP
#endif
@@ -34,8 +33,8 @@
# Otherwise, if in S mode, then U mode must exist and we don't need to
check.
li t0, MSTATUS_MPP
csrc mstatus, t0
- csrr t1, mstatus
- and t0, t0, t1
+ csrr t2, mstatus
+ and t0, t0, t2
beqz t0, 1f
# If U mode doesn't exist, mcause should indicate ECALL from M mode.
@@ -57,6 +56,11 @@
TEST_PASSFAIL
+# make the linker not find the symbol stvec_handler when running in machine
+# mode. env/p/riscv_test.h sets stvec to the address of that symbol in
case it
+# is non-zero. thus, effectively, we don't register a handler for scalls,
so
+# that the default handler (trap_vector) is used.
+#ifndef __MACHINE_MODE
.align 2
.global stvec_handler
stvec_handler:
@@ -66,6 +70,7 @@
csrr t0, sepc
bne t0, t2, fail
j pass
+#endif
RVTEST_CODE_END
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ua/lrsc.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ua/lrsc.S
index 11eb7de..c7589d7 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ua/lrsc.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ua/lrsc.S
@@ -28,14 +28,20 @@
# make sure that sc without a reservation fails.
TEST_CASE( 2, a4, 1, \
la a0, foo; \
- sc.w a4, x0, (a0); \
+ li a5, 0xdeadbeef; \
+ sc.w a4, a5, (a0); \
+)
+
+# make sure the failing sc did not commit into memory
+TEST_CASE( 3, a4, 0, \
+ lw a4, foo; \
)
# make sure that sc with the wrong reservation fails.
# TODO is this actually mandatory behavior?
-TEST_CASE( 3, a4, 1, \
+TEST_CASE( 4, a4, 1, \
la a0, foo; \
- add a1, a0, 1024; \
+ la a1, fooTest3; \
lr.w a1, (a1); \
sc.w a4, a1, (a0); \
)
@@ -62,7 +68,7 @@
fence
# expected result is 512*ncores*(ncores+1)
-TEST_CASE( 4, a0, 0, \
+TEST_CASE( 5, a0, 0, \
lw a0, foo; \
slli a1, a3, LOG_ITERATIONS-1; \
1:sub a0, a0, a1; \
@@ -70,6 +76,15 @@
bgez a3, 1b
)
+# make sure that sc-after-successful-sc fails.
+TEST_CASE( 6, a1, 1, \
+ la a0, foo; \
+1:lr.w a1, (a0); \
+ sc.w a1, x0, (a0); \
+ bnez a1, 1b; \
+ sc.w a1, x0, (a0)
+)
+
TEST_PASSFAIL
RVTEST_CODE_END
@@ -82,4 +97,6 @@
coreid: .word 0
barrier: .word 0
foo: .word 0
+.skip 1024
+fooTest3: .word 0
RVTEST_DATA_END
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64uc/Makefrag
b/tests/test-progs/asmtest/src/riscv/isa/rv64uc/Makefrag
index 90aeb63..a1fec6f 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64uc/Makefrag
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64uc/Makefrag
@@ -5,7 +5,7 @@
rv64uc_sc_tests = \
rvc \
-rv64ua_p_tests = $(addprefix rv64ua-p-, $(rv64ua_sc_tests))
+rv64uc_p_tests = $(addprefix rv64uc-p-, $(rv64uc_sc_tests))
rv64uc_v_tests = $(addprefix rv64uc-v-, $(rv64uc_sc_tests))
rv64ua_ps_tests = $(addprefix rv64ua-ps-, $(rv64ua_sc_tests))
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ud/structural.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ud/structural.S
index 5ecbb96..3cf87aa 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ud/structural.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ud/structural.S
@@ -14,7 +14,7 @@
RVTEST_RV64UF
RVTEST_CODE_BEGIN
-li x25, 1
+li x12, 1
li x2, 0x3FF0000000000000
li x1, 0x3F800000
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sll.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sll.S
index 257aa9d..8682743 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sll.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sll.S
@@ -35,7 +35,7 @@
TEST_RR_OP( 15, sll, 0x0000084848484000, 0x0000000021212121, 14 );
TEST_RR_OP( 16, sll, 0x1090909080000000, 0x0000000021212121, 31 );
- # Verify that shifts only use bottom six bits
+ # Verify that shifts only use bottom six(rv64) or five(rv32) bits
TEST_RR_OP( 17, sll, 0x0000000021212121, 0x0000000021212121,
0xffffffffffffffc0 );
TEST_RR_OP( 18, sll, 0x0000000042424242, 0x0000000021212121,
0xffffffffffffffc1 );
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slli.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slli.S
index f28ea1c..b5341ad 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slli.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slli.S
@@ -36,9 +36,9 @@
TEST_IMM_OP( 16, slli, 0x1090909080000000, 0x0000000021212121, 31 );
#if __riscv_xlen == 64
- TEST_RR_OP( 50, sll, 0x8000000000000000, 0x0000000000000001, 63 );
- TEST_RR_OP( 51, sll, 0xffffff8000000000, 0xffffffffffffffff, 39 );
- TEST_RR_OP( 52, sll, 0x0909080000000000, 0x0000000021212121, 43 );
+ TEST_IMM_OP( 50, slli, 0x8000000000000000, 0x0000000000000001, 63 );
+ TEST_IMM_OP( 51, slli, 0xffffff8000000000, 0xffffffffffffffff, 39 );
+ TEST_IMM_OP( 52, slli, 0x0909080000000000, 0x0000000021212121, 43 );
#endif
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slliw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slliw.S
index 7822f09..0ed888b 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slliw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/slliw.S
@@ -35,6 +35,13 @@
TEST_IMM_OP( 15, slliw, 0x0000000048484000, 0x0000000021212121, 14 );
TEST_IMM_OP( 16, slliw, 0xffffffff80000000, 0x0000000021212121, 31 );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_IMM_OP( 44, slliw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_IMM_OP( 45, slliw, 0x0000000023456780, 0xffffffff12345678, 4 );
+ TEST_IMM_OP( 46, slliw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_IMM_OP( 47, slliw, 0xffffffff93456780, 0x0000000099345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sllw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sllw.S
index 59770ee..62b4db6 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sllw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sllw.S
@@ -43,6 +43,13 @@
TEST_RR_OP( 20, sllw, 0x0000000048484000, 0x0000000021212121,
0xffffffffffffffee );
TEST_RR_OP( 21, sllw, 0xffffffff80000000, 0x0000000021212121,
0xffffffffffffffff );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_RR_OP( 44, sllw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_RR_OP( 45, sllw, 0x0000000023456780, 0xffffffff12345678, 4 );
+ TEST_RR_OP( 46, sllw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_RR_OP( 47, sllw, 0xffffffff93456780, 0x0000000099345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sra.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sra.S
index 9b359a3..580ae89 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sra.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sra.S
@@ -35,7 +35,7 @@
TEST_RR_OP( 15, sra, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_RR_OP( 16, sra, 0xffffffffffffffff, 0xffffffff81818181, 31 );
- # Verify that shifts only use bottom five bits
+  # Verify that shifts only use bottom six (rv64) or five (rv32) bits
TEST_RR_OP( 17, sra, 0xffffffff81818181, 0xffffffff81818181,
0xffffffffffffffc0 );
TEST_RR_OP( 18, sra, 0xffffffffc0c0c0c0, 0xffffffff81818181,
0xffffffffffffffc1 );
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraiw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraiw.S
index 9240c9b..a435e59 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraiw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraiw.S
@@ -35,6 +35,13 @@
TEST_IMM_OP( 15, sraiw, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_IMM_OP( 16, sraiw, 0xffffffffffffffff, 0xffffffff81818181, 31 );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_IMM_OP( 44, sraiw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_IMM_OP( 45, sraiw, 0x0000000001234567, 0xffffffff12345678, 4 );
+ TEST_IMM_OP( 46, sraiw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_IMM_OP( 47, sraiw, 0xfffffffff9234567, 0x0000000092345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraw.S
index 8c234c1..68d913e 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/sraw.S
@@ -43,6 +43,13 @@
TEST_RR_OP( 20, sraw, 0xfffffffffffe0606, 0xffffffff81818181,
0xffffffffffffffee );
TEST_RR_OP( 21, sraw, 0xffffffffffffffff, 0xffffffff81818181,
0xffffffffffffffff );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_RR_OP( 44, sraw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_RR_OP( 45, sraw, 0x0000000001234567, 0xffffffff12345678, 4 );
+ TEST_RR_OP( 46, sraw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_RR_OP( 47, sraw, 0xfffffffff9234567, 0x0000000092345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srl.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srl.S
index c1e936a..5ee223f 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srl.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srl.S
@@ -38,7 +38,7 @@
TEST_SRL( 15, 0x0000000021212121, 14 );
TEST_SRL( 16, 0x0000000021212121, 31 );
- # Verify that shifts only use bottom five bits
+  # Verify that shifts only use bottom six (rv64) or five (rv32) bits
TEST_RR_OP( 17, srl, 0x0000000021212121, 0x0000000021212121,
0xffffffffffffffc0 );
TEST_RR_OP( 18, srl, 0x0000000010909090, 0x0000000021212121,
0xffffffffffffffc1 );
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srli.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srli.S
index 88ee8d2..3522957 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srli.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srli.S
@@ -17,26 +17,26 @@
# Arithmetic tests
#-------------------------------------------------------------
-#define TEST_SRL(n, v, a) \
+#define TEST_SRLI(n, v, a) \
TEST_IMM_OP(n, srli, ((v) & ((1 << (__riscv_xlen-1) << 1) - 1)) >> (a),
v, a)
- TEST_SRL( 2, 0xffffffff80000000, 0 );
- TEST_SRL( 3, 0xffffffff80000000, 1 );
- TEST_SRL( 4, 0xffffffff80000000, 7 );
- TEST_SRL( 5, 0xffffffff80000000, 14 );
- TEST_SRL( 6, 0xffffffff80000001, 31 );
+ TEST_SRLI( 2, 0xffffffff80000000, 0 );
+ TEST_SRLI( 3, 0xffffffff80000000, 1 );
+ TEST_SRLI( 4, 0xffffffff80000000, 7 );
+ TEST_SRLI( 5, 0xffffffff80000000, 14 );
+ TEST_SRLI( 6, 0xffffffff80000001, 31 );
- TEST_SRL( 7, 0xffffffffffffffff, 0 );
- TEST_SRL( 8, 0xffffffffffffffff, 1 );
- TEST_SRL( 9, 0xffffffffffffffff, 7 );
- TEST_SRL( 10, 0xffffffffffffffff, 14 );
- TEST_SRL( 11, 0xffffffffffffffff, 31 );
+ TEST_SRLI( 7, 0xffffffffffffffff, 0 );
+ TEST_SRLI( 8, 0xffffffffffffffff, 1 );
+ TEST_SRLI( 9, 0xffffffffffffffff, 7 );
+ TEST_SRLI( 10, 0xffffffffffffffff, 14 );
+ TEST_SRLI( 11, 0xffffffffffffffff, 31 );
- TEST_SRL( 12, 0x0000000021212121, 0 );
- TEST_SRL( 13, 0x0000000021212121, 1 );
- TEST_SRL( 14, 0x0000000021212121, 7 );
- TEST_SRL( 15, 0x0000000021212121, 14 );
- TEST_SRL( 16, 0x0000000021212121, 31 );
+ TEST_SRLI( 12, 0x0000000021212121, 0 );
+ TEST_SRLI( 13, 0x0000000021212121, 1 );
+ TEST_SRLI( 14, 0x0000000021212121, 7 );
+ TEST_SRLI( 15, 0x0000000021212121, 14 );
+ TEST_SRLI( 16, 0x0000000021212121, 31 );
#-------------------------------------------------------------
# Source/Destination tests
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srliw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srliw.S
index a8b9fd7..471042f 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srliw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srliw.S
@@ -35,6 +35,13 @@
TEST_IMM_OP( 15, srliw, 0x0000000000008484, 0x0000000021212121, 14 );
TEST_IMM_OP( 16, srliw, 0x0000000000000000, 0x0000000021212121, 31 );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_IMM_OP( 44, srliw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_IMM_OP( 45, srliw, 0x0000000001234567, 0xffffffff12345678, 4 );
+ TEST_IMM_OP( 46, srliw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_IMM_OP( 47, srliw, 0x0000000009234567, 0x0000000092345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
diff --git a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srlw.S
b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srlw.S
index 24a492a..f0d1dae 100644
--- a/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srlw.S
+++ b/tests/test-progs/asmtest/src/riscv/isa/rv64ui/srlw.S
@@ -43,6 +43,13 @@
TEST_RR_OP( 20, srlw, 0x0000000000008484, 0x0000000021212121,
0xffffffffffffffee );
TEST_RR_OP( 21, srlw, 0x0000000000000000, 0x0000000021212121,
0xffffffffffffffff );
+ # Verify that shifts ignore top 32 (using true 64-bit values)
+
+ TEST_RR_OP( 44, srlw, 0x0000000012345678, 0xffffffff12345678, 0 );
+ TEST_RR_OP( 45, srlw, 0x0000000001234567, 0xffffffff12345678, 4 );
+ TEST_RR_OP( 46, srlw, 0xffffffff92345678, 0x0000000092345678, 0 );
+ TEST_RR_OP( 47, srlw, 0x0000000009234567, 0x0000000092345678, 4 );
+
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/25705
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: Ied7c34fedc99657e667bddaf3f3521e5a8f96a6e
Gerrit-Change-Number: 25705
Gerrit-PatchSet: 1
Gerrit-Owner: Nils Asmussen <nils.asmus...@barkhauseninstitut.org>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list
gem5-dev@gem5.org
http://m5sim.org/mailman/listinfo/gem5-dev