The current rte_io.h for RISC-V only includes generic/rte_io.h, which uses volatile pointer casts for MMIO read/write. While the volatile accesses keep the compiler from eliding, merging, or reordering the MMIO operations themselves, they do not prevent CPU-level reordering on RISC-V, which is a weakly ordered architecture.
This patch adds a RISC-V specific implementation using explicit load/store instructions: - lbu/lhu/lwu/ld for reads (zero-extending to avoid sign-bit pollution) - sb/sh/sw/sd for writes Signed-off-by: Dang Shiwei <[email protected]> --- lib/eal/riscv/include/rte_io.h | 152 ++++++++++++++++++++++++++++++++- 1 file changed, 149 insertions(+), 3 deletions(-) diff --git a/lib/eal/riscv/include/rte_io.h b/lib/eal/riscv/include/rte_io.h index 4ae1f087ba..e67b5c1c9c 100644 --- a/lib/eal/riscv/include/rte_io.h +++ b/lib/eal/riscv/include/rte_io.h @@ -5,9 +5,155 @@ * Copyright(c) 2022 Semihalf */ -#ifndef RTE_IO_RISCV_H -#define RTE_IO_RISCV_H +#ifndef _RTE_IO_RISCV_H_ +#define _RTE_IO_RISCV_H_ + +#include <stdint.h> + +#define RTE_OVERRIDE_IO_H #include "generic/rte_io.h" +#include <rte_compat.h> +#include "rte_atomic.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static __rte_always_inline uint8_t +rte_read8_relaxed(const volatile void *addr) +{ + uint8_t val; + asm volatile("lbu %0, 0(%1)" : "=r"(val) : "r"(addr)); + return val; +} + +static __rte_always_inline uint16_t +rte_read16_relaxed(const volatile void *addr) +{ + uint16_t val; + asm volatile("lhu %0, 0(%1)" : "=r"(val) : "r"(addr)); + return val; +} + +static __rte_always_inline uint32_t +rte_read32_relaxed(const volatile void *addr) +{ + uint32_t val; + asm volatile("lwu %0, 0(%1)" : "=r"(val) : "r"(addr)); + return val; +} + +static __rte_always_inline uint64_t +rte_read64_relaxed(const volatile void *addr) +{ + uint64_t val; + asm volatile("ld %0, 0(%1)" : "=r"(val) : "r"(addr)); + return val; +} + +static __rte_always_inline void +rte_write8_relaxed(uint8_t val, volatile void *addr) +{ + asm volatile("sb %1, 0(%0)" : : "r"(addr), "r"(val)); +} + +static __rte_always_inline void +rte_write16_relaxed(uint16_t val, volatile void *addr) +{ + asm volatile("sh %1, 0(%0)" : : "r"(addr), "r"(val)); +} + +static __rte_always_inline void +rte_write32_relaxed(uint32_t val, volatile void *addr) +{ + asm volatile("sw 
%1, 0(%0)" : : "r"(addr), "r"(val)); +} + +static __rte_always_inline void +rte_write64_relaxed(uint64_t val, volatile void *addr) +{ + asm volatile("sd %1, 0(%0)" : : "r"(addr), "r"(val)); +} + + +static __rte_always_inline uint8_t +rte_read8(const volatile void *addr) +{ + uint8_t val = rte_read8_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint16_t +rte_read16(const volatile void *addr) +{ + uint16_t val = rte_read16_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint32_t +rte_read32(const volatile void *addr) +{ + uint32_t val = rte_read32_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint64_t +rte_read64(const volatile void *addr) +{ + uint64_t val = rte_read64_relaxed(addr); + rte_io_rmb(); + return val; +} + + +static __rte_always_inline void +rte_write8(uint8_t val, volatile void *addr) +{ + rte_io_wmb(); + rte_write8_relaxed(val, addr); +} + +static __rte_always_inline void +rte_write16(uint16_t val, volatile void *addr) +{ + rte_io_wmb(); + rte_write16_relaxed(val, addr); +} + +static __rte_always_inline void +rte_write32(uint32_t val, volatile void *addr) +{ + rte_io_wmb(); + rte_write32_relaxed(val, addr); +} + +static __rte_always_inline void +rte_write64(uint64_t val, volatile void *addr) +{ + rte_io_wmb(); + rte_write64_relaxed(val, addr); +} + +__rte_experimental +static __rte_always_inline void +rte_write32_wc(uint32_t val, volatile void *addr) +{ + rte_write32(val, addr); +} + +__rte_experimental +static __rte_always_inline void +rte_write32_wc_relaxed(uint32_t val, volatile void *addr) +{ + rte_write32_relaxed(val, addr); +} + +#ifdef __cplusplus +} +#endif -#endif /* RTE_IO_RISCV_H */ +#endif /* _RTE_IO_RISCV_H_ */ -- 2.43.0

