On 8/21/25 21:26, Igor Mammedov wrote:
The helpers form a load-acquire/store-release pair; use them to replace the
open-coded checks/setters consistently across the code, which
ensures that the appropriate barriers are in place in case the checks happen
outside of the BQL.
Signed-off-by: Igor Mammedov <imamm...@redhat.com>
Reviewed-by: Peter Xu <pet...@redhat.com>
Reviewed-by: Jason J. Herne <jjhe...@linux.ibm.com>
---
v5: fix copy/paste error in doc comment of cpu_set_interrupt()
"Jason J. Herne" <jjhe...@linux.ibm.com>
v4:
add cpu_set_interrupt() and merge helpers patch with
patches that use them (v3 6-7,9/10).
Peter Xu <pet...@redhat.com>
---
include/hw/core/cpu.h | 25 +++++++++++++++++++++
accel/tcg/cpu-exec.c | 10 ++++-----
accel/tcg/tcg-accel-ops.c | 2 +-
accel/tcg/user-exec.c | 2 +-
hw/intc/s390_flic.c | 2 +-
hw/openrisc/cputimer.c | 2 +-
system/cpus.c | 2 +-
target/alpha/cpu.c | 8 +++----
target/arm/cpu.c | 20 ++++++++---------
target/arm/helper.c | 18 +++++++--------
target/arm/hvf/hvf.c | 6 ++---
target/avr/cpu.c | 2 +-
target/hppa/cpu.c | 2 +-
target/i386/hvf/hvf.c | 4 ++--
target/i386/hvf/x86hvf.c | 21 +++++++++---------
target/i386/kvm/kvm.c | 34 ++++++++++++++---------------
target/i386/nvmm/nvmm-all.c | 24 ++++++++++----------
target/i386/tcg/system/seg_helper.c | 2 +-
target/i386/tcg/system/svm_helper.c | 2 +-
target/i386/whpx/whpx-all.c | 34 ++++++++++++++---------------
target/loongarch/cpu.c | 2 +-
target/m68k/cpu.c | 2 +-
target/microblaze/cpu.c | 2 +-
target/mips/cpu.c | 6 ++---
target/mips/kvm.c | 2 +-
target/openrisc/cpu.c | 3 +--
target/ppc/cpu_init.c | 2 +-
target/ppc/kvm.c | 2 +-
target/rx/cpu.c | 3 +--
target/rx/helper.c | 2 +-
target/s390x/cpu-system.c | 2 +-
target/sh4/cpu.c | 2 +-
target/sh4/helper.c | 2 +-
target/sparc/cpu.c | 2 +-
target/sparc/int64_helper.c | 4 ++--
35 files changed, 141 insertions(+), 119 deletions(-)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 5eaf41a566..1dee9d4c76 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -942,6 +942,31 @@ CPUState *cpu_by_arch_id(int64_t id);
void cpu_interrupt(CPUState *cpu, int mask);
+/**
+ * cpu_test_interrupt:
+ * @cpu: The CPU to check interrupt(s) on.
+ * @mask: The interrupts to check.
+ *
+ * Checks if any of the interrupts in @mask are pending on @cpu.
+ */
+static inline bool cpu_test_interrupt(CPUState *cpu, int mask)
+{
+ return qatomic_load_acquire(&cpu->interrupt_request) & mask;
+}
+
+/**
+ * cpu_set_interrupt:
+ * @cpu: The CPU to set pending interrupt(s) on.
+ * @mask: The interrupts to set.
+ *
+ * Sets interrupts in @mask as pending on @cpu.
+ */
+static inline void cpu_set_interrupt(CPUState *cpu, int mask)
+{
+ qatomic_store_release(&cpu->interrupt_request,
+ cpu->interrupt_request | mask);
+}
+
/**
* cpu_set_pc:
* @cpu: The CPU to set the program counter for.
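Just to spell out the pairing for anyone reading along (my understanding, not
part of the patch): the thread that raises an interrupt publishes any
associated state with the store-release, and a vCPU thread polling outside
the BQL observes it with the load-acquire, so writes done before
cpu_set_interrupt() are visible by the time cpu_test_interrupt() sees the
bit. Roughly like the sketch below, where example_inject()/example_poll()
are made-up names:

  /* Illustration only; example_inject()/example_poll() are hypothetical. */
  #include "hw/core/cpu.h"

  /* Producer side (device model / iothread): write state, then publish. */
  static void example_inject(CPUState *cpu)
  {
      /* ... update whatever per-interrupt state the vCPU must observe ... */
      cpu_set_interrupt(cpu, CPU_INTERRUPT_HARD);   /* store-release */
  }

  /* Consumer side (vCPU thread), possibly without holding the BQL: */
  static bool example_poll(CPUState *cpu)
  {
      if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {   /* load-acquire */
          /* writes done before the store-release above are visible here */
          return true;
      }
      return false;
  }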
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 713bdb2056..1269c2c6ba 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -778,7 +778,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
- if (unlikely(qatomic_read(&cpu->interrupt_request))) {
+ if (unlikely(cpu_test_interrupt(cpu, ~0))) {
int interrupt_request;
bql_lock();
interrupt_request = cpu->interrupt_request;
@@ -786,7 +786,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
- if (interrupt_request & CPU_INTERRUPT_DEBUG) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_DEBUG)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
Do we need a helper for instances like these as well (clearing bits with &= ~mask)?
I see a couple more such instances below.
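If we did want one, a minimal sketch of what I have in mind
(cpu_clear_interrupt is a made-up name here; these call sites hold the BQL,
so release semantics may not even be needed):

  /* Illustrative only, not part of this patch; name and semantics are guesses. */
  static inline void cpu_clear_interrupt(CPUState *cpu, int mask)
  {
      /* mirrors cpu_set_interrupt(); a plain store may suffice under the BQL */
      qatomic_store_release(&cpu->interrupt_request,
                            cpu->interrupt_request & ~mask);
  }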
cpu->exception_index = EXCP_DEBUG;
bql_unlock();
@@ -795,7 +795,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
- } else if (interrupt_request & CPU_INTERRUPT_HALT) {
+ } else if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HALT)) {
replay_interrupt();
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
@@ -805,7 +805,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
} else {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- if (interrupt_request & CPU_INTERRUPT_RESET) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_RESET)) {
replay_interrupt();
tcg_ops->cpu_exec_reset(cpu);
bql_unlock();
@@ -841,7 +841,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
interrupt_request = cpu->interrupt_request;
}
#endif /* !CONFIG_USER_ONLY */
- if (interrupt_request & CPU_INTERRUPT_EXITTB) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_EXITTB)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 3b0d7d298e..9c37266c1e 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -97,7 +97,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
- cpu->interrupt_request |= mask;
+ cpu_set_interrupt(cpu, mask);
/*
* If called from iothread context, wake the target cpu in
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index f25d80e2dc..fc2eaf420d 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -49,7 +49,7 @@ __thread uintptr_t helper_retaddr;
void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(bql_locked());
- cpu->interrupt_request |= mask;
+ cpu_set_interrupt(cpu, mask);
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
<snip>
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index a0e77f2673..db841f1260 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -7225,7 +7225,7 @@ static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
#ifndef CONFIG_USER_ONLY
static bool ppc_cpu_has_work(CPUState *cs)
{
- return cs->interrupt_request & CPU_INTERRUPT_HARD;
+ return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
}
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 015658049e..d145774b09 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -1354,7 +1354,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
FIELD_EX64(env->msr, MSR, EE)) {
cs->halted = 1;
cs->exception_index = EXCP_HLT;
For target/ppc:
Reviewed-by: Harsh Prateek Bora <hars...@linux.ibm.com>
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index c6dd5d6f83..da02ae7bf8 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -75,8 +75,7 @@ static void rx_restore_state_to_opc(CPUState *cs,
static bool rx_cpu_has_work(CPUState *cs)
{
- return cs->interrupt_request &
- (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
+ return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
}
static int rx_cpu_mmu_index(CPUState *cs, bool ifunc)
diff --git a/target/rx/helper.c b/target/rx/helper.c
index 0640ab322b..ce003af421 100644
--- a/target/rx/helper.c
+++ b/target/rx/helper.c
@@ -44,7 +44,7 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
void rx_cpu_do_interrupt(CPUState *cs)
{
CPURXState *env = cpu_env(cs);
- int do_irq = cs->interrupt_request & INT_FLAGS;
+ int do_irq = cpu_test_interrupt(cs, INT_FLAGS);
uint32_t save_psw;
env->in_sleep = 0;
diff --git a/target/s390x/cpu-system.c b/target/s390x/cpu-system.c
index 709ccd5299..f3a9ffb2a2 100644
--- a/target/s390x/cpu-system.c
+++ b/target/s390x/cpu-system.c
@@ -49,7 +49,7 @@ bool s390_cpu_has_work(CPUState *cs)
return false;
}
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
return false;
}
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 4f561e8c91..21ccb86df4 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -108,7 +108,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
static bool superh_cpu_has_work(CPUState *cs)
{
- return cs->interrupt_request & CPU_INTERRUPT_HARD;
+ return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
}
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index fb7642bda1..1744ef0e6d 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -58,7 +58,7 @@ int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
void superh_cpu_do_interrupt(CPUState *cs)
{
CPUSH4State *env = cpu_env(cs);
- int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
+ int do_irq = cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
int do_exp, irq_vector = cs->exception_index;
/* prioritize exceptions over interrupts */
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 245caf2de0..c9773f1540 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -783,7 +783,7 @@ static void sparc_restore_state_to_opc(CPUState *cs,
#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_has_work(CPUState *cs)
{
- return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
cpu_interrupts_enabled(cpu_env(cs));
}
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index bd14c7a0db..49e4e51c6d 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -89,7 +89,7 @@ void cpu_check_irqs(CPUSPARCState *env)
* the next bit is (2 << psrpil).
*/
if (pil < (2 << env->psrpil)) {
- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index);
env->interrupt_index = 0;
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
@@ -120,7 +120,7 @@ void cpu_check_irqs(CPUSPARCState *env)
break;
}
}
- } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ } else if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint,
env->interrupt_index);
env->interrupt_index = 0;