Differently-sized larx/stcx. pairs can currently succeed as long as the starting address matches. Add a size check to require that the stcx. size exactly match that of the larx which established the reservation.
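As an illustration (not part of the patch itself), a minimal standalone C sketch of the condition the generated stcx. code now enforces; the names below are hypothetical and only mirror the reserve_addr/reserve_size state added by this change:

    #include <stdbool.h>
    #include <stdint.h>

    /* State recorded by the establishing larx. */
    struct reservation {
        uint64_t addr;  /* starting address of the larx (reserve_addr) */
        uint64_t size;  /* access size of the larx (reserve_size) */
    };

    /* A stcx. may now only succeed if both the address and the size
     * match the larx that established the reservation; previously only
     * the address was compared. */
    static bool stcx_reservation_matches(const struct reservation *res,
                                         uint64_t ea, uint64_t size)
    {
        return ea == res->addr && size == res->size;
    }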
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 target/ppc/cpu.h       | 1 +
 target/ppc/cpu_init.c  | 4 ++--
 target/ppc/translate.c | 8 ++++++++
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 7959bfed0a..1d71f325d8 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1124,6 +1124,7 @@ struct CPUArchState {
     target_ulong ca32;
 
     target_ulong reserve_addr; /* Reservation address */
+    target_ulong reserve_size; /* Reservation size */
     target_ulong reserve_val;  /* Reservation value */
     target_ulong reserve_val2;
 
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 944a74befe..082981b148 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -7421,8 +7421,8 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
         }
         qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
     }
-    qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
-                 env->reserve_addr);
+    qemu_fprintf(f, " ] RES %03x@" TARGET_FMT_lx "\n",
+                 (int)env->reserve_size, env->reserve_addr);
 
     if (flags & CPU_DUMP_FPU) {
         for (i = 0; i < 32; i++) {
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index e129cdcb8f..5195047146 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -71,6 +71,7 @@ static TCGv cpu_cfar;
 #endif
 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
 static TCGv cpu_reserve;
+static TCGv cpu_reserve_size;
 static TCGv cpu_reserve_val;
 static TCGv cpu_reserve_val2;
 static TCGv cpu_fpscr;
@@ -141,6 +142,9 @@ void ppc_translate_init(void)
     cpu_reserve = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUPPCState, reserve_addr),
                                      "reserve_addr");
+    cpu_reserve_size = tcg_global_mem_new(cpu_env,
+                                          offsetof(CPUPPCState, reserve_size),
+                                          "reserve_size");
     cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val),
                                          "reserve_val");
@@ -3584,6 +3588,7 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop)
     gen_set_access_type(ctx, ACCESS_RES);
     gen_addr_reg_index(ctx, t0);
     tcg_gen_mov_tl(cpu_reserve, t0);
+    tcg_gen_movi_tl(cpu_reserve_size, memop_size(memop));
     tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
     tcg_gen_mov_tl(cpu_reserve_val, gpr);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
@@ -3816,6 +3821,7 @@ static void gen_conditional_store(DisasContext *ctx, MemOp memop)
     gen_set_access_type(ctx, ACCESS_RES);
     gen_addr_reg_index(ctx, t0);
     tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_size, memop_size(memop), l1);
 
     t0 = tcg_temp_new();
     tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
@@ -3873,6 +3879,7 @@ static void gen_lqarx(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
     tcg_gen_mov_tl(cpu_reserve, EA);
+    tcg_gen_movi_tl(cpu_reserve_size, 128);
 
     /* Note that the low part is always in RD+1, even in LE mode. */
     lo = cpu_gpr[rd + 1];
@@ -3907,6 +3914,7 @@ static void gen_stqcx_(DisasContext *ctx)
     gen_addr_reg_index(ctx, EA);
 
     tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_size, 128, lab_fail);
 
     cmp = tcg_temp_new_i128();
     val = tcg_temp_new_i128();
-- 
2.40.1