On Tue, 29 Oct 2019 23:54:30 PDT (-0700), alistai...@gmail.com wrote:
On Tue, Oct 29, 2019 at 4:14 PM Palmer Dabbelt <pal...@sifive.com> wrote:

On Tue, 29 Oct 2019 03:49:23 PDT (-0700), alistai...@gmail.com wrote:
> On Fri, Oct 18, 2019 at 7:44 PM Alistair Francis <alistai...@gmail.com> wrote:
>>
>> On Fri, Oct 18, 2019 at 9:51 AM Palmer Dabbelt <pal...@sifive.com> wrote:
>> >
>> > On Tue, 08 Oct 2019 15:04:18 PDT (-0700), Alistair Francis wrote:
>> > > Instead of relying on atomics to access the MIP register let's update
>> > > our helper function to instead just lock the iothread mutex before
>> > > writing. This follows the same concept as used in PPC for handling
>> > > interrupts.
>> > >
>> > > Signed-off-by: Alistair Francis <alistair.fran...@wdc.com>
>> > > ---
>> > >  target/riscv/cpu.c        |  5 ++--
>> > >  target/riscv/cpu.h        |  9 --------
>> > >  target/riscv/cpu_helper.c | 48 +++++++++++++++------------------------
>> > >  target/riscv/csr.c        |  2 +-
>> > >  4 files changed, 21 insertions(+), 43 deletions(-)
>> > >
>> > > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
>> > > index f13e298a36..e09dd7aa23 100644
>> > > --- a/target/riscv/cpu.c
>> > > +++ b/target/riscv/cpu.c
>> > > @@ -224,8 +224,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE 
*f, int flags)
>> > >  #ifndef CONFIG_USER_ONLY
>> > >      qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", 
env->mhartid);
>> > >      qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", 
env->mstatus);
>> > > -    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip     ",
>> > > -                 (target_ulong)atomic_read(&env->mip));
>> > > +    qemu_fprintf(f, " %s 0x%x\n", "mip     ", env->mip);
>> > >      qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie     ", env->mie);
>> > >      qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", 
env->mideleg);
>> > >      qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", 
env->medeleg);
>> > > @@ -275,7 +274,7 @@ static bool riscv_cpu_has_work(CPUState *cs)
>> > >       * Definition of the WFI instruction requires it to ignore the 
privilege
>> > >       * mode and delegation registers, but respect individual enables
>> > >       */
>> > > -    return (atomic_read(&env->mip) & env->mie) != 0;
>> > > +    return (env->mip & env->mie) != 0;
>> > >  #else
>> > >      return true;
>> > >  #endif
>> > > diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
>> > > index 124ed33ee4..a71473b243 100644
>> > > --- a/target/riscv/cpu.h
>> > > +++ b/target/riscv/cpu.h
>> > > @@ -121,15 +121,6 @@ struct CPURISCVState {
>> > >      target_ulong mhartid;
>> > >      target_ulong mstatus;
>> > >
>> > > -    /*
>> > > -     * CAUTION! Unlike the rest of this struct, mip is accessed 
asynchronously
>> > > -     * by I/O threads. It should be read with atomic_read. It should be 
updated
>> > > -     * using riscv_cpu_update_mip with the iothread mutex held. The 
iothread
mutex must be held because mip must be consistent with the CPU 
interrupt
>> > > -     * state. riscv_cpu_update_mip calls cpu_interrupt or 
cpu_reset_interrupt
>> > > -     * with the invariant that CPU_INTERRUPT_HARD is set iff mip is 
non-zero.
>> > > -     * mip is 32-bits to allow atomic_read on 32-bit hosts.
>> > > -     */
>> > >      uint32_t mip;
>> > >      uint32_t miclaim;
>> > >
>> > > diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
>> > > index 87dd6a6ece..4334978c2e 100644
>> > > --- a/target/riscv/cpu_helper.c
>> > > +++ b/target/riscv/cpu_helper.c
>> > > @@ -19,6 +19,7 @@
>> > >
>> > >  #include "qemu/osdep.h"
>> > >  #include "qemu/log.h"
>> > > +#include "qemu/main-loop.h"
>> > >  #include "cpu.h"
>> > >  #include "exec/exec-all.h"
>> > >  #include "tcg-op.h"
>> > > @@ -38,7 +39,7 @@ static int riscv_cpu_local_irq_pending(CPURISCVState 
*env)
>> > >  {
>> > >      target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
>> > >      target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
>> > > -    target_ulong pending = atomic_read(&env->mip) & env->mie;
>> > > +    target_ulong pending = env->mip & env->mie;
>> > >      target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && 
mstatus_mie);
>> > >      target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && 
mstatus_sie);
>> > >      target_ulong irqs = (pending & ~env->mideleg & -mie) |
>> > > @@ -92,42 +93,29 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, 
uint32_t interrupts)
>> > >      }
>> > >  }
>> > >
>> > > -struct CpuAsyncInfo {
>> > > -    uint32_t new_mip;
>> > > -};
>> > > -
>> > > -static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state,
>> > > -                                            run_on_cpu_data data)
>> > > -{
>> > > -    struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr;
>> > > -
>> > > -    if (info->new_mip) {
>> > > -        cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
>> > > -    } else {
>> > > -        cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
>> > > -    }
>> > > -
>> > > -    g_free(info);
>> > > -}
>> > > -
>> > >  uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t 
value)
>> > >  {
>> > >      CPURISCVState *env = &cpu->env;
>> > >      CPUState *cs = CPU(cpu);
>> > > -    struct CpuAsyncInfo *info;
>> > > -    uint32_t old, new, cmp = atomic_read(&env->mip);
>> > > +    uint32_t old = env->mip;
>> > > +    bool locked = false;
>> > > +
>> > > +    if (!qemu_mutex_iothread_locked()) {
>> > > +        locked = true;
>> > > +        qemu_mutex_lock_iothread();
>> > > +    }
>> >
>> > I must be lost here, because I have no idea what this is trying to do.
>>
>> We lock the QEMU IO Thread before we trigger an interrupt. This way we
>> can call it from the PLIC.
>
> Ping! This missed the latest PR.

Sorry, I missed your reply.

I really don't think this does that.  For example: if two threads enter this
function at the same time, both will see that the lock is not taken and try to
take it.  One will succeed and the other will fail, but both will have

That's not what this is doing. This ensures that if we enter this
function already holding the iothread lock we won't try to lock it
again; if it isn't locked we take the lock. We then only unlock it if
we took the lock ourselves. This way, when we return from the function,
we haven't changed the lock state.

Sorry, I didn't realize that iothread_locked was per-thread. In that case this should be fine.

Reviewed-by: Palmer Dabbelt <pal...@dabbelt.com>

I'm happy to take this as part of the soft freeze, as it's meant to be a non-functional change.

QEMU will assert() if you try to lock the already-locked iothread
mutex; this makes sure that we don't do that.

Alistair

'locked=true' so both will try to unlock.  The first will win, causing the
second function to execute for some period without the lock.  Then the second
will unlock, possibly defeating another lock somewhere else.

It smells kind of like this is trying to be a recursive lock, but those are a
whole lot more complicated than this.

>
> Alistair
>
>>
>> Alistair
>>
>> >
>> > > -    do {
>> > > -        old = cmp;
>> > > -        new = (old & ~mask) | (value & mask);
>> > > -        cmp = atomic_cmpxchg(&env->mip, old, new);
>> > > -    } while (old != cmp);
>> > > +    env->mip = (env->mip & ~mask) | (value & mask);
>> > >
>> > > -    info = g_new(struct CpuAsyncInfo, 1);
>> > > -    info->new_mip = new;
>> > > +    if (env->mip) {
>> > > +        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
>> > > +    } else {
>> > > +        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
>> > > +    }
>> > >
>> > > -    async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async,
>> > > -                     RUN_ON_CPU_HOST_PTR(info));
>> > > +    if (locked) {
>> > > +        qemu_mutex_unlock_iothread();
>> > > +    }
>> > >
>> > >      return old;
>> > >  }
>> > > diff --git a/target/riscv/csr.c b/target/riscv/csr.c
>> > > index f767ad24be..db0cc6ef55 100644
>> > > --- a/target/riscv/csr.c
>> > > +++ b/target/riscv/csr.c
>> > > @@ -579,7 +579,7 @@ static int rmw_mip(CPURISCVState *env, int csrno, 
target_ulong *ret_value,
>> > >      if (mask) {
>> > >          old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
>> > >      } else {
>> > > -        old_mip = atomic_read(&env->mip);
>> > > +        old_mip = env->mip;
>> > >      }
>> > >
>> > >      if (ret_value) {

Reply via email to