Sergey Fedorov <sergey.fedo...@linaro.org> writes: > From: Sergey Fedorov <serge.f...@gmail.com> > > Move tb_add_jump() call and surrounding code from cpu_exec() into > tb_find_fast(). That simplifies cpu_exec() a little by hiding the direct > chaining optimization details into tb_find_fast(). It also allows to > move tb_lock()/tb_unlock() pair into tb_find_fast(), putting it closer > to tb_find_slow() which also manipulates the lock. > > Suggested-by: Alex Bennée <alex.ben...@linaro.org> > Signed-off-by: Sergey Fedorov <serge.f...@gmail.com> > Signed-off-by: Sergey Fedorov <sergey.fedo...@linaro.org> > --- > > Changes in v6: > * Fixed rebase conflicts > > cpu-exec.c | 35 +++++++++++++++++++---------------- > 1 file changed, 19 insertions(+), 16 deletions(-) > > diff --git a/cpu-exec.c b/cpu-exec.c > index f49a436e1a5a..5f23c0660d6e 100644 > --- a/cpu-exec.c > +++ b/cpu-exec.c > @@ -320,7 +320,9 @@ found: > return tb; > } > > -static inline TranslationBlock *tb_find_fast(CPUState *cpu) > +static inline TranslationBlock *tb_find_fast(CPUState *cpu, > + TranslationBlock **last_tb, > + int tb_exit) > { > CPUArchState *env = (CPUArchState *)cpu->env_ptr; > TranslationBlock *tb; > @@ -331,11 +333,24 @@ static inline TranslationBlock *tb_find_fast(CPUState > *cpu) > always be the same before a given translated block > is executed. */ > cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); > + tb_lock(); > tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]; > if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || > tb->flags != flags)) { > tb = tb_find_slow(cpu, pc, cs_base, flags); > } > + if (cpu->tb_flushed) { > + /* Ensure that no TB jump will be modified as the > + * translation buffer has been flushed. > + */ > + *last_tb = NULL; > + cpu->tb_flushed = false; > + } > + /* See if we can patch the calling TB. */ > + if (*last_tb && qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
This should be `!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)` — the `!` was dropped when the condition was moved from cpu_exec() into tb_find_fast() (compare the removed hunk below, which reads `last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)`). As written, the patch inverts the check: tb_add_jump() would only patch the calling TB when CPU_LOG_TB_NOCHAIN logging is enabled, i.e. direct block chaining would be disabled in the normal case and enabled exactly when the user asked for it to be suppressed.