On 06/03/2017 02:34, Richard Henderson wrote:
> On 03/06/2017 08:32 AM, Alex Bennée wrote:
>>> #5  0x000000000046ea2e in tlb_flush (cpu=0x164a360) at
>>> qemu.git/cputlb.c:121
>>> #6  0x0000000000538987 in cpu_x86_update_cr4 (env=0x16525f0,
>>> new_cr4=1784)
>>>     at qemu.git/target/i386/helper.c:660
>>> #7  0x000000000055e318 in cpu_vmexit (env=0x16525f0, exit_code=78,
>>> exit_info_1=4, retaddr=0)
>>>     at qemu.git/target/i386/svm_helper.c:689
>>> #8  0x000000000055d9b7 in cpu_svm_check_intercept_param (env=0x16525f0,
>>> type=78, param=4, retaddr=0)
>>>     at qemu.git/target/i386/svm_helper.c:511
>>> #9  0x0000000000541acf in raise_interrupt2 (env=0x16525f0, intno=14,
>>> is_int=0, error_code=4, next_eip_addend=0, retaddr=0)
>>>     at qemu.git/target/i386/excp_helper.c:96
>>> #10 0x0000000000541c0d in raise_exception_err_ra (env=0x16525f0,
>>> exception_index=14, error_code=4, retaddr=0)
>>>     at qemu.git/target/i386/excp_helper.c:127
>>> #11 0x00000000005621a9 in tlb_fill (cs=0x164a360, addr=1245184,
>>> access_type=MMU_INST_FETCH, mmu_idx=1, retaddr=0)
>>>     at qemu.git/target/i386/mem_helper.c:212
>> Richard,
>>
>> So this looks like another path through the SoftMMU code during
>> code-generation (which is why tb_lock() is held in the first place). I'm
>> not sure if the correct thing to do is bug out earlier or to defer the
>> exception raising part to async work and exit the loop.
>
> My guess is that everything from cpu_svm_check_intercept_param on should
> be done from do_interrupt instead of during raise_interrupt.
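For context, the deferral pattern Richard is suggesting is the one QEMU
already uses for ordinary exceptions: record the pending event on the CPU
state, longjmp out of the helper with cpu_loop_exit(), and let
x86_cpu_do_interrupt() do the real work later from cpu_exec(), with no
helper frames left on the stack. A minimal sketch of that pattern
(hypothetical helper name, not actual QEMU code):

/* Record a pending exception and unwind out of the current helper;
 * x86_cpu_do_interrupt() will deliver it from cpu_exec().
 */
static void defer_to_do_interrupt(CPUX86State *env, int excp,
                                  int error_code, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (retaddr) {
        /* resolve env->eip for the instruction that faulted */
        cpu_restore_state(cs, retaddr);
    }
    cs->exception_index = excp;   /* picked up by x86_cpu_do_interrupt */
    env->error_code = error_code;
    cpu_loop_exit(cs);            /* siglongjmp back into cpu_exec() */
}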
From cpu_svm_check_intercept_param, or from cpu_vmexit?  The former seems
too early, because it will usually not even do anything, but treating
cpu_vmexit like an exception is a very good idea indeed.

This is my uncompiled take.

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 12a39d5..ef319cc 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -694,6 +694,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 
 #define EXCP_SYSCALL    0x100 /* only happens in user only emulation
                                  for syscall instruction */
+#define EXCP_VMEXIT     0x100 /* only happens in system emulation */
 
 /* i386-specific interrupt pending bits.  */
 #define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
@@ -1626,6 +1627,7 @@ void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param, uintptr_t retaddr);
 void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                 uintptr_t retaddr);
+void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
 
 /* seg_helper.c */
 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
index 5c845dc..0374031 100644
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -1297,15 +1297,17 @@ void x86_cpu_do_interrupt(CPUState *cs)
     /* successfully delivered */
     env->old_exception = -1;
 #else
-    /* simulate a real cpu exception. On i386, it can
-       trigger new exceptions, but we do not handle
-       double or triple faults yet. */
-    do_interrupt_all(cpu, cs->exception_index,
-                     env->exception_is_int,
-                     env->error_code,
-                     env->exception_next_eip, 0);
-    /* successfully delivered */
-    env->old_exception = -1;
+    if (cs->exception_index >= EXCP_VMEXIT) {
+        assert(env->old_exception == -1);
+        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
+    } else {
+        do_interrupt_all(cpu, cs->exception_index,
+                         env->exception_is_int,
+                         env->error_code,
+                         env->exception_next_eip, 0);
+        /* successfully delivered */
+        env->old_exception = -1;
+    }
 #endif
 }
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
index 78d8df4..b49cd6d 100644
--- a/target/i386/svm_helper.c
+++ b/target/i386/svm_helper.c
@@ -580,12 +580,10 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
     }
 }
 
-/* Note: currently only 32 bits of exit_code are used */
 void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                 uintptr_t retaddr)
 {
     CPUState *cs = CPU(x86_env_get_cpu(env));
-    uint32_t int_ctl;
 
     if (retaddr) {
         cpu_restore_state(cs, retaddr);
     }
@@ -598,6 +596,19 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                                                   control.exit_info_2)),
                   env->eip);
 
+    cs->exception_index = EXCP_VMEXIT + exit_code;
+    env->error_code = exit_info_1;
+
+    /* remove any pending exception */
+    env->old_exception = -1;
+    cpu_loop_exit(cs);
+}
+
+void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    uint32_t int_ctl;
+
     if (env->hflags & HF_INHIBIT_IRQ_MASK) {
         x86_stl_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.int_state),
@@ -759,13 +770,6 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
     /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
        host's code segment or non-canonical (in the case of long mode), a
        #GP fault is delivered inside the host. */
-
-    /* remove any pending exception */
-    cs->exception_index = -1;
-    env->error_code = 0;
-    env->old_exception = -1;
-
-    cpu_loop_exit(cs);
 }
 
 #endif
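To spell out the intended flow: cpu_vmexit() now only logs and records the
vmexit before cpu_loop_exit() siglongjmps back to cpu_exec(); the outer
loop's exception handling then calls x86_cpu_do_interrupt(), which
dispatches to do_vmexit() with no TCG helper frames on the stack and
without tb_lock held. A toy standalone model of the two-phase split (not
QEMU code; the setjmp buffer stands in for cpu_exec()'s jmp_env):

#include <inttypes.h>
#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

#define EXCP_VMEXIT 0x100

static jmp_buf cpu_exec_loop;      /* stands in for CPUState::jmp_env */
static int exception_index = -1;   /* stands in for cs->exception_index */
static uint32_t error_code;        /* stands in for env->error_code */

/* raise phase: what cpu_vmexit() does after the patch */
static void toy_cpu_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    exception_index = EXCP_VMEXIT + exit_code;
    error_code = (uint32_t)exit_info_1;
    longjmp(cpu_exec_loop, 1);     /* cpu_loop_exit(cs) */
}

/* handle phase: what x86_cpu_do_interrupt() does after the patch */
static void toy_do_interrupt(void)
{
    if (exception_index >= EXCP_VMEXIT) {
        printf("do_vmexit(exit_code=%d, exit_info_1=%" PRIu32 ")\n",
               exception_index - EXCP_VMEXIT, error_code);
    }
    exception_index = -1;
}

int main(void)
{
    if (setjmp(cpu_exec_loop) == 0) {
        toy_cpu_vmexit(78, 4);     /* the values from the backtrace above */
    }
    toy_do_interrupt();            /* runs with an empty helper stack */
    return 0;
}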