On Thu, Jul 23, 2009 at 06:45:53PM -0300, Marcelo Tosatti wrote:
> On Wed, Jul 22, 2009 at 11:53:26PM +0200, Jan Kiszka wrote:
> > Release and re-acquire preemption and IRQ lock in the same order as
> > vcpu_enter_guest does.
>
> This should happen in vcpu_enter_guest, before it decides to disable
> preemption/irqs (so you consolidate the control there).
>
> Maybe add a new member to x86_ops?
Why not do something like this?
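That is, turn emulation of invalid guest state into a vcpu request: enter_pmode()/enter_rmode() raise KVM_REQ_EMULATE, vcpu_enter_guest() processes it before preemption and interrupts are disabled, and VMX exports its emulation loop through a new emulate_guest_state() callback in kvm_x86_ops: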
Index: kvm-requests/arch/x86/include/asm/kvm_host.h
===================================================================
--- kvm-requests.orig/arch/x86/include/asm/kvm_host.h
+++ kvm-requests/arch/x86/include/asm/kvm_host.h
@@ -529,6 +529,7 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+ int (*emulate_guest_state)(struct kvm_vcpu *vcpu, struct kvm_run *run);
const struct trace_print_flags *exit_reasons_str;
};
Index: kvm-requests/arch/x86/kvm/vmx.c
===================================================================
--- kvm-requests.orig/arch/x86/kvm/vmx.c
+++ kvm-requests/arch/x86/kvm/vmx.c
@@ -106,8 +106,6 @@ struct vcpu_vmx {
} irq;
} rmode;
int vpid;
- bool emulation_required;
- enum emulation_result invalid_state_emulation_result;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
@@ -1416,7 +1414,6 @@ static void enter_pmode(struct kvm_vcpu
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->emulation_required = 1;
vmx->rmode.vm86_active = 0;
vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
@@ -1433,8 +1430,10 @@ static void enter_pmode(struct kvm_vcpu
update_exception_bitmap(vcpu);
- if (emulate_invalid_guest_state)
+ if (emulate_invalid_guest_state) {
+ set_bit(KVM_REQ_EMULATE, &vcpu->requests);
return;
+ }
fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
@@ -1481,7 +1480,6 @@ static void enter_rmode(struct kvm_vcpu
if (enable_unrestricted_guest)
return;
- vmx->emulation_required = 1;
vmx->rmode.vm86_active = 1;
vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
@@ -1503,8 +1501,10 @@ static void enter_rmode(struct kvm_vcpu
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
update_exception_bitmap(vcpu);
- if (emulate_invalid_guest_state)
+ if (emulate_invalid_guest_state) {
+ set_bit(KVM_REQ_EMULATE, &vcpu->requests);
goto continue_rmode;
+ }
vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
vmcs_write32(GUEST_SS_LIMIT, 0xffff);
@@ -2517,9 +2517,6 @@ static int vmx_vcpu_reset(struct kvm_vcp
ret = 0;
- /* HACK: Don't enable emulation on guest boot/reset */
- vmx->emulation_required = 0;
-
out:
up_read(&vcpu->kvm->slots_lock);
return ret;
@@ -3319,15 +3316,11 @@ static int handle_nmi_window(struct kvm_
return 1;
}
-static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
+static int handle_invalid_guest_state(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
enum emulation_result err = EMULATE_DONE;
- local_irq_enable();
- preempt_enable();
-
while (!guest_state_valid(vcpu)) {
err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
@@ -3345,10 +3338,10 @@ static void handle_invalid_guest_state(s
schedule();
}
- preempt_disable();
- local_irq_disable();
+ if (!guest_state_valid(vcpu))
+ set_bit(KVM_REQ_EMULATE, &vcpu->requests);
- vmx->invalid_state_emulation_result = err;
+ return (err != EMULATE_DO_MMIO);
}
/*
@@ -3405,14 +3398,6 @@ static int vmx_handle_exit(struct kvm_ru
trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
- /* If we need to emulate an MMIO from handle_invalid_guest_state
- * we just return 0 */
- if (vmx->emulation_required && emulate_invalid_guest_state) {
- if (guest_state_valid(vcpu))
- vmx->emulation_required = 0;
- return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
- }
-
/* Access to CR3 doesn't cause a VM exit in paging mode, so we need
* to sync with the guest's real CR3. */
if (enable_ept && is_paging(vcpu))
@@ -3606,12 +3591,6 @@ static void vmx_vcpu_run(struct kvm_vcpu
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
vmx->entry_time = ktime_get();
- /* Handle invalid guest state instead of entering VMX */
- if (vmx->emulation_required && emulate_invalid_guest_state) {
- handle_invalid_guest_state(vcpu, kvm_run);
- return;
- }
-
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -3967,6 +3946,7 @@ static struct kvm_x86_ops vmx_x86_ops =
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
.get_mt_mask = vmx_get_mt_mask,
+ .emulate_guest_state = handle_invalid_guest_state,
.exit_reasons_str = vmx_exit_reasons_str,
};
Index: kvm-requests/arch/x86/kvm/x86.c
===================================================================
--- kvm-requests.orig/arch/x86/kvm/x86.c
+++ kvm-requests/arch/x86/kvm/x86.c
@@ -3556,6 +3556,11 @@ static int vcpu_enter_guest(struct kvm_v
kvm_mmu_sync_roots(vcpu);
if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
kvm_x86_ops->tlb_flush(vcpu);
+ if (test_and_clear_bit(KVM_REQ_EMULATE, &vcpu->requests)) {
+ r = kvm_x86_ops->emulate_guest_state(vcpu, kvm_run);
+ goto out;
+ }
+
if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
&vcpu->requests)) {
kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
Index: kvm-requests/include/linux/kvm_host.h
===================================================================
--- kvm-requests.orig/include/linux/kvm_host.h
+++ kvm-requests/include/linux/kvm_host.h
@@ -39,6 +39,7 @@
#define KVM_REQ_MMU_SYNC 7
#define KVM_REQ_KVMCLOCK_UPDATE 8
#define KVM_REQ_KICK 9
+#define KVM_REQ_EMULATE 10
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
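
For illustration only, a minimal userspace sketch of the request-bit pattern the patch relies on. The kernel uses the atomic set_bit()/test_and_clear_bit() helpers on vcpu->requests; the non-atomic helpers below are simplified stand-ins, not the real API:

#include <stdio.h>

#define KVM_REQ_EMULATE	10

static unsigned long requests;

/* producer side, e.g. enter_rmode(): flag that emulation is needed */
static void set_request(int bit)
{
	requests |= 1UL << bit;
}

/* consumer side, e.g. vcpu_enter_guest(): consume the flag exactly once */
static int test_and_clear_request(int bit)
{
	int was_set = !!(requests & (1UL << bit));

	requests &= ~(1UL << bit);
	return was_set;
}

int main(void)
{
	set_request(KVM_REQ_EMULATE);
	if (test_and_clear_request(KVM_REQ_EMULATE))
		printf("emulate guest state, skip VM entry\n");
	return 0;
}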