Guillaume Thouvenin wrote:
> On Mon, 07 Apr 2008 11:05:06 -0500
> Anthony Liguori <[EMAIL PROTECTED]> wrote:
>
>   
>> Perhaps a viable way to fix this upstream would be to catch the vmentry 
>> failure, look to see if SS.CPL != CS.CPL, and if so, invoke 
>> x86_emulate() in a loop until SS.CPL == CS.CPL.
>>
>> There are very few instructions in gfxboot that would need to be added 
>> to x86_emulate (if they aren't already there).
>>     
>
> So to see if I'm on the right track, here is an attempt to implement the
> solution. It doesn't work yet.
>
> I'm trying to:
>   - Disable the code that modifies the SS value, so that the VM entry
> failure actually occurs and can be detected
>   - Add the handler that catches the VM entry failure
>   - Emulate instructions until we recover a VMX-friendly state
>      => add the jmp far (opcode 0xea) instruction to the emulator.
>
> With the patch, the VM entry failure is caught but the jmp far
> instruction seems to fail. I've got the following dmesg:
>
> ...
> handle_vmentry_failure: invalid guest state
> handle_vmentry_failure: start emulation
> handle_vmentry_failure: emulation failed
>   

What instruction failed, exactly?
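
If it isn't obvious from the trace, a dump of the guest state at the failure
point will tell you.  Something along these lines (untested, just a
hypothetical helper called from your EMULATE_FAIL branch; it only uses plain
VMCS reads):

static void dump_invalid_state(void)
{
        printk(KERN_INFO "invalid guest state: rip 0x%lx\n",
               vmcs_readl(GUEST_RIP));
        printk(KERN_INFO "  cs: sel 0x%04x base 0x%lx ar 0x%08x\n",
               vmcs_read16(GUEST_CS_SELECTOR), vmcs_readl(GUEST_CS_BASE),
               vmcs_read32(GUEST_CS_AR_BYTES));
        printk(KERN_INFO "  ss: sel 0x%04x base 0x%lx ar 0x%08x\n",
               vmcs_read16(GUEST_SS_SELECTOR), vmcs_readl(GUEST_SS_BASE),
               vmcs_read32(GUEST_SS_AR_BYTES));
}

With the rip in hand you can match it against a disassembly of gfxboot and
see exactly which instruction the emulator gave up on.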

> ...
>
>
> Regards,
> Guillaume
>
> ---
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 8e5d664..a56bd83 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1178,13 +1178,16 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
>  
>       update_exception_bitmap(vcpu);
>  
> +     fix_pmode_dataseg(VCPU_SREG_SS, &vcpu->arch.rmode.ss);
>       fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
>       fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
>       fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
>       fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
>   

You need to drop the fixes rather than add more.
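
Something like this for enter_pmode(), just to illustrate (a sketch with only
the SS fixup dropped; whether the remaining data segment fixups can eventually
go as well is a separate question):

        /* Don't force SS to a flat 0x93 data segment; leave whatever the
         * guest had, and let the invalid-guest-state handler emulate until
         * SS.RPL matches CS.RPL again. */
        fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
        fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

        vmcs_write16(GUEST_CS_SELECTOR,
                     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);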

>  
> +#if 0
>       vmcs_write16(GUEST_SS_SELECTOR, 0);
>       vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
> +#endif
>  
>       vmcs_write16(GUEST_CS_SELECTOR,
>                    vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
> @@ -1952,6 +1955,33 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
>       return 0;
>  }
>  
> +static int handle_vmentry_failure(u32 exit_reason, struct kvm_vcpu *vcpu)
> +{
> +     unsigned int basic_exit_reason = (uint16_t) exit_reason;
> +     unsigned int cs_rpl = vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK;
> +     unsigned int ss_rpl = vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK;
> +
> +     switch (basic_exit_reason) {
> +             case EXIT_REASON_INVALID_GUEST_STATE:
> +                     printk(KERN_INFO "%s: invalid guest state\n", __FUNCTION__);
> +                     printk(KERN_INFO "%s: start emulation\n", __FUNCTION__);
> +                     while (cs_rpl != ss_rpl) {
> +                             if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_FAIL) {
> +                                     printk(KERN_INFO "%s: emulation failed\n", __FUNCTION__);
> +                                     return 0;
> +                             }
> +                             cs_rpl = vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK;
> +                             ss_rpl = vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK;
> +                     }
> +                     printk(KERN_INFO "%s: VMX friendly state recovered\n", __FUNCTION__);
> +                     break;
> +             default:
> +                     printk(KERN_INFO "VM-entry failure due to unknown reason\n");
> +                     return 0;
> +     }
> +     return 1;
> +}
> +
>  static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
>       struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -2364,6 +2394,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>       KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
>                   (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
>  
> +     if (unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
> +             return handle_vmentry_failure(exit_reason, vcpu);
> +
>       if (unlikely(vmx->fail)) {
>               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
>               kvm_run->fail_entry.hardware_entry_failure_reason
> diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
> index 5dff460..200c0f8 100644
> --- a/arch/x86/kvm/vmx.h
> +++ b/arch/x86/kvm/vmx.h
> @@ -223,7 +223,10 @@ enum vmcs_field {
>  #define EXIT_REASON_IO_INSTRUCTION      30
>  #define EXIT_REASON_MSR_READ            31
>  #define EXIT_REASON_MSR_WRITE           32
> +#define EXIT_REASON_INVALID_GUEST_STATE 33
> +#define EXIT_REASON_MSR_LOADING         34
>  #define EXIT_REASON_MWAIT_INSTRUCTION   36
> +#define EXIT_REASON_MACHINE_CHECK       41
>  #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
>  #define EXIT_REASON_APIC_ACCESS         44
>  #define EXIT_REASON_WBINVD           54
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 0ce5563..b38065d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3267,7 +3267,7 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
>       return 0;
>  }
>  
> -static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
> +int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
>                                  int type_bits, int seg)
>  {
>       struct kvm_segment kvm_seg;
> diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
> index 2ca0838..4bc3c80 100644
> --- a/arch/x86/kvm/x86_emulate.c
> +++ b/arch/x86/kvm/x86_emulate.c
> @@ -168,7 +168,7 @@ static u16 opcode_table[256] = {
>       /* 0xE0 - 0xE7 */
>       0, 0, 0, 0, 0, 0, 0, 0,
>       /* 0xE8 - 0xEF */
> -     ImplicitOps | Stack, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps,
> +     ImplicitOps | Stack, SrcImm|ImplicitOps, ImplicitOps, SrcImmByte|ImplicitOps,
>       0, 0, 0, 0,
>       /* 0xF0 - 0xF7 */
>       0, 0, 0, 0,
> @@ -1156,6 +1156,9 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
>       case 4: /* jmp abs */
>               c->eip = c->src.val;
>               break;
> +     case 5: /* jmp far */
> +             printk(KERN_INFO "Jmp far need to be implemented\n");
> +             break;
>       case 6: /* push */
>               emulate_push(ctxt);
>               break;
> @@ -1657,6 +1660,24 @@ special_insn:
>               break;
>       }
>       case 0xe9: /* jmp rel */
> +             jmp_rel(c, c->src.val);
> +             c->dst.type = OP_NONE; /* Disable writeback. */
> +             break;
> +     case 0xea: /* jmp far */ {
> +             struct kvm_segment kvm_seg;
> +             int ret;
> +
> +             kvm_x86_ops->get_segment(ctxt->vcpu, &kvm_seg, VCPU_SREG_CS);
> +
> +
> +             ret = load_segment_descriptor(ctxt->vcpu, kvm_seg.selector, 9, VCPU_SREG_CS);
> +             if (ret < 0) {
> +                     printk(KERN_INFO "%s: Failed to load CS descriptor\n", __FUNCTION__);
> +                     return ret;
> +             }
> +
>   

You need to load rip as well.
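
The operand of 0xea is the new offset followed by the new selector (ptr16:16
or ptr16:32), so both have to be fetched from the instruction stream;
reloading the CS selector you just read back is a no-op.  Untested sketch,
assuming insn_fetch() can be used from the execute path and reusing the
load_segment_descriptor() you exported (which I take to return 0 on success):

        case 0xea: /* jmp far */ {
                u32 eip;
                u16 sel;

                /* offset first (16 or 32 bit), then the segment selector */
                switch (c->op_bytes) {
                case 2:
                        eip = insn_fetch(u16, 2, c->eip);
                        break;
                case 4:
                        eip = insn_fetch(u32, 4, c->eip);
                        break;
                default:
                        goto cannot_emulate;
                }
                sel = insn_fetch(u16, 2, c->eip);

                /* load the *target* CS, not the current one */
                if (load_segment_descriptor(ctxt->vcpu, sel, 9, VCPU_SREG_CS))
                        goto cannot_emulate;

                /* and take the jump: rip changes too */
                c->eip = eip;
                c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
        }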

I suggest you print the instruction to be emulated and the register 
state before and after, and compare with the expected state.
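
For the vmx.c side, instrumenting your loop is enough; something like this
(untested, it only adds prints around the emulate_instruction() call so you
can see where rip and the selectors end up after each emulated instruction):

        while (cs_rpl != ss_rpl) {
                unsigned long rip = vmcs_readl(GUEST_RIP);

                if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_FAIL) {
                        printk(KERN_INFO "%s: emulation failed at rip 0x%lx\n",
                               __FUNCTION__, rip);
                        return 0;
                }
                printk(KERN_INFO "%s: emulated at 0x%lx -> rip 0x%lx "
                       "cs 0x%04x ss 0x%04x\n", __FUNCTION__, rip,
                       vmcs_readl(GUEST_RIP),
                       vmcs_read16(GUEST_CS_SELECTOR),
                       vmcs_read16(GUEST_SS_SELECTOR));

                cs_rpl = vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK;
                ss_rpl = vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK;
        }

Comparing that trace with what the same gfxboot code does on real hardware
will show exactly where the emulation diverges.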

-- 
error compiling committee.c: too many arguments to function

