On Mon, 5 May 2008 16:29:21 +0300
"Mohammed Gamal" <[EMAIL PROTECTED]> wrote:

> On Mon, May 5, 2008 at 3:57 PM, Anthony Liguori <[EMAIL PROTECTED]> wrote:
> 
> >  WinXP fails to boot with your patch applied too.  FWIW, Ubuntu 8.04 has
> >  a fixed version of gfxboot that doesn't do nasty things with SS on
> >  privileged mode transitions.
> >
> WinXP fails with the patch applied too. The Ubuntu 7.10 live CD and
> FreeDOS don't boot; they complain about instruction mov 0x11,sreg
> not being emulated.

Can you try with this one, please?
On my machine it boots the ubuntu-8.04-desktop-i386.iso live CD and also
openSUSE-10.3-GM-x86_64-mini.iso.

I will try FreeDOS and WinXP if I can find copies ;)
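
In case it helps review, the mechanism in short: when VMX refuses to
enter the guest because the guest state is invalid (with gfxboot, SS's
RPL no longer matches CS's RPL), the exit reason is reported with bit 31
set. The patch strips that bit and, if the SS/CS RPLs disagree, emulates
instructions until the guest puts SS right again. Here is a tiny
standalone userspace sketch of those two checks; the selector and
exit-reason values are made up (33 is EXIT_REASON_INVALID_GUEST_STATE as
defined in the vmx.h hunk below):

#include <stdio.h>
#include <stdint.h>

#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000u

int main(void)
{
        /* made-up values: failed VM entry with basic exit reason 33 */
        uint32_t exit_reason = 0x80000021u;
        /* CS.RPL = 0, SS.RPL = 2: the gfxboot-style mismatch */
        uint16_t cs = 0x0008, ss = 0x0012;

        if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
                exit_reason &= ~VMX_EXIT_REASONS_FAILED_VMENTRY;
                printf("failed VM entry, basic exit reason %u\n",
                       (unsigned)exit_reason);
        }
        if ((ss & 3) != (cs & 3))
                printf("SS.RPL=%d != CS.RPL=%d -> invalid guest state\n",
                       ss & 3, cs & 3);
        return 0;
}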

Regards,
Guillaume

---
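
A note for reviewers rather than part of the change: the 0x8c and 0x8e
handlers below dispatch on the ModRM reg field, which names the segment
register (0=ES, 1=CS, 2=SS, 3=DS, 4=FS, 5=GS; values 6 and 7 are
invalid, hence the cannot_emulate defaults). A standalone sketch of that
decoding, using a made-up instruction:

#include <stdio.h>

int main(void)
{
        /* 8E D8 = mov ds, ax: mod=11b, reg=011b (DS), rm=000b (AX) */
        static const char *sreg_name[] = { "ES", "CS", "SS", "DS", "FS", "GS" };
        unsigned char modrm = 0xd8;
        unsigned reg = (modrm >> 3) & 7;
        unsigned rm = modrm & 7;

        if (reg > 5) {
                printf("invalid segment register field %u\n", reg);
                return 1;
        }
        printf("reg field %u -> %s, rm field %u\n", reg, sreg_name[reg], rm);
        return 0;
}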

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 26c4f02..6e76c2e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1272,7 +1272,9 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
+#if 0
        vmcs_write16(GUEST_SS_SELECTOR, 0);
+#endif
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
 
        vmcs_write16(GUEST_CS_SELECTOR,
@@ -2633,6 +2635,73 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int invalid_guest_state(struct kvm_vcpu *vcpu,
+               struct kvm_run *kvm_run, u32 failure_reason)
+{
+       u16 ss, cs;
+       u8 opcodes[4];
+       unsigned long rip = vcpu->arch.rip;
+       unsigned long rip_linear;
+
+       ss = vmcs_read16(GUEST_SS_SELECTOR);
+       cs = vmcs_read16(GUEST_CS_SELECTOR);
+
+       if ((ss & 0x03) != (cs & 0x03)) {       /* SS.RPL != CS.RPL */
+               int err;
+               rip_linear = rip + vmx_get_segment_base(vcpu, VCPU_SREG_CS);
+               emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
+#if 0
+               printk(KERN_INFO "emulation at (%lx) rip %lx: %02x %02x %02x %02x\n",
+                               rip_linear,
+                               rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+#endif
+               err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+               switch (err) {
+                       case EMULATE_DONE:
+#if 0
+                               printk(KERN_INFO "successfully emulated instruction\n");
+#endif
+                               return 1;
+                       case EMULATE_DO_MMIO:
+                               printk(KERN_INFO "mmio?\n");
+                               return 0;
+                       default:
+                               kvm_report_emulation_failure(vcpu, "vmentry failure");
+                               break;
+               }
+       }
+
+       kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+       kvm_run->hw.hardware_exit_reason = failure_reason;
+       return 0;
+}
+
+static int handle_vmentry_failure(struct kvm_vcpu *vcpu,
+                                 struct kvm_run *kvm_run,
+                                 u32 failure_reason)
+{
+       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+#if 0
+       printk(KERN_INFO "Failed vm entry (exit reason 0x%x) ", failure_reason);
+#endif
+       switch (failure_reason) {
+               case EXIT_REASON_INVALID_GUEST_STATE:
+#if 0
+                       printk("invalid guest state \n");
+#endif
+                       return invalid_guest_state(vcpu, kvm_run, failure_reason);
+               case EXIT_REASON_MSR_LOADING:
+                       printk("caused by MSR entry %ld loading.\n", exit_qualification);
+                       break;
+               case EXIT_REASON_MACHINE_CHECK:
+                       printk("caused by machine check.\n");
+                       break;
+               default:
+                       printk("reason not known yet!\n");
+                       break;
+       }
+       return 0;
+}
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -2694,6 +2763,12 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                        exit_reason != EXIT_REASON_EPT_VIOLATION))
                printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
                       "exit reason is 0x%x\n", __func__, exit_reason);
+
+       if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+               exit_reason &= ~VMX_EXIT_REASONS_FAILED_VMENTRY;
+               return handle_vmentry_failure(vcpu, kvm_run, exit_reason);
+       }
+
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 79d94c6..2cebf48 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -238,7 +238,10 @@ enum vmcs_field {
 #define EXIT_REASON_IO_INSTRUCTION      30
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
+#define EXIT_REASON_INVALID_GUEST_STATE 33
+#define EXIT_REASON_MSR_LOADING         34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
+#define EXIT_REASON_MACHINE_CHECK       41
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
 #define EXIT_REASON_EPT_VIOLATION       48
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 979f983..c84c5ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3044,8 +3044,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        return 0;
 }
 
-static void get_segment(struct kvm_vcpu *vcpu,
-                       struct kvm_segment *var, int seg)
+void get_segment(struct kvm_vcpu *vcpu,
+                struct kvm_segment *var, int seg)
 {
        kvm_x86_ops->get_segment(vcpu, var, seg);
 }
@@ -3128,8 +3128,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static void set_segment(struct kvm_vcpu *vcpu,
-                       struct kvm_segment *var, int seg)
+void set_segment(struct kvm_vcpu *vcpu,
+                struct kvm_segment *var, int seg)
 {
        kvm_x86_ops->set_segment(vcpu, var, seg);
 }
@@ -3287,8 +3287,8 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-                                  int type_bits, int seg)
+int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                           int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
 
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 8a96320..581d18e 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -69,6 +69,7 @@
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
 #define GroupMask   0xff        /* Group number stored in bits 0:7 */
 
+int switch_perso = 0;   /* debug: set on far jmp, read by #if 0 dump in writeback() */
 enum {
        Group1_80, Group1_81, Group1_82, Group1_83,
        Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
@@ -138,7 +139,8 @@ static u16 opcode_table[256] = {
        /* 0x88 - 0x8F */
        ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
        ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
-       0, ModRM | DstReg, 0, Group | Group1A,
+       DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
+       DstReg | SrcMem | ModRM | Mov, Group | Group1A,
        /* 0x90 - 0x9F */
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
@@ -152,7 +154,8 @@ static u16 opcode_table[256] = {
        ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
        ByteOp | ImplicitOps | String, ImplicitOps | String,
        /* 0xB0 - 0xBF */
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       DstReg | SrcImm | Mov, 0, 0, 0, 0, 0, 0, 0,
        /* 0xC0 - 0xC7 */
        ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
        0, ImplicitOps | Stack, 0, 0,
@@ -168,7 +171,7 @@ static u16 opcode_table[256] = {
        /* 0xE0 - 0xE7 */
        0, 0, 0, 0, 0, 0, 0, 0,
        /* 0xE8 - 0xEF */
-       ImplicitOps | Stack, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps,
+       ImplicitOps | Stack, SrcImm | ImplicitOps, ImplicitOps, SrcImmByte | ImplicitOps,
        0, 0, 0, 0,
        /* 0xF0 - 0xF7 */
        0, 0, 0, 0,
@@ -1246,6 +1249,19 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
        default:
                break;
        }
+#if 0
+       if (switch_perso) {
+               printk(KERN_INFO "    writeback: dst.byte %d\n", c->dst.bytes);
+               printk(KERN_INFO "    writeback: dst.ptr  0x%p\n", c->dst.ptr);
+               printk(KERN_INFO "    writeback: dst.val  0x%lx\n", c->dst.val);
+               printk(KERN_INFO "    writeback: src.ptr  0x%p\n", c->src.ptr);
+               printk(KERN_INFO "    writeback: src.val  0x%lx\n", c->src.val);
+               printk(KERN_INFO "    writeback: RAX  0x%lx\n", c->regs[VCPU_REGS_RAX]);
+               printk(KERN_INFO "    writeback: RSP  0x%lx\n", c->regs[VCPU_REGS_RSP]);
+               printk(KERN_INFO "    writeback: CS  0x%lx\n", c->regs[VCPU_SREG_CS]);
+               printk(KERN_INFO "    writeback: SS  0x%lx\n", c->regs[VCPU_SREG_SS]);
+       }
+#endif
        return 0;
 }
 
@@ -1342,6 +1358,10 @@ special_insn:
        switch (c->b) {
        case 0x00 ... 0x05:
              add:              /* add */
+               if ((c->d & ModRM) && c->modrm_mod == 3) {
+                       c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+                       c->dst.ptr = decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
+               }
                emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
                break;
        case 0x08 ... 0x0d:
@@ -1514,14 +1534,90 @@ special_insn:
                break;
        case 0x88 ... 0x8b:     /* mov */
                goto mov;
+       case 0x8c: { /* mov r/m, sreg */
+               struct kvm_segment segreg;
+
+               if (c->modrm_mod == 0x3)
+                       c->src.val = c->modrm_val;
+
+               switch (c->modrm_reg) {
+               case 0:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_ES);
+                       break;
+               case 1:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_CS);
+                       break;
+               case 2:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_SS);
+                       break;
+               case 3:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_DS);
+                       break;
+               case 4:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_FS);
+                       break;
+               case 5:
+                       get_segment(ctxt->vcpu, &segreg, VCPU_SREG_GS);
+                       break;
+               default:
+                       printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n",
+                                        c->modrm);
+                       goto cannot_emulate;
+               }
+               c->dst.val = segreg.selector;
+               c->dst.bytes = 2;
+               c->dst.ptr = (unsigned long *)decode_register(c->modrm_rm, c->regs,
+                                                             c->d & ByteOp);
+               break;
+       }
        case 0x8d: /* lea r16/r32, m */
                c->dst.val = c->modrm_ea;
                break;
+       case 0x8e: { /* mov seg, r/m16 */
+               uint16_t sel;
+
+               sel = c->src.val;
+               switch (c->modrm_reg) {
+               case 0:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 1, VCPU_SREG_ES) < 0)
+                               goto cannot_emulate;
+                       break;
+               case 1:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 9, VCPU_SREG_CS) < 0)
+                               goto cannot_emulate;
+                       break;
+               case 2:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 1, VCPU_SREG_SS) < 0)
+                               goto cannot_emulate;
+                       break;
+               case 3:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 1, VCPU_SREG_DS) < 0)
+                               goto cannot_emulate;
+                       break;
+               case 4:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 1, VCPU_SREG_FS) < 0)
+                               goto cannot_emulate;
+                       break;
+               case 5:
+                       if (load_segment_descriptor(ctxt->vcpu, sel, 1, VCPU_SREG_GS) < 0)
+                               goto cannot_emulate;
+                       break;
+               default:
+                       printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
+                                         c->modrm);
+                       goto cannot_emulate;
+               }
+
+               c->dst.type = OP_NONE;  /* Disable writeback. */
+               break;
+       }
        case 0x8f:              /* pop (sole member of Grp1a) */
                rc = emulate_grp1a(ctxt, ops);
                if (rc != 0)
                        goto done;
                break;
+       case 0xb8: /* mov r, imm */
+               goto mov;
        case 0x9c: /* pushf */
                c->src.val =  (unsigned long) ctxt->eflags;
                emulate_push(ctxt);
@@ -1623,6 +1719,10 @@ special_insn:
                DPRINTF("Urk! I don't handle SCAS.\n");
                goto cannot_emulate;
        case 0xc0 ... 0xc1:
+               if ((c->d & ModRM) && c->modrm_mod == 3) {
+                       c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+                       c->dst.ptr = decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
+               }
                emulate_grp2(ctxt);
                break;
        case 0xc3: /* ret */
@@ -1660,6 +1760,39 @@ special_insn:
                break;
        }
        case 0xe9: /* jmp rel */
+               jmp_rel(c, c->src.val);
+               c->dst.type = OP_NONE; /* Disable writeback. */
+               break;
+       case 0xea: /* jmp far */ {
+               uint32_t eip;
+               uint16_t sel;
+
+               /* enable switch_perso */
+               switch_perso = 1;
+
+               switch (c->op_bytes) {
+               case 2:
+                       eip = insn_fetch(u16, 2, c->eip);
+                       eip = eip & 0x0000FFFF; /* clear upper 16 bits */
+                       break;
+               case 4:
+                       eip = insn_fetch(u32, 4, c->eip);
+                       break;
+               default:
+                       DPRINTF("jmp far: Invalid op_bytes\n");
+                       goto cannot_emulate;
+               }
+               sel = insn_fetch(u16, 2, c->eip);
+               if (ctxt->mode == X86EMUL_MODE_REAL)
+                       eip |= (sel << 4);
+               else if (load_segment_descriptor(ctxt->vcpu, sel, 9, VCPU_SREG_CS) < 0) {
+                       DPRINTF("jmp far: Failed to load CS descriptor\n");
+                       goto cannot_emulate;
+               }
+
+               c->eip = eip;
+               break;
+       }
        case 0xeb: /* jmp rel short */
                jmp_rel(c, c->src.val);
                c->dst.type = OP_NONE; /* Disable writeback. */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1d8cd01..29254b4 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -495,6 +495,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);
 
+void set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+void get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                           int type_bits, int seg);
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
