From: Gleb Natapov <[email protected]>

The x86 arch already defines struct desc_ptr for idt/gdt pointers, so there is
no need to define another structure in kvm code.
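
For reference, desc_ptr is declared in arch/x86/include/asm/desc_defs.h roughly
as follows; the old limit/base fields become size/address, which is the rename
carried through the hunks below:

	/* holds the contents of an IDTR/GDTR register */
	struct desc_ptr {
		unsigned short size;	/* replaces descriptor_table.limit */
		unsigned long address;	/* replaces descriptor_table.base */
	} __attribute__((packed));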

Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f9a2f66..3c06fb1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -461,11 +461,6 @@ struct kvm_vcpu_stat {
        u32 nmi_injections;
 };
 
-struct descriptor_table {
-       u16 limit;
-       unsigned long base;
-} __attribute__((packed));
-
 struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
@@ -503,10 +498,10 @@ struct kvm_x86_ops {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
-       void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-       void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-       void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-       void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+       void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+       void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+       void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+       void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
        int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -725,12 +720,12 @@ static inline void kvm_load_ldt(u16 sel)
        asm("lldt %0" : : "rm"(sel));
 }
 
-static inline void kvm_get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct desc_ptr *table)
 {
        asm("sidt %0" : "=m"(*table));
 }
 
-static inline void kvm_get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct desc_ptr *table)
 {
        asm("sgdt %0" : "=m"(*table));
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 52f78dd..2e1e8d6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -318,7 +318,7 @@ static int svm_hardware_enable(void *garbage)
 
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct descriptor_table gdt_descr;
+       struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
 
@@ -344,7 +344,7 @@ static int svm_hardware_enable(void *garbage)
        sd->next_asid = sd->max_asid + 1;
 
        kvm_get_gdt(&gdt_descr);
-       gdt = (struct desc_struct *)gdt_descr.base;
+       gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
        wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -930,36 +930,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
        return save->cpl;
 }
 
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.idtr.limit;
-       dt->base = svm->vmcb->save.idtr.base;
+       dt->size = svm->vmcb->save.idtr.limit;
+       dt->address = svm->vmcb->save.idtr.base;
 }
 
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.idtr.limit = dt->limit;
-       svm->vmcb->save.idtr.base = dt->base ;
+       svm->vmcb->save.idtr.limit = dt->size;
+       svm->vmcb->save.idtr.base = dt->address ;
 }
 
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.gdtr.limit;
-       dt->base = svm->vmcb->save.gdtr.base;
+       dt->size = svm->vmcb->save.gdtr.limit;
+       dt->address = svm->vmcb->save.gdtr.base;
 }
 
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.gdtr.limit = dt->limit;
-       svm->vmcb->save.gdtr.base = dt->base ;
+       svm->vmcb->save.gdtr.limit = dt->size;
+       svm->vmcb->save.gdtr.base = dt->address ;
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f82b072..f7c815b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -597,11 +597,11 @@ static void reload_tss(void)
        /*
         * VT restores TR but not its size.  Useless.
         */
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *descs;
 
        kvm_get_gdt(&gdt);
-       descs = (void *)gdt.base;
+       descs = (void *)gdt.address;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
 }
@@ -755,7 +755,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
 
        if (vcpu->cpu != cpu) {
-               struct descriptor_table dt;
+               struct desc_ptr dt;
                unsigned long sysenter_esp;
 
                vcpu->cpu = cpu;
@@ -765,7 +765,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 */
                vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
                kvm_get_gdt(&dt);
-               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+               vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
 
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -1927,28 +1927,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
        *l = (ar >> 13) & 1;
 }
 
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_IDTR_BASE);
+       dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_IDTR_BASE);
 }
 
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_IDTR_BASE, dt->base);
+       vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_IDTR_BASE, dt->address);
 }
 
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_GDTR_BASE);
+       dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_GDTR_BASE);
 }
 
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_GDTR_BASE, dt->base);
+       vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_GDTR_BASE, dt->address);
 }
 
 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2327,7 +2327,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        u32 junk;
        u64 host_pat, tsc_this, tsc_base;
        unsigned long a;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
        int i;
        unsigned long kvm_vmx_return;
        u32 exec_control;
@@ -2409,7 +2409,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
        kvm_get_idt(&dt);
-       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 86b739f..b2335f6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -224,7 +224,7 @@ static void drop_user_return_notifiers(void *ignore)
 
 unsigned long segment_base(u16 selector)
 {
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;
@@ -233,7 +233,7 @@ unsigned long segment_base(u16 selector)
                return 0;
 
        kvm_get_gdt(&gdt);
-       table_base = gdt.base;
+       table_base = gdt.address;
 
        if (selector & 4) {           /* from ldt */
                u16 ldt_selector = kvm_read_ldt();
@@ -3969,14 +3969,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_idt(vcpu, &dt);
 }
@@ -4599,7 +4599,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
@@ -4614,11 +4614,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
        kvm_x86_ops->get_idt(vcpu, &dt);
-       sregs->idt.limit = dt.limit;
-       sregs->idt.base = dt.base;
+       sregs->idt.limit = dt.size;
+       sregs->idt.base = dt.address;
        kvm_x86_ops->get_gdt(vcpu, &dt);
-       sregs->gdt.limit = dt.limit;
-       sregs->gdt.base = dt.base;
+       sregs->gdt.limit = dt.size;
+       sregs->gdt.base = dt.address;
 
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
@@ -4690,7 +4690,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 
 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                                          u16 selector,
-                                         struct descriptor_table *dtable)
+                                         struct desc_ptr *dtable)
 {
        if (selector & 1 << 2) {
                struct kvm_segment kvm_seg;
@@ -4698,10 +4698,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
                if (kvm_seg.unusable)
-                       dtable->limit = 0;
+                       dtable->size = 0;
                else
-                       dtable->limit = kvm_seg.limit;
-               dtable->base = kvm_seg.base;
+                       dtable->size = kvm_seg.limit;
+               dtable->address = kvm_seg.base;
        }
        else
                kvm_x86_ops->get_gdt(vcpu, dtable);
@@ -4711,16 +4711,16 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7) {
+       if (dtable.size < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }
-       return kvm_read_guest_virt_system(dtable.base + index*8,
+       return kvm_read_guest_virt_system(dtable.address + index*8,
                                          seg_desc, sizeof(*seg_desc),
                                          vcpu, NULL);
 }
@@ -4729,14 +4729,14 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7)
+       if (dtable.size < index * 8 + 7)
                return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+       return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
 }
 
 static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
@@ -5104,15 +5104,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
        int mmu_reset_needed = 0;
        int pending_vec, max_bits;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
-       dt.limit = sregs->idt.limit;
-       dt.base = sregs->idt.base;
+       dt.size = sregs->idt.limit;
+       dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
-       dt.limit = sregs->gdt.limit;
-       dt.base = sregs->gdt.base;
+       dt.size = sregs->gdt.limit;
+       dt.address = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);
 
        vcpu->arch.cr2 = sregs->cr2;