Hollis Blanchard wrote:
> NAK. I sent you a patch queue yesterday that replaces this patch.
> (Unfortunately I didn't copy kvm-devel due to a technical error;
> resending in a moment.)

Hi Hollis,
        Unfortunately, I didn't receive the mail you mentioned :(.
Does it have any new changes besides moving the stat field to kvm_x86? I
have included that change in V3.
BTW, I used git-send-email to deliver these mails; it seems they still
don't get through to you. I will check my mail format again. Thank you :)
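
P.S. For anyone skimming the patch below: the split relies on the kernel's
usual container_of() embedding pattern. Here is a minimal stand-alone
sketch of the idea (my illustration only, not part of the patch; the stub
fields and the userspace container_of() are stand-ins):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm {                    /* common, arch-independent fields */
        int nmemslots;
};

struct kvm_x86 {                /* x86-specific wrapper */
        struct kvm kvm;         /* embedded common part */
        unsigned int tss_addr;  /* example x86-only field */
};

static struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_x86, kvm);
}

int main(void)
{
        /* Allocate the outer struct, as kvm_arch_create_vm() does... */
        struct kvm_x86 *x86 = calloc(1, sizeof(*x86));
        struct kvm *kvm = &x86->kvm;

        /* ...then recover it from the common pointer anywhere else. */
        to_kvm_x86(kvm)->tss_addr = 0xfffbd000;  /* arbitrary example value */
        printf("tss_addr = %#x\n", to_kvm_x86(kvm)->tss_addr);
        free(x86);
        return 0;
}

One subtlety the sketch hints at: kvm_arch_destroy_vm() below ends with
kfree(kvm), which frees the whole kvm_x86 only because the embedded kvm is
the first member (offset 0); if that member ever moves, the call would need
to become kfree(to_kvm_x86(kvm)).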
Xiantao

> On Tue, 2007-11-20 at 10:29 +0800, Zhang, Xiantao wrote:
>> From: Zhang xiantao <[EMAIL PROTECTED]>
>> Date: Tue, 20 Nov 2007 10:08:19 +0800
>> Subject: [PATCH] KVM Portability split: Splitting kvm structure.
>> Use kvm_x86 to hold the x86-specific kvm fields, so that the kvm
>> struct only contains common fields.
>> Signed-off-by: Zhang xiantao <[EMAIL PROTECTED]>
>> ---
>>  drivers/kvm/ioapic.c   |    7 +++-
>>  drivers/kvm/irq.h      |    1 +
>>  drivers/kvm/kvm.h      |   33 ---------------------
>>  drivers/kvm/kvm_main.c |    9 ++++--
>>  drivers/kvm/mmu.c      |   74 +++++++++++++++++++++++++++++------------------
>>  drivers/kvm/vmx.c      |   18 ++++++++----
>>  drivers/kvm/x86.c      |   33 +++++++++++++--------
>>  drivers/kvm/x86.h      |   50 +++++++++++++++++++++++++++++++-
>>  8 files changed, 139 insertions(+), 86 deletions(-)
>> diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
>> index cf1d50b..541164d 100644
>> --- a/drivers/kvm/ioapic.c
>> +++ b/drivers/kvm/ioapic.c
>> @@ -276,7 +276,9 @@ static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
>> 
>>  void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
>>  {
>> -    struct kvm_ioapic *ioapic = kvm->vioapic;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    struct kvm_ioapic *ioapic = kvm_x86->vioapic;
>>      union ioapic_redir_entry *ent;
>>      int gsi;
>> 
>> @@ -386,11 +388,12 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
>>  int kvm_ioapic_init(struct kvm *kvm)
>>  {
>>      struct kvm_ioapic *ioapic;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>>      ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
>>      if (!ioapic)
>>              return -ENOMEM;
>> -    kvm->vioapic = ioapic;
>> +    kvm_x86->vioapic = ioapic;
>>      kvm_ioapic_reset(ioapic);
>>      ioapic->dev.read = ioapic_mmio_read;
>>      ioapic->dev.write = ioapic_mmio_write;
>> diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
>> index 5ad3cfd..7180481 100644
>> --- a/drivers/kvm/irq.h
>> +++ b/drivers/kvm/irq.h
>> @@ -23,6 +23,7 @@
>>  #define __IRQ_H
>> 
>>  #include "kvm.h"
>> +#include "x86.h"
>> 
>>  typedef void irq_request_func(void *opaque, int level);
>> 
>> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
>> index 1901456..445012e 100644
>> --- a/drivers/kvm/kvm.h
>> +++ b/drivers/kvm/kvm.h
>> @@ -309,48 +309,16 @@ struct kvm {
>>      int nmemslots;
>>      struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>>                                      KVM_PRIVATE_MEM_SLOTS];
>> -    /*
>> -     * Hash table of struct kvm_mmu_page.
>> -     */
>> -    struct list_head active_mmu_pages;
>> -    unsigned int n_free_mmu_pages;
>> -    unsigned int n_requested_mmu_pages;
>> -    unsigned int n_alloc_mmu_pages;
>> -    struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
>>      struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>>      unsigned long rmap_overflow;
>>      struct list_head vm_list;
>>      struct file *filp;
>>      struct kvm_io_bus mmio_bus;
>>      struct kvm_io_bus pio_bus;
>> -    struct kvm_pic *vpic;
>> -    struct kvm_ioapic *vioapic;
>>      int round_robin_prev_vcpu;
>> -    unsigned int tss_addr;
>> -    struct page *apic_access_page;
>>      struct kvm_vm_stat stat;
>>  };
>> 
>> -static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
>> -{
>> -    return kvm->vpic;
>> -}
>> -
>> -static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
>> -{
>> -    return kvm->vioapic;
>> -}
>> -
>> -static inline int irqchip_in_kernel(struct kvm *kvm)
>> -{
>> -    return pic_irqchip(kvm) != NULL;
>> -}
>> -
>> -struct descriptor_table {
>> -    u16 limit;
>> -    unsigned long base;
>> -} __attribute__((packed));
>> -
>>  /* The guest did something we don't support. */
>>  #define pr_unimpl(vcpu, fmt, ...)                                   \
>>   do {                                                               \
>> @@ -493,7 +461,6 @@ static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
>>      return slot - kvm->memslots;
>>  }
>> 
>> -
>>  enum kvm_stat_kind {
>>      KVM_STAT_VM,
>>      KVM_STAT_VCPU,
>> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
>> index bda733a..5d4bb68 100644
>> --- a/drivers/kvm/kvm_main.c
>> +++ b/drivers/kvm/kvm_main.c
>> @@ -233,6 +233,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
>>      struct kvm_memory_slot *memslot;
>>      struct kvm_memory_slot old, new;
>> 
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>>      r = -EINVAL;
>>      /* General sanity checks */
>>      if (mem->memory_size & (PAGE_SIZE - 1))
>> @@ -332,18 +334,19 @@ int __kvm_set_memory_region(struct kvm *kvm,
>>      if (mem->slot >= kvm->nmemslots)
>>              kvm->nmemslots = mem->slot + 1;
>> 
>> -    if (!kvm->n_requested_mmu_pages) {
>> +    if (!kvm_x86->n_requested_mmu_pages) {
>>              unsigned int n_pages;
>> 
>>              if (npages) {
>>                      n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
>> -                    kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
>> +                    kvm_mmu_change_mmu_pages(kvm,
>> +                                             kvm_x86->n_alloc_mmu_pages +
>>                                               n_pages);
>>              } else {
>>                      unsigned int nr_mmu_pages;
>> 
>>                      n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
>> -                    nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
>> +                    nr_mmu_pages = kvm_x86->n_alloc_mmu_pages - n_pages;
>>                      nr_mmu_pages = max(nr_mmu_pages,
>>                                      (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
>>                      kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
>> diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
>> index 87d8e70..9d6684a 100644
>> --- a/drivers/kvm/mmu.c
>> +++ b/drivers/kvm/mmu.c
>> @@ -526,12 +526,14 @@ static int is_empty_shadow_page(u64 *spt)
>>  static void kvm_mmu_free_page(struct kvm *kvm,
>>                            struct kvm_mmu_page *page_head)
>>  {
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>>      ASSERT(is_empty_shadow_page(page_head->spt));
>>      list_del(&page_head->link);
>>      __free_page(virt_to_page(page_head->spt));
>>      __free_page(virt_to_page(page_head->gfns));
>>      kfree(page_head);
>> -    ++kvm->n_free_mmu_pages;
>> +    ++kvm_x86->n_free_mmu_pages;
>>  }
>> 
>>  static unsigned kvm_page_table_hashfn(gfn_t gfn)
>> @@ -543,8 +545,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
>>                                             u64 *parent_pte)
>>  {
>>      struct kvm_mmu_page *page;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> 
>> -    if (!vcpu->kvm->n_free_mmu_pages)
>> +    if (!kvm_x86->n_free_mmu_pages)
>>              return NULL;
>> 
>>      page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
>> @@ -552,12 +555,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
>>      page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
>>      page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
>>      set_page_private(virt_to_page(page->spt), (unsigned long)page);
>> -    list_add(&page->link, &vcpu->kvm->active_mmu_pages);
>> +    list_add(&page->link, &kvm_x86->active_mmu_pages);
>>      ASSERT(is_empty_shadow_page(page->spt));
>>      page->slot_bitmap = 0;
>>      page->multimapped = 0;
>>      page->parent_pte = parent_pte;
>> -    --vcpu->kvm->n_free_mmu_pages;
>> +    --kvm_x86->n_free_mmu_pages;
>>      return page;
>>  }
>> 
>> @@ -643,10 +646,12 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
>>      struct hlist_head *bucket;
>>      struct kvm_mmu_page *page;
>>      struct hlist_node *node;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> 
>>      pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
>>      index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
>> -    bucket = &kvm->mmu_page_hash[index];
>> +    bucket = &kvm_x86->mmu_page_hash[index];
>>      hlist_for_each_entry(page, node, bucket, hash_link)
>>              if (page->gfn == gfn && !page->role.metaphysical) {
>>                      pgprintk("%s: found role %x\n",
>> @@ -670,6 +675,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>>      struct hlist_head *bucket;
>>      struct kvm_mmu_page *page;
>>      struct hlist_node *node;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> +
>> 
>>      role.word = 0;
>>      role.glevels = vcpu->mmu.root_level;
>> @@ -684,7 +691,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>>      pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
>>               gfn, role.word);
>>      index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
>> -    bucket = &vcpu->kvm->mmu_page_hash[index];
>> +    bucket = &kvm_x86->mmu_page_hash[index];
>>      hlist_for_each_entry(page, node, bucket, hash_link)
>>              if (page->gfn == gfn && page->role.word == role.word) {
>>                      mmu_page_add_parent_pte(vcpu, page, parent_pte);
>> @@ -754,6 +761,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
>>                           struct kvm_mmu_page *page)
>>  {
>>      u64 *parent_pte;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>>      ++kvm->stat.mmu_shadow_zapped;
>>      while (page->multimapped || page->parent_pte) {
>> @@ -775,7 +783,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
>>              hlist_del(&page->hash_link);
>>              kvm_mmu_free_page(kvm, page);
>>      } else
>> -            list_move(&page->link, &kvm->active_mmu_pages);
>> +            list_move(&page->link, &kvm_x86->active_mmu_pages);
>>      kvm_mmu_reset_last_pte_updated(kvm);
>>  }
>> 
>> @@ -790,27 +798,28 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm,
>>                                unsigned int kvm_nr_mmu_pages)
>>       * number of actived pages , we must to free some mmu pages before we
>>       * change the value
>>       */
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>> -    if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
>> +    if ((kvm_x86->n_alloc_mmu_pages - kvm_x86->n_free_mmu_pages) >
>>          kvm_nr_mmu_pages) {
>> -            int n_used_mmu_pages = kvm->n_alloc_mmu_pages
>> -                                   - kvm->n_free_mmu_pages;
>> +            int n_used_mmu_pages = kvm_x86->n_alloc_mmu_pages
>> +                                   - kvm_x86->n_free_mmu_pages;
>> 
>>              while (n_used_mmu_pages > kvm_nr_mmu_pages) {
>>                      struct kvm_mmu_page *page;
>> 
>> -                    page = container_of(kvm->active_mmu_pages.prev,
>> +                    page = container_of(kvm_x86->active_mmu_pages.prev,
>>                                          struct kvm_mmu_page, link);
>>                      kvm_mmu_zap_page(kvm, page);
>>                      n_used_mmu_pages--;
>>              }
>> -            kvm->n_free_mmu_pages = 0;
>> +            kvm_x86->n_free_mmu_pages = 0;
>>      }
>>      else
>> -            kvm->n_free_mmu_pages += kvm_nr_mmu_pages
>> -                                     - kvm->n_alloc_mmu_pages;
>> +            kvm_x86->n_free_mmu_pages += kvm_nr_mmu_pages
>> +                                     - kvm_x86->n_alloc_mmu_pages;
>> 
>> -    kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
>> +    kvm_x86->n_alloc_mmu_pages = kvm_nr_mmu_pages;
>>  }
>> 
>>  static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
>> @@ -820,11 +829,12 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
>>      struct kvm_mmu_page *page;
>>      struct hlist_node *node, *n;
>>      int r;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>>      pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
>>      r = 0;
>>      index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
>> -    bucket = &kvm->mmu_page_hash[index];
>> +    bucket = &kvm_x86->mmu_page_hash[index];
>>      hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
>>              if (page->gfn == gfn && !page->role.metaphysical) {
>>                      pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
>> @@ -1265,6 +1275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>>      int level;
>>      int flooded = 0;
>>      int npte;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> 
>>      pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
>>      ++vcpu->kvm->stat.mmu_pte_write;
>> @@ -1280,7 +1291,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>>              vcpu->last_pte_updated = NULL;
>>      }
>>      index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
>> -    bucket = &vcpu->kvm->mmu_page_hash[index];
>> +    bucket = &kvm_x86->mmu_page_hash[index];
>>      hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
>>              if (page->gfn != gfn || page->role.metaphysical)
>>                      continue;
>> @@ -1344,10 +1355,12 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
>> 
>>  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
>>  {
>> -    while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> +
>> +    while (kvm_x86->n_free_mmu_pages < KVM_REFILL_PAGES) {
>>              struct kvm_mmu_page *page;
>> 
>> -            page = container_of(vcpu->kvm->active_mmu_pages.prev,
>> +            page = container_of(kvm_x86->active_mmu_pages.prev,
>>                                  struct kvm_mmu_page, link);
>>              kvm_mmu_zap_page(vcpu->kvm, page);
>>              ++vcpu->kvm->stat.mmu_recycled;
>> @@ -1397,9 +1410,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>>  static void free_mmu_pages(struct kvm_vcpu *vcpu)
>>  {
>>      struct kvm_mmu_page *page;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> 
>> -    while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
>> -            page = container_of(vcpu->kvm->active_mmu_pages.next,
>> +    while (!list_empty(&kvm_x86->active_mmu_pages)) {
>> +            page = container_of(kvm_x86->active_mmu_pages.next,
>>                                  struct kvm_mmu_page, link);
>>              kvm_mmu_zap_page(vcpu->kvm, page);
>>      }
>> @@ -1410,13 +1424,14 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
>>  {
>>      struct page *page;
>>      int i;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> 
>>      ASSERT(vcpu);
>> 
>> -    if (vcpu->kvm->n_requested_mmu_pages)
>> -            vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
>> +    if (kvm_x86->n_requested_mmu_pages)
>> +            kvm_x86->n_free_mmu_pages = kvm_x86->n_requested_mmu_pages;
>>      else
>> -            vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
>> +            kvm_x86->n_free_mmu_pages = kvm_x86->n_alloc_mmu_pages;
>> 
>>      /*
>>       * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
>>       * Therefore we need to allocate shadow page tables in the first
>> @@ -1464,8 +1479,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
>>  void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
>>  {
>>      struct kvm_mmu_page *page;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>> -    list_for_each_entry(page, &kvm->active_mmu_pages, link) {
>> +    list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>>              int i;
>>              u64 *pt;
>> 
>> @@ -1483,8 +1499,9 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
>>  void kvm_mmu_zap_all(struct kvm *kvm)
>>  {
>>      struct kvm_mmu_page *page, *node;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>> -    list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
>> +    list_for_each_entry_safe(page, node, &kvm_x86->active_mmu_pages, link)
>>              kvm_mmu_zap_page(kvm, page);
>> 
>>      kvm_flush_remote_tlbs(kvm);
>> @@ -1637,7 +1654,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
>>      struct kvm_mmu_page *page;
>>      int i;
>> 
>> -    list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
>> +    list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>>              u64 *pt = page->spt;
>> 
>>              if (page->role.level != PT_PAGE_TABLE_LEVEL)
>> @@ -1672,8 +1689,9 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
>>      struct kvm_memory_slot *slot;
>>      unsigned long *rmapp;
>>      gfn_t gfn;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>> -    list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
>> +    list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>>              if (page->role.metaphysical)
>>                      continue;
>> 
>> diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
>> index 4ad60c9..d5df045 100644
>> --- a/drivers/kvm/vmx.c
>> +++ b/drivers/kvm/vmx.c
>> @@ -1141,12 +1141,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
>> 
>>  static gva_t rmode_tss_base(struct kvm *kvm)
>>  {
>> -    if (!kvm->tss_addr) {
>> +
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    if (!kvm_x86->tss_addr) {
>>              gfn_t base_gfn = kvm->memslots[0].base_gfn +
>>                               kvm->memslots[0].npages - 3;
>>              return base_gfn << PAGE_SHIFT;
>>      }
>> -    return kvm->tss_addr;
>> +    return kvm_x86->tss_addr;
>>  }
>> 
>>  static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
>> @@ -1467,10 +1470,11 @@ static void seg_setup(int seg)
>>  static int alloc_apic_access_page(struct kvm *kvm)
>>  {
>>      struct kvm_userspace_memory_region kvm_userspace_mem;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>>      int r = 0;
>> 
>>      mutex_lock(&kvm->lock);
>> -    if (kvm->apic_access_page)
>> +    if (kvm_x86->apic_access_page)
>>              goto out;
>>      kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
>>      kvm_userspace_mem.flags = 0;
>> @@ -1479,7 +1483,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
>>      r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
>>      if (r)
>>              goto out;
>> -    kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
>> +    kvm_x86->apic_access_page = gfn_to_page(kvm, 0xfee00);
>>  out:
>>      mutex_unlock(&kvm->lock);
>>      return r;
>> @@ -1602,6 +1606,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>  static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
>>  {
>>      struct vcpu_vmx *vmx = to_vmx(vcpu);
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>>      u64 msr;
>>      int ret;
>> 
>> @@ -1694,7 +1699,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
>> 
>>      if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
>>              vmcs_write64(APIC_ACCESS_ADDR,
>> -                         page_to_phys(vmx->vcpu.kvm->apic_access_page));
>> +                         page_to_phys(kvm_x86->apic_access_page));
>> 
>>      vmx->vcpu.cr0 = 0x60000010;
>>      vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
>> @@ -1775,11 +1780,12 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
>>              .memory_size = PAGE_SIZE * 3,
>>              .flags = 0,
>>      };
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>>      ret = kvm_set_memory_region(kvm, &tss_mem, 0);
>>      if (ret)
>>              return ret;
>> -    kvm->tss_addr = addr;
>> +    kvm_x86->tss_addr = addr;
>>      return 0;
>>  }
>> 
>> diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
>> index 40871b5..0ba82b9 100644
>> --- a/drivers/kvm/x86.c
>> +++ b/drivers/kvm/x86.c
>> @@ -815,13 +815,15 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
>>  static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
>>                                            u32 kvm_nr_mmu_pages)
>>  {
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>>      if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
>>              return -EINVAL;
>> 
>>      mutex_lock(&kvm->lock);
>> 
>>      kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
>> -    kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
>> +    kvm_x86->n_requested_mmu_pages = kvm_nr_mmu_pages;
>> 
>>      mutex_unlock(&kvm->lock);
>>      return 0;
>> @@ -829,7 +831,9 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
>> 
>>  static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
>>  {
>> -    return kvm->n_alloc_mmu_pages;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    return kvm_x86->n_alloc_mmu_pages;
>>  }
>> 
>>  /*
>> @@ -974,6 +978,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
>>      struct kvm *kvm = filp->private_data;
>>      void __user *argp = (void __user *)arg;
>>      int r = -EINVAL;
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> 
>>      switch (ioctl) {
>>      case KVM_SET_TSS_ADDR:
>> @@ -1018,12 +1023,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
>>      }
>>      case KVM_CREATE_IRQCHIP:
>>              r = -ENOMEM;
>> -            kvm->vpic = kvm_create_pic(kvm);
>> -            if (kvm->vpic) {
>> +            kvm_x86->vpic = kvm_create_pic(kvm);
>> +            if (kvm_x86->vpic) {
>>                      r = kvm_ioapic_init(kvm);
>>                      if (r) {
>> -                            kfree(kvm->vpic);
>> -                            kvm->vpic = NULL;
>> +                            kfree(kvm_x86->vpic);
>> +                            kvm_x86->vpic = NULL;
>>                              goto out;
>>                      }
>>              } else
>> @@ -1041,7 +1046,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
>>                              kvm_pic_set_irq(pic_irqchip(kvm),
>>                                      irq_event.irq,
>>                                      irq_event.level);
>> -                    kvm_ioapic_set_irq(kvm->vioapic,
>> +                    kvm_ioapic_set_irq(kvm_x86->vioapic,
>>                                      irq_event.irq,
>>                                      irq_event.level);
>>                      mutex_unlock(&kvm->lock);
>> @@ -2603,14 +2608,14 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
>> 
>>  struct kvm *kvm_arch_create_vm(void)
>>  {
>> -    struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> +    struct kvm_x86 *kvm_x86 = kzalloc(sizeof(struct kvm_x86), GFP_KERNEL);
>> 
>> -    if (!kvm)
>> +    if (!kvm_x86)
>>              return ERR_PTR(-ENOMEM);
>> 
>> -    INIT_LIST_HEAD(&kvm->active_mmu_pages);
>> +    INIT_LIST_HEAD(&kvm_x86->active_mmu_pages);
>> 
>> -    return kvm;
>> +    return &kvm_x86->kvm;
>>  }
>> 
>>  static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
>> @@ -2641,8 +2646,10 @@ static void kvm_free_vcpus(struct kvm *kvm)
>> 
>>  void kvm_arch_destroy_vm(struct kvm *kvm)
>>  {
>> -    kfree(kvm->vpic);
>> -    kfree(kvm->vioapic);
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    kfree(kvm_x86->vpic);
>> +    kfree(kvm_x86->vioapic);
>>      kvm_free_vcpus(kvm);
>>      kvm_free_physmem(kvm);
>>      kfree(kvm);
>> diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
>> index 90b791b..1a6f8fe 100644
>> --- a/drivers/kvm/x86.h
>> +++ b/drivers/kvm/x86.h
>> @@ -156,6 +156,52 @@ struct kvm_vcpu {
>>      struct x86_emulate_ctxt emulate_ctxt;
>>  };
>> 
>> +struct kvm_x86 {
>> +    struct kvm kvm;
>> +    /*
>> +     * Hash table of struct kvm_mmu_page.
>> +     */
>> +    struct list_head active_mmu_pages;
>> +    unsigned int n_free_mmu_pages;
>> +    unsigned int n_requested_mmu_pages;
>> +    unsigned int n_alloc_mmu_pages;
>> +    struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
>> +    struct kvm_pic *vpic;
>> +    struct kvm_ioapic *vioapic;
>> +    unsigned int tss_addr;
>> +    struct page *apic_access_page;
>> +};
>> +
>> +static inline struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
>> +{
>> +    return container_of(kvm, struct kvm_x86, kvm);
>> +}
>> +
>> +static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
>> +{
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    return kvm_x86->vpic;
>> +}
>> +
>> +static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
>> +{
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>> +
>> +    return kvm_x86->vioapic;
>> +}
>> +
>> +static inline int irqchip_in_kernel(struct kvm *kvm)
>> +{
>> +    return pic_irqchip(kvm) != NULL;
>> +}
>> +
>> +struct descriptor_table {
>> +    u16 limit;
>> +    unsigned long base;
>> +} __attribute__((packed));
>> +
>> +
>>  struct kvm_x86_ops {
>>      int (*cpu_has_kvm_support)(void);          /* __init */
>>      int (*disabled_by_bios)(void);             /* __init */
>> @@ -313,7 +359,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
>> 
>>  static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
>>  {
>> -    if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
>> +    struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>> +
>> +    if (unlikely(kvm_x86->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
>>              __kvm_mmu_free_some_pages(vcpu);
>>  }
