Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
Signed-off-by: Hollis Blanchard <[EMAIL PROTECTED]>
---
Changes from Xiantao's patch:
- Fix whitespace
- Reorder variables to avoid stack padding
- Leave descriptor_table relocation for another patch
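Not part of the patch itself, just for reference: the new to_kvm_x86() helper added to x86.h below is the usual container_of() embedding idiom. A minimal stand-alone sketch of that idiom (struct bodies cut down to a couple of fields for illustration, not the real definitions):

#include <stddef.h>	/* offsetof() */
#include <stdio.h>

/* Same pointer arithmetic the kernel's container_of() performs. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm {			/* arch-independent core (cut down) */
	int nmemslots;
};

struct kvm_x86 {		/* x86-specific wrapper */
	struct kvm kvm;		/* embedded by value, not a pointer */
	unsigned int tss_addr;
};

static inline struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_x86, kvm);
}

int main(void)
{
	struct kvm_x86 vm = { .tss_addr = 0x1000 };

	/* Generic code only ever sees &vm.kvm; arch code recovers the wrapper. */
	struct kvm *generic = &vm.kvm;
	printf("tss_addr = %#x\n", to_kvm_x86(generic)->tss_addr);
	return 0;
}

Because kvm_arch_create_vm() in the patch allocates the whole struct kvm_x86 and returns &kvm_x86->kvm, every struct kvm pointer handed to generic code really points into a struct kvm_x86, so the container_of() recovery is always valid.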
 drivers/kvm/ioapic.c   |    7 ++-
 drivers/kvm/irq.h      |    1 +
 drivers/kvm/kvm.h      |   26 --------------
 drivers/kvm/kvm_main.c |    9 ++---
 drivers/kvm/mmu.c      |   85 ++++++++++++++++++++++++++++--------------------
 drivers/kvm/vmx.c      |    9 +++--
 drivers/kvm/x86.c      |   37 ++++++++++++--------
 drivers/kvm/x86.h      |   43 +++++++++++++++++++++++-
 8 files changed, 132 insertions(+), 85 deletions(-)
diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
--- a/drivers/kvm/ioapic.c
+++ b/drivers/kvm/ioapic.c
@@ -276,7 +276,9 @@ static int get_eoi_gsi(struct kvm_ioapic
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
{
- struct kvm_ioapic *ioapic = kvm->vioapic;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ struct kvm_ioapic *ioapic = kvm_x86->vioapic;
union ioapic_redir_entry *ent;
int gsi;
@@ -386,11 +388,12 @@ int kvm_ioapic_init(struct kvm *kvm)
int kvm_ioapic_init(struct kvm *kvm)
{
struct kvm_ioapic *ioapic;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
if (!ioapic)
return -ENOMEM;
- kvm->vioapic = ioapic;
+ kvm_x86->vioapic = ioapic;
kvm_ioapic_reset(ioapic);
ioapic->dev.read = ioapic_mmio_read;
ioapic->dev.write = ioapic_mmio_write;
diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
--- a/drivers/kvm/irq.h
+++ b/drivers/kvm/irq.h
@@ -23,6 +23,7 @@
#define __IRQ_H
#include "kvm.h"
+#include "x86.h"
typedef void irq_request_func(void *opaque, int level);
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -309,41 +309,15 @@ struct kvm {
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
- /*
- * Hash table of struct kvm_mmu_page.
- */
- struct list_head active_mmu_pages;
- unsigned int n_free_mmu_pages;
- unsigned int n_requested_mmu_pages;
- unsigned int n_alloc_mmu_pages;
- struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct list_head vm_list;
struct file *filp;
struct kvm_io_bus mmio_bus;
struct kvm_io_bus pio_bus;
- struct kvm_pic *vpic;
- struct kvm_ioapic *vioapic;
int round_robin_prev_vcpu;
- unsigned int tss_addr;
struct page *apic_access_page;
struct kvm_vm_stat stat;
};
-
-static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
-{
- return kvm->vpic;
-}
-
-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
-{
- return kvm->vioapic;
-}
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
- return pic_irqchip(kvm) != NULL;
-}
struct descriptor_table {
u16 limit;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -232,6 +232,7 @@ int __kvm_set_memory_region(struct kvm *
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
r = -EINVAL;
/* General sanity checks */
@@ -332,18 +333,18 @@ int __kvm_set_memory_region(struct kvm *
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
- if (!kvm->n_requested_mmu_pages) {
+ if (!kvm_x86->n_requested_mmu_pages) {
unsigned int n_pages;
if (npages) {
n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
- kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
- n_pages);
+ kvm_mmu_change_mmu_pages(kvm,
+ kvm_x86->n_alloc_mmu_pages + n_pages);
} else {
unsigned int nr_mmu_pages;
n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
- nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+ nr_mmu_pages = kvm_x86->n_alloc_mmu_pages - n_pages;
nr_mmu_pages = max(nr_mmu_pages,
(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -526,12 +526,14 @@ static void kvm_mmu_free_page(struct kvm
static void kvm_mmu_free_page(struct kvm *kvm,
struct kvm_mmu_page *page_head)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
ASSERT(is_empty_shadow_page(page_head->spt));
list_del(&page_head->link);
__free_page(virt_to_page(page_head->spt));
__free_page(virt_to_page(page_head->gfns));
kfree(page_head);
- ++kvm->n_free_mmu_pages;
+ ++kvm_x86->n_free_mmu_pages;
}
static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -543,8 +545,9 @@ static struct kvm_mmu_page *kvm_mmu_allo
u64 *parent_pte)
{
struct kvm_mmu_page *page;
-
- if (!vcpu->kvm->n_free_mmu_pages)
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+ if (!kvm_x86->n_free_mmu_pages)
return NULL;
page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
@@ -552,12 +555,12 @@ static struct kvm_mmu_page *kvm_mmu_allo
page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
set_page_private(virt_to_page(page->spt), (unsigned long)page);
- list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+ list_add(&page->link, &kvm_x86->active_mmu_pages);
ASSERT(is_empty_shadow_page(page->spt));
page->slot_bitmap = 0;
page->multimapped = 0;
page->parent_pte = parent_pte;
- --vcpu->kvm->n_free_mmu_pages;
+ --kvm_x86->n_free_mmu_pages;
return page;
}
@@ -643,10 +646,11 @@ static struct kvm_mmu_page *kvm_mmu_look
struct hlist_head *bucket;
struct kvm_mmu_page *page;
struct hlist_node *node;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
- bucket = &kvm->mmu_page_hash[index];
+ bucket = &kvm_x86->mmu_page_hash[index];
hlist_for_each_entry(page, node, bucket, hash_link)
if (page->gfn == gfn && !page->role.metaphysical) {
pgprintk("%s: found role %x\n",
@@ -670,6 +674,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
struct hlist_head *bucket;
struct kvm_mmu_page *page;
struct hlist_node *node;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
role.word = 0;
role.glevels = vcpu->mmu.root_level;
@@ -684,7 +689,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
gfn, role.word);
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
- bucket = &vcpu->kvm->mmu_page_hash[index];
+ bucket = &kvm_x86->mmu_page_hash[index];
hlist_for_each_entry(page, node, bucket, hash_link)
if (page->gfn == gfn && page->role.word == role.word) {
mmu_page_add_parent_pte(vcpu, page, parent_pte);
@@ -754,6 +759,7 @@ static void kvm_mmu_zap_page(struct kvm
struct kvm_mmu_page *page)
{
u64 *parent_pte;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
++kvm->stat.mmu_shadow_zapped;
while (page->multimapped || page->parent_pte) {
@@ -775,7 +781,7 @@ static void kvm_mmu_zap_page(struct kvm
hlist_del(&page->hash_link);
kvm_mmu_free_page(kvm, page);
} else
- list_move(&page->link, &kvm->active_mmu_pages);
+ list_move(&page->link, &kvm_x86->active_mmu_pages);
kvm_mmu_reset_last_pte_updated(kvm);
}
@@ -785,36 +791,39 @@ static void kvm_mmu_zap_page(struct kvm
*/
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
/*
* If we set the number of mmu pages to be smaller be than the
* number of actived pages , we must to free some mmu pages before we
* change the value
*/
- if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+ if ((kvm_x86->n_alloc_mmu_pages - kvm_x86->n_free_mmu_pages) >
kvm_nr_mmu_pages) {
- int n_used_mmu_pages = kvm->n_alloc_mmu_pages
- - kvm->n_free_mmu_pages;
+ int n_used_mmu_pages = kvm_x86->n_alloc_mmu_pages
+ - kvm_x86->n_free_mmu_pages;
while (n_used_mmu_pages > kvm_nr_mmu_pages) {
struct kvm_mmu_page *page;
- page = container_of(kvm->active_mmu_pages.prev,
+ page = container_of(kvm_x86->active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(kvm, page);
n_used_mmu_pages--;
}
- kvm->n_free_mmu_pages = 0;
+ kvm_x86->n_free_mmu_pages = 0;
}
else
- kvm->n_free_mmu_pages += kvm_nr_mmu_pages
- - kvm->n_alloc_mmu_pages;
-
- kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+ kvm_x86->n_free_mmu_pages += kvm_nr_mmu_pages
+ - kvm_x86->n_alloc_mmu_pages;
+
+ kvm_x86->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *page;
@@ -824,7 +833,7 @@ static int kvm_mmu_unprotect_page(struct
pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
r = 0;
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
- bucket = &kvm->mmu_page_hash[index];
+ bucket = &kvm_x86->mmu_page_hash[index];
hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
if (page->gfn == gfn && !page->role.metaphysical) {
pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1251,6 +1260,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *page;
struct hlist_node *node, *n;
@@ -1280,7 +1290,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
vcpu->last_pte_updated = NULL;
}
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
- bucket = &vcpu->kvm->mmu_page_hash[index];
+ bucket = &kvm_x86->mmu_page_hash[index];
hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
if (page->gfn != gfn || page->role.metaphysical)
continue;
@@ -1344,10 +1354,12 @@ int kvm_mmu_unprotect_page_virt(struct k
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
- while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+ while (kvm_x86->n_free_mmu_pages < KVM_REFILL_PAGES) {
struct kvm_mmu_page *page;
- page = container_of(vcpu->kvm->active_mmu_pages.prev,
+ page = container_of(kvm_x86->active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu->kvm, page);
++vcpu->kvm->stat.mmu_recycled;
@@ -1397,9 +1409,10 @@ static void free_mmu_pages(struct kvm_vc
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *page;
-
- while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
- page = container_of(vcpu->kvm->active_mmu_pages.next,
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+ while (!list_empty(&kvm_x86->active_mmu_pages)) {
+ page = container_of(kvm_x86->active_mmu_pages.next,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu->kvm, page);
}
@@ -1408,15 +1421,16 @@ static void free_mmu_pages(struct kvm_vc
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
struct page *page;
int i;
ASSERT(vcpu);
- if (vcpu->kvm->n_requested_mmu_pages)
- vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+ if (kvm_x86->n_requested_mmu_pages)
+ kvm_x86->n_free_mmu_pages = kvm_x86->n_requested_mmu_pages;
else
- vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+ kvm_x86->n_free_mmu_pages = kvm_x86->n_alloc_mmu_pages;
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
* Therefore we need to allocate shadow page tables in the first
@@ -1464,8 +1478,9 @@ void kvm_mmu_slot_remove_write_access(st
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
struct kvm_mmu_page *page;
-
- list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
int i;
u64 *pt;
@@ -1483,8 +1498,9 @@ void kvm_mmu_zap_all(struct kvm *kvm)
void kvm_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *page, *node;
-
- list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ list_for_each_entry_safe(page, node, &kvm_x86->active_mmu_pages, link)
kvm_mmu_zap_page(kvm, page);
kvm_flush_remote_tlbs(kvm);
@@ -1637,7 +1653,7 @@ static int count_writable_mappings(struc
struct kvm_mmu_page *page;
int i;
- list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+ list_for_each_entry(page, &to_kvm_x86(vcpu->kvm)->active_mmu_pages, link) {
u64 *pt = page->spt;
if (page->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1672,8 +1688,9 @@ static void audit_write_protection(struc
struct kvm_memory_slot *slot;
unsigned long *rmapp;
gfn_t gfn;
-
- list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+ list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
if (page->role.metaphysical)
continue;
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1141,12 +1141,14 @@ static void enter_pmode(struct kvm_vcpu
static gva_t rmode_tss_base(struct kvm *kvm)
{
- if (!kvm->tss_addr) {
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ if (!kvm_x86->tss_addr) {
gfn_t base_gfn = kvm->memslots[0].base_gfn +
kvm->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
- return kvm->tss_addr;
+ return kvm_x86->tss_addr;
}
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
@@ -1768,6 +1770,7 @@ static void do_interrupt_requests(struct
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
int ret;
struct kvm_userspace_memory_region tss_mem = {
.slot = 8,
@@ -1779,7 +1782,7 @@ static int vmx_set_tss_addr(struct kvm *
ret = kvm_set_memory_region(kvm, &tss_mem, 0);
if (ret)
return ret;
- kvm->tss_addr = addr;
+ kvm_x86->tss_addr = addr;
return 0;
}
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -815,13 +815,15 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
u32 kvm_nr_mmu_pages)
{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
mutex_lock(&kvm->lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
- kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+ kvm_x86->n_requested_mmu_pages = kvm_nr_mmu_pages;
mutex_unlock(&kvm->lock);
return 0;
@@ -829,7 +831,9 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
- return kvm->n_alloc_mmu_pages;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ return kvm_x86->n_alloc_mmu_pages;
}
/*
@@ -972,6 +976,7 @@ long kvm_arch_vm_ioctl(struct file *filp
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
void __user *argp = (void __user *)arg;
int r = -EINVAL;
@@ -1018,12 +1023,12 @@ long kvm_arch_vm_ioctl(struct file *filp
}
case KVM_CREATE_IRQCHIP:
r = -ENOMEM;
- kvm->vpic = kvm_create_pic(kvm);
- if (kvm->vpic) {
+ kvm_x86->vpic = kvm_create_pic(kvm);
+ if (kvm_x86->vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
- kfree(kvm->vpic);
- kvm->vpic = NULL;
+ kfree(kvm_x86->vpic);
+ kvm_x86->vpic = NULL;
goto out;
}
} else
@@ -1041,7 +1046,7 @@ long kvm_arch_vm_ioctl(struct file *filp
kvm_pic_set_irq(pic_irqchip(kvm),
irq_event.irq,
irq_event.level);
- kvm_ioapic_set_irq(kvm->vioapic,
+ kvm_ioapic_set_irq(kvm_x86->vioapic,
irq_event.irq,
irq_event.level);
mutex_unlock(&kvm->lock);
@@ -2603,14 +2608,14 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
struct kvm *kvm_arch_create_vm(void)
{
- struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
- if (!kvm)
+ struct kvm_x86 *kvm_x86 = kzalloc(sizeof(struct kvm_x86), GFP_KERNEL);
+
+ if (!kvm_x86)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&kvm->active_mmu_pages);
-
- return kvm;
+ INIT_LIST_HEAD(&kvm_x86->active_mmu_pages);
+
+ return &kvm_x86->kvm;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -2641,8 +2646,10 @@ static void kvm_free_vcpus(struct kvm *k
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kfree(kvm->vpic);
- kfree(kvm->vioapic);
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ kfree(kvm_x86->vpic);
+ kfree(kvm_x86->vioapic);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
kfree(kvm);
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -155,6 +155,45 @@ struct kvm_vcpu {
struct x86_emulate_ctxt emulate_ctxt;
};
+
+struct kvm_x86 {
+ struct kvm kvm;
+ /*
+ * Hash table of struct kvm_mmu_page.
+ */
+ struct list_head active_mmu_pages;
+ unsigned int n_free_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_alloc_mmu_pages;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ struct kvm_pic *vpic;
+ struct kvm_ioapic *vioapic;
+ unsigned int tss_addr;
+};
+
+static inline struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
+{
+ return container_of(kvm, struct kvm_x86, kvm);
+}
+
+static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
+{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ return kvm_x86->vpic;
+}
+
+static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
+{
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+ return kvm_x86->vioapic;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+ return pic_irqchip(kvm) != NULL;
+}
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
@@ -313,7 +352,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
- if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+ struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+ if (unlikely(kvm_x86->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
__kvm_mmu_free_some_pages(vcpu);
}