changes to standard KVM files:
--------------------------------------------------------------------------------------------
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6997c0..ec69310 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
+#include <linux/sched-if.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -360,7 +361,6 @@ static void __vcpu_clear(void *arg)
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
rdtscll(vmx->vcpu.arch.host_tsc);
- list_del(&vmx->local_vcpus_link);
vmx->vcpu.cpu = -1;
vmx->launched = 0;
}
@@ -369,6 +369,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
{
if (vmx->vcpu.cpu == -1)
return;
+ list_del(&vmx->local_vcpus_link);
smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}
@@ -648,6 +649,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(vmx->vmcs);
u64 tsc_this, delta, new_offset;
+ BUG_ON(is_host_vcpu(vcpu) || (is_idle_vcpu(vcpu)));
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
@@ -1102,8 +1104,10 @@ static void vmclear_local_vcpus(void)
struct vcpu_vmx *vmx, *n;
list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
- local_vcpus_link)
+ local_vcpus_link) {
+ list_del(&vmx->local_vcpus_link);
__vcpu_clear(vmx);
+ }
}
@@ -3587,13 +3591,15 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- spin_lock(&vmx_vpid_lock);
- if (vmx->vpid != 0)
- __clear_bit(vmx->vpid, vmx_vpid_bitmap);
- spin_unlock(&vmx_vpid_lock);
- vmx_free_vmcs(vcpu);
- kfree(vmx->host_msrs);
- kfree(vmx->guest_msrs);
+ if ((!is_host_vcpu(vcpu)) && (!is_idle_vcpu(vcpu))) {
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
+ vmx_free_vmcs(vcpu);
+ kfree(vmx->host_msrs);
+ kfree(vmx->guest_msrs);
+ }
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
@@ -3613,6 +3619,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ if (is_idle_vm(kvm) || is_host_vm(kvm))
+ return &vmx->vcpu;
+
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!vmx->guest_msrs) {
err = -ENOMEM;
@@ -3739,11 +3748,24 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_tdp_level = get_ept_level,
.get_mt_mask_shift = vmx_get_mt_mask_shift,
};
-
+#include <linux/moduleparam.h>
+extern long (*sched_setaffinity_p)(pid_t pid, cpumask_t* in_mask);
+static int setaffinity = -1;
+module_param(setaffinity, int, 0);
static int __init vmx_init(void)
{
int r;
+ sched_setaffinity_p = (void*)setaffinity;
+ if( setaffinity == -1 ){
+ printk("Please insert this module with parameters\n");
+ printk("to notify me the address of sched_setaffinity\n");
+ printk("Example:\n insmod kvm-intel.ko setaffinity=0xffffffff9002ecd9\n");
+ return -EINVAL;
+ }else{
+ printk("the address of function sched_setaffinity is %p \n", sched_setaffinity_p);
+ }
+
vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_io_bitmap_a)
return -ENOMEM;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0bb4131..3b19058 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -43,6 +43,8 @@
#include <asm/desc.h>
#include <asm/mtrr.h>
+#include <linux/sched-if.h>
+
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -3256,8 +3258,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
down_read(&vcpu->kvm->slots_lock);
vapic_enter(vcpu);
- r = 1;
+ r = 1;
+ if(test_and_clear_bit(_VPF_blocked, &vcpu->pause_flags))
+ vcpu_wake(vcpu);
+
while (r > 0) {
+ wait_event(vcpu->wq, (vcpu->status == VCPU_RUNNING));
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu, kvm_run);
else {
@@ -3297,13 +3303,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
}
- if (need_resched()) {
- up_read(&vcpu->kvm->slots_lock);
- kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
- }
}
+ set_bit(_VPF_blocked, &vcpu->pause_flags);
up_read(&vcpu->kvm->slots_lock);
post_kvm_run_save(vcpu, kvm_run);
@@ -4382,16 +4384,17 @@ static void kvm_free_vcpus(struct kvm *kvm)
/*
* Unpin any mmu pages first.
*/
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+ if ((!is_idle_vm(kvm)) && (!is_host_vm(kvm))) {
+ for (i = 0; i < KVM_MAX_VCPUS; ++i)
+ if (kvm->vcpus[i])
+ kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+ }
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
-
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -4401,16 +4404,20 @@ void kvm_arch_sync_events(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kvm_iommu_unmap_guest(kvm);
- kvm_free_pit(kvm);
- kfree(kvm->arch.vpic);
- kfree(kvm->arch.vioapic);
- kvm_free_vcpus(kvm);
- kvm_free_physmem(kvm);
- if (kvm->arch.apic_access_page)
- put_page(kvm->arch.apic_access_page);
- if (kvm->arch.ept_identity_pagetable)
- put_page(kvm->arch.ept_identity_pagetable);
+ if ((!is_idle_vm(kvm)) && (!is_host_vm(kvm))) {
+ kvm_iommu_unmap_guest(kvm);
+ kvm_free_pit(kvm);
+ kfree(kvm->arch.vpic);
+ kfree(kvm->arch.vioapic);
+ kvm_free_vcpus(kvm);
+ kvm_free_physmem(kvm);
+ if (kvm->arch.apic_access_page)
+ put_page(kvm->arch.apic_access_page);
+ if (kvm->arch.ept_identity_pagetable)
+ put_page(kvm->arch.ept_identity_pagetable);
+ }else
+ kvm_free_vcpus(kvm);
+
kfree(kvm);
}
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html