[Updated the patch taking your comments into account]
Make sure that kvm_free_physmem_slot also frees the VM memory
if it was allocated by the kernel.
Signed-off-by: François Diakhaté <[EMAIL PROTECTED]>
---
arch/x86/kvm/x86.c | 10 +++++-----
virt/kvm/kvm_main.c | 18 ++++++++++++++----
2 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 883c137..818220b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4179,13 +4179,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (npages && !old.rmap) {
unsigned long userspace_addr;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->mm->mmap_sem);
userspace_addr = do_mmap(NULL, 0,
npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
0);
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->mm->mmap_sem);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
@@ -4198,10 +4198,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!old.user_alloc && old.rmap) {
int ret;
- down_write(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, old.userspace_addr,
+ down_write(&kvm->mm->mmap_sem);
+ ret = do_munmap(kvm->mm, old.userspace_addr,
old.npages * PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->mm->mmap_sem);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a87f45e..b420930 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -617,9 +617,19 @@ out:
/*
* Free any memory in @free but not in @dont.
*/
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
-static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
+ if(!dont || free->userspace_addr != dont->userspace_addr) {
+ struct kvm_userspace_memory_region mem = {
+ .slot = memslot_id(kvm, free),
+ .guest_phys_addr = free->base_gfn << PAGE_SHIFT,
+ .memory_size = 0,
+ .flags = 0,
+ };
+ kvm_arch_set_memory_region(kvm, &mem, *free, free->user_alloc);
+ }
+
if (!dont || free->rmap != dont->rmap)
vfree(free->rmap);
@@ -640,7 +650,7 @@ void kvm_free_physmem(struct kvm *kvm)
int i;
for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+ kvm_free_physmem_slot(kvm, &kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
@@ -821,7 +831,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- kvm_free_physmem_slot(&old, &new);
+ kvm_free_physmem_slot(kvm, &old, &new);
#ifdef CONFIG_DMAR
/* map the pages in iommu page table */
r = kvm_iommu_map_pages(kvm, base_gfn, npages);
@@ -831,7 +841,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
return 0;
out_free:
- kvm_free_physmem_slot(&new, &old);
+ kvm_free_physmem_slot(kvm, &new, &old);
out:
return r;
--
1.6.0.3