This is the kvm.git patch to enable -reserved-ram (without it, kvm will
simply fail gracefully by emulating the illegal instruction inside
bad_page). This trick avoids altering the ioctl API shared with libkvm:
in short, if get_user_pages fails on a host kernel built with the
reserved-ram config option, kvm checks whether the memslot is backed by
a remap_pfn_range mapping. If so, it checks whether the ram was
reserved with a page_count of 0 and, in that case, disables the
reference counting, since those pages are invisible to linux. As long
as pfn_valid returns true, pfn_to_page is safe; and if the memmap array
is allocated with holes matching the holes in the e820 map, bad_page is
simply returned gracefully, with no more risk than if this patch
weren't applied to kvm.git.
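
For illustration only (this is not part of the patch): userspace can
keep using the stock KVM_SET_USER_MEMORY_REGION ioctl and just point
userspace_addr at an mmap of the reserved region, for example through
/dev/mem, whose mmap goes through remap_pfn_range and so hits the
direct_page() fallback below. A minimal sketch, where
RESERVED_PHYS_BASE and RESERVED_SIZE are hypothetical placeholders for
the hole carved out of the e820 map (needs CAP_SYS_ADMIN, matching the
check in direct_page()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

#define RESERVED_PHYS_BASE 0x10000000UL   /* hypothetical */
#define RESERVED_SIZE      (128UL << 20)  /* hypothetical */

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int mem = open("/dev/mem", O_RDWR | O_SYNC);
        /* /dev/mem maps with remap_pfn_range: get_user_pages() fails
           on the resulting VM_PFNMAP vma and kvm falls back to
           direct_page() */
        void *ram = mmap(NULL, RESERVED_SIZE, PROT_READ | PROT_WRITE,
                         MAP_SHARED, mem, RESERVED_PHYS_BASE);
        struct kvm_userspace_memory_region slot;

        if (kvm < 0 || vm < 0 || mem < 0 || ram == MAP_FAILED) {
                perror("setup");
                return 1;
        }
        memset(&slot, 0, sizeof(slot));
        slot.slot = 0;
        slot.guest_phys_addr = 0;
        slot.memory_size = RESERVED_SIZE;
        slot.userspace_addr = (unsigned long) ram;
        if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &slot) < 0) {
                perror("KVM_SET_USER_MEMORY_REGION");
                return 1;
        }
        return 0;
}

The point is that no new ioctl or flag is needed: the only
kernel-visible difference is that get_user_pages fails and the pte is
looked up by hand.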

Signed-off-by: Andrea Arcangeli <[EMAIL PROTECTED]>

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e9ae5db..e7a9c82 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,7 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
        npage = vcpu->arch.update_pte.page;
        if (!npage)
                return;
-       get_page(npage);
+       if (!page_is_reserved(npage))
+               get_page(npage);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
                     npage, true);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4e1436..7f087ac 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -281,6 +281,18 @@ static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
 }
 
+#ifdef CONFIG_RESERVE_PHYSICAL_START
+static inline int page_is_reserved(struct page *page)
+{
+       return !page_count(page);
+}
+#else /* CONFIG_RESERVE_PHYSICAL_START */
+static inline int page_is_reserved(struct page *page)
+{
+       return 0;
+}
+#endif /* CONFIG_RESERVE_PHYSICAL_START */
+
 enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 30bf832..50a7b3e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -498,6 +524,65 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
 }
 
+
+#ifdef CONFIG_RESERVE_PHYSICAL_START
+static struct page *direct_page(struct mm_struct *mm,
+                               unsigned long address)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       struct page *page;
+       struct vm_area_struct *vma;
+       unsigned long pfn;
+
+       page = NULL;
+       if (!capable(CAP_SYS_ADMIN)) /* go safe */
+               goto out;
+
+       vma = find_vma(mm, address);
+       if (!vma || vma->vm_start > address ||
+           !(vma->vm_flags & VM_PFNMAP))
+               goto out;
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto out;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto out;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto out;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!ptep)
+               goto out;
+
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto unlock;
+
+       pfn = pte_pfn(pte);
+       if (!pfn_valid(pfn))
+               goto unlock;
+
+       page = pfn_to_page(pfn);
+       if (!page_is_reserved(page)) {
+               page = NULL;
+               goto unlock;
+       }
+unlock:
+       pte_unmap_unlock(ptep, ptl);
+out:
+       return page;
+}
+#endif /* CONFIG_RESERVE_PHYSICAL_START */
+
 /*
  * Requires current->mm->mmap_sem to be held
  */
@@ -519,6 +604,11 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
                                NULL);
 
        if (npages != 1) {
+#ifdef CONFIG_RESERVE_PHYSICAL_START
+               page[0] = direct_page(current->mm, addr);
+               if (page[0])
+                       return page[0];
+#endif
                get_page(bad_page);
                return bad_page;
        }
@@ -530,15 +620,18 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-       put_page(page);
+       if (!page_is_reserved(page))
+               put_page(page);
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_page_dirty(struct page *page)
 {
-       if (!PageReserved(page))
-               SetPageDirty(page);
-       put_page(page);
+       if (!page_is_reserved(page)) {
+               if (!PageReserved(page))
+                       SetPageDirty(page);
+               put_page(page);
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
