Hello,

This is the latest version of the kvm mmu notifier patch.

This allows largepages to be invalidated too; separate patches take
care of alias locking (2/5) and memslot locking (3/5).

While in the long term we should remove the alias code to micro-optimize
the fast paths and avoid the need for unalias_gfn calls, I decided not
to remove it immediately: done right, that patch would have removed the
alias API as a whole from userland; done badly, it would have kept the
alias API intact while implementing the low-level kernel details with
memslots instead of the alias array. The problem is that the memslot
array is already fragmented by the private entries, so adding the alias
entries before or after the private memslot entries resulted in
different issues, often requiring a new loop over the alias range or
the private range depending on where I put the alias slots (before or
after the private ones).

So I think that to do it right the alias ioctl should be removed as a
whole, and that was too large a change to do right now, especially
given that we're concentrating on stability and that fixing the alias
locking with the mmu_lock was a two-line change.

Marcelo, please double check this code: I haven't effectively tested it
with largepages myself yet, but it looks like rmap_next works the same
and the rmap_pde is just an rmapp whose only difference is that the PSE
bit is set. I didn't hide the rmap_pde details behind gfn_to_rmap, to
avoid an entirely superfluous gfn_to_memslot.

+                       retval |= handler(kvm,
+                                         &memslot->lpage_info[
+                                                 gfn_offset /
+                                                 KVM_PAGES_PER_HPAGE].rmap_pde);
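
For comparison, the gfn_to_rmap() route would have looked roughly like
the sketch below (hypothetical code, assuming gfn_to_rmap()'s current
(kvm, gfn, lpage) signature); the extra gfn_to_memslot() it implies is
exactly what indexing lpage_info[] directly avoids, given that
kvm_handle_hva() already has the memslot in hand:

                        /*
                         * Hypothetical alternative, not in this patch:
                         * gfn_to_rmap() redoes the gfn_to_memslot()
                         * lookup that kvm_handle_hva() has already done.
                         */
                        retval |= handler(kvm, gfn_to_rmap(kvm,
                                          memslot->base_gfn + gfn_offset, 0));
                        retval |= handler(kvm, gfn_to_rmap(kvm,
                                          memslot->base_gfn + gfn_offset, 1));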

Signed-off-by: Andrea Arcangeli <[EMAIL PROTECTED]>

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d45fab..ce3251c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
        depends on HAVE_KVM
        select PREEMPT_NOTIFIERS
+       select MMU_NOTIFIER
        select ANON_INODES
        ---help---
          Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2b60b7d..9bc31fc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -653,6 +653,84 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
        account_shadowed(kvm, gfn);
 }
 
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *spte;
+       int need_tlb_flush = 0;
+
+       while ((spte = rmap_next(kvm, rmapp, NULL))) {
+               BUG_ON(!(*spte & PT_PRESENT_MASK));
+               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+               rmap_remove(kvm, spte);
+               set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+               need_tlb_flush = 1;
+       }
+       return need_tlb_flush;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+                         int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+{
+       int i;
+       int retval = 0;
+
+       /*
+        * If mmap_sem isn't taken, we can look up the memslots with only
+        * the mmu_lock by skipping over the slots with userspace_addr == 0.
+        */
+       for (i = 0; i < kvm->nmemslots; i++) {
+               struct kvm_memory_slot *memslot = &kvm->memslots[i];
+               unsigned long start = memslot->userspace_addr;
+               unsigned long end;
+
+               /* mmu_lock protects userspace_addr */
+               if (!start)
+                       continue;
+
+               end = start + (memslot->npages << PAGE_SHIFT);
+               if (hva >= start && hva < end) {
+                       gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                       retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+                       retval |= handler(kvm,
+                                         &memslot->lpage_info[
+                                                 gfn_offset /
+                                                 KVM_PAGES_PER_HPAGE].rmap_pde);
+               }
+       }
+
+       return retval;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *spte;
+       int young = 0;
+
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               int _young;
+               u64 _spte = *spte;
+               BUG_ON(!(_spte & PT_PRESENT_MASK));
+               _young = _spte & PT_ACCESSED_MASK;
+               if (_young) {
+                       young = 1;
+                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+               }
+               spte = rmap_next(kvm, rmapp, spte);
+       }
+       return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
@@ -1205,6 +1283,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
        int r;
        int largepage = 0;
        pfn_t pfn;
+       unsigned long mmu_seq;
 
        down_read(&current->mm->mmap_sem);
        if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1212,6 +1291,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
                largepage = 1;
        }
 
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /* implicit mb(), we'll read before PT lock is unlocked */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);
 
@@ -1222,6 +1303,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
        }
 
        spin_lock(&vcpu->kvm->mmu_lock);
+       if (mmu_notifier_retry(vcpu, mmu_seq))
+               goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
                         PT32E_ROOT_LEVEL);
@@ -1229,6 +1312,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 
        return r;
+
+out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+       return 0;
 }
 
 
@@ -1347,6 +1435,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
        int r;
        int largepage = 0;
        gfn_t gfn = gpa >> PAGE_SHIFT;
+       unsigned long mmu_seq;
 
        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1360,6 +1449,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
                gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                largepage = 1;
        }
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /* implicit mb(), we'll read before PT lock is unlocked */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);
        if (is_error_pfn(pfn)) {
@@ -1367,12 +1458,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
                return 1;
        }
        spin_lock(&vcpu->kvm->mmu_lock);
+       if (mmu_notifier_retry(vcpu, mmu_seq))
+               goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
                         largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return r;
+
+out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+       return 0;
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1672,6 +1770,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                vcpu->arch.update_pte.largepage = 1;
        }
+       vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /* implicit mb(), we'll read before PT lock is unlocked */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4d91822..f72ac1f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
        pfn = vcpu->arch.update_pte.pfn;
        if (is_error_pfn(pfn))
                return;
+       if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+               return;
        kvm_get_pfn(pfn);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
@@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        int r;
        pfn_t pfn;
        int largepage = 0;
+       unsigned long mmu_seq;
 
        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");
@@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                        largepage = 1;
                }
        }
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /* implicit mb(), we'll read before PT lock is unlocked */
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
        up_read(&current->mm->mmap_sem);
 
@@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        }
 
        spin_lock(&vcpu->kvm->mmu_lock);
+       if (mmu_notifier_retry(vcpu, mmu_seq))
+               goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                                  largepage, &write_pt, pfn);
@@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return write_pt;
+
+out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+       return 0;
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 0b6b996..d4bbf3b 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -262,6 +263,7 @@ struct kvm_vcpu_arch {
                gfn_t gfn;      /* presumed gfn during guest pte update */
                pfn_t pfn;      /* pfn corresponding to that gfn */
                int largepage;
+               unsigned long mmu_seq;
        } update_pte;
 
        struct i387_fxsave_struct host_fx_image;
@@ -727,4 +729,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
        KVM_EX_ENTRY " 666b, 667b \n\t" \
        ".popsection"
 
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3798097..a18aaad 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,12 @@ struct kvm {
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 #endif
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+       struct mmu_notifier mmu_notifier;
+       unsigned long mmu_notifier_seq;
+       long mmu_notifier_count;
+#endif
 };
 
 /* The guest did something we don't support. */
@@ -351,4 +357,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
 #define kvm_trace_cleanup() ((void)0)
 #endif
 
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+{
+       if (unlikely(vcpu->kvm->mmu_notifier_count))
+               return 1;
+       /*
+        * Both reads happen under the mmu_lock and both values are
+        * modified under mmu_lock, so there's no need for smp_rmb()
+        * here in between, otherwise mmu_notifier_count should be
+        * read before mmu_notifier_seq, see
+        * mmu_notifier_invalidate_range_end write side.
+        */
+       if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+               return 1;
+       return 0;
+}
+#endif
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 904d7b7..f1cb63d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -192,6 +192,123 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+{
+       return container_of(mn, struct kvm, mmu_notifier);
+}
+
+static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+                                            struct mm_struct *mm,
+                                            unsigned long address)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       int need_tlb_flush;
+
+       /*
+        * When ->invalidate_page runs, the linux pte has been zapped
+        * already but the page is still allocated until
+        * ->invalidate_page returns. So if we increase the sequence
+        * here the kvm page fault will notice if the spte can't be
+        * established because the page is going to be freed. If
+        * instead the kvm page fault establishes the spte before
+        * ->invalidate_page runs, kvm_unmap_hva will release it
+        * before returning.
+        *
+        * The sequence increase only needs to be seen at spin_unlock
+        * time, and not at spin_lock time.
+        *
+        * Increasing the sequence after the spin_unlock would be
+        * unsafe because the kvm page fault could then establish the
+        * pte after kvm_unmap_hva returned, without noticing the page
+        * is going to be freed.
+        */
+       spin_lock(&kvm->mmu_lock);
+       kvm->mmu_notifier_seq++;
+       need_tlb_flush = kvm_unmap_hva(kvm, address);
+       spin_unlock(&kvm->mmu_lock);
+
+       /* we have to flush the TLB before the pages can be freed */
+       if (need_tlb_flush)
+               kvm_flush_remote_tlbs(kvm);
+
+}
+
+static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+                                                   struct mm_struct *mm,
+                                                   unsigned long start,
+                                                   unsigned long end)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       int need_tlb_flush = 0;
+
+       spin_lock(&kvm->mmu_lock);
+       /*
+        * The count increase must become visible at unlock time as no
+        * spte can be established without taking the mmu_lock and
+        * count is also read inside the mmu_lock critical section.
+        */
+       kvm->mmu_notifier_count++;
+       for (; start < end; start += PAGE_SIZE)
+               need_tlb_flush |= kvm_unmap_hva(kvm, start);
+       spin_unlock(&kvm->mmu_lock);
+
+       /* we have to flush the TLB before the pages can be freed */
+       if (need_tlb_flush)
+               kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+                                                 struct mm_struct *mm,
+                                                 unsigned long start,
+                                                 unsigned long end)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+       spin_lock(&kvm->mmu_lock);
+       /*
+        * This sequence increase will notify the kvm page fault that
+        * the page that is going to be mapped in the spte could have
+        * been freed.
+        */
+       kvm->mmu_notifier_seq++;
+       /*
+        * The above sequence increase must be visible before the
+        * below count decrease but both values are read by the kvm
+        * page fault under the mmu_lock spinlock, so we don't need to
+        * add an smp_wmb() here in between the two.
+        */
+       kvm->mmu_notifier_count--;
+       spin_unlock(&kvm->mmu_lock);
+
+       BUG_ON(kvm->mmu_notifier_count < 0);
+}
+
+static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+                                             struct mm_struct *mm,
+                                             unsigned long address)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       int young;
+
+       spin_lock(&kvm->mmu_lock);
+       young = kvm_age_hva(kvm, address);
+       spin_unlock(&kvm->mmu_lock);
+
+       if (young)
+               kvm_flush_remote_tlbs(kvm);
+
+       return young;
+}
+
+static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+       .invalidate_page        = kvm_mmu_notifier_invalidate_page,
+       .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+       .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
+       .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
+};
+#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+
 static struct kvm *kvm_create_vm(void)
 {
        struct kvm *kvm = kvm_arch_create_vm();
@@ -212,6 +329,21 @@ static struct kvm *kvm_create_vm(void)
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
 #endif
 
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+       {
+               int err;
+               kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+               err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+               if (err) {
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+                       put_page(page);
+#endif
+                       kfree(kvm);
+                       return ERR_PTR(err);
+               }
+       }
+#endif
+
        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
@@ -272,6 +404,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
 #endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+       mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+#endif
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
 }