Hi,

Usual patch, but adapted to mmu notifier #v5; it works fine here as
expected.

I doubt Christoph's V4 was close to final yet: GRU wasn't covered at
all, and not even mremap was covered (neither for XPMEM nor for GRU) in V4.

The first workable API for XPMEM (one that closes the SMP race I've
been explaining since export-notifiers #v1) is just an idea from last
night... and for the first time I think XPMEM may work safely.

I think my #v5 is small enough, should already fit KVM and GRU,
provides an API that allows optimization and extension over time, and
can be extended to support XPMEM once that works in practice. I really
think it's a better idea to at least be able to test some code before
pushing a broad VM-visible API into mainline. That is what I did with
KVM, in fact: once KVM was solid swapping 3G over 1G of RAM, I pushed
the mmu notifiers to lkml.
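
For reference, the only hooks this patch implements on the KVM side
are invalidate_page, invalidate_pages and age_page; a sketch of the
corresponding ops, inferred from the callbacks below (the actual
declaration in #v5's include/linux/mmu_notifier.h may differ and
carry more hooks), looks like:

	/* Sketch only: the subset of #v5 notifier hooks used by this
	 * KVM patch, with the signatures the callbacks below expect. */
	struct mmu_notifier_ops {
		void (*invalidate_page)(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address);
		void (*invalidate_pages)(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end);
		int (*age_page)(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address);
	};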

Making the merge of KVM/GRU depend on XPMEM support being merged
doesn't sound like a good idea. My patch also adds no overhead with
MMU_NOTIFIER=n. I hope Christoph agrees with my proposal to use #v5
as the mmu notifier core and to merge it into mainline with higher
priority, so we can mostly close the discussions on KVM and GRU
(optimizations remain possible) and keep working incrementally on
XPMEM, pushing it into mainline once you've verified that it doesn't
crash at runtime and that it doesn't need yet another API change.
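
To be explicit about the MMU_NOTIFIER=n case: the hooks can collapse
to empty static inline stubs when the option is off, so the call
sites in the core VM compile to nothing. A minimal sketch of that
pattern (illustrative only, not the literal #v5 header; the function
name is just an example):

	#ifdef CONFIG_MMU_NOTIFIER
	extern void mmu_notifier_invalidate_page(struct mm_struct *mm,
						 unsigned long address);
	#else
	static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
							unsigned long address)
	{
		/* MMU_NOTIFIER=n: no-op, compiled away entirely */
	}
	#endif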

Signed-off-by: Andrea Arcangeli <[EMAIL PROTECTED]>

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 4086080..c527d7d 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -18,6 +18,7 @@ config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
        depends on ARCH_SUPPORTS_KVM && EXPERIMENTAL
        select PREEMPT_NOTIFIERS
+       select MMU_NOTIFIER
        select ANON_INODES
        ---help---
          Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c85b904..adb20de 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -532,6 +532,110 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                kvm_flush_remote_tlbs(kvm);
 }
 
+static void kvm_unmap_spte(struct kvm *kvm, u64 *spte)
+{
+       struct page *page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       get_page(page);
+       rmap_remove(kvm, spte);
+       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+       kvm_flush_remote_tlbs(kvm);
+       __free_page(page);
+}
+
+static void kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *spte, *curr_spte;
+
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               BUG_ON(!(*spte & PT_PRESENT_MASK));
+               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+               curr_spte = spte;
+               spte = rmap_next(kvm, rmapp, spte);
+               kvm_unmap_spte(kvm, curr_spte);
+       }
+}
+
+void kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       int i;
+
+       /*
+        * If mmap_sem isn't taken, we can look up the memslots with only
+        * the mmu_lock, by skipping over the slots with userspace_addr == 0.
+        */
+       spin_lock(&kvm->mmu_lock);
+       for (i = 0; i < kvm->nmemslots; i++) {
+               struct kvm_memory_slot *memslot = &kvm->memslots[i];
+               unsigned long start = memslot->userspace_addr;
+               unsigned long end;
+
+               /* mmu_lock protects userspace_addr */
+               if (!start)
+                       continue;
+
+               end = start + (memslot->npages << PAGE_SHIFT);
+               if (hva >= start && hva < end) {
+                       gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                       kvm_unmap_rmapp(kvm, &memslot->rmap[gfn_offset]);
+               }
+       }
+       spin_unlock(&kvm->mmu_lock);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *spte;
+       int young = 0;
+
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               int _young;
+               u64 _spte = *spte;
+               BUG_ON(!(_spte & PT_PRESENT_MASK));
+               _young = _spte & PT_ACCESSED_MASK;
+               if (_young) {
+                       young = !!_young;
+                       set_shadow_pte(spte, _spte & ~PT_ACCESSED_MASK);
+               }
+               spte = rmap_next(kvm, rmapp, spte);
+       }
+       return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       int i;
+       int young = 0;
+
+       /*
+        * If mmap_sem isn't taken, we can look up the memslots with only
+        * the mmu_lock, by skipping over the slots with userspace_addr == 0.
+        */
+       spin_lock(&kvm->mmu_lock);
+       for (i = 0; i < kvm->nmemslots; i++) {
+               struct kvm_memory_slot *memslot = &kvm->memslots[i];
+               unsigned long start = memslot->userspace_addr;
+               unsigned long end;
+
+               /* mmu_lock protects userspace_addr */
+               if (!start)
+                       continue;
+
+               end = start + (memslot->npages << PAGE_SHIFT);
+               if (hva >= start && hva < end) {
+                       gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                       young |= kvm_age_rmapp(kvm, &memslot->rmap[gfn_offset]);
+               }
+       }
+       spin_unlock(&kvm->mmu_lock);
+
+       if (young)
+               kvm_flush_remote_tlbs(kvm);
+
+       return young;
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f94a0b..a99c2ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3167,6 +3167,45 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        free_page((unsigned long)vcpu->arch.pio_data);
 }
 
+static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+{
+       struct kvm_arch *kvm_arch;
+       kvm_arch = container_of(mn, struct kvm_arch, mmu_notifier);
+       return container_of(kvm_arch, struct kvm, arch);
+}
+
+void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+                                     struct mm_struct *mm,
+                                     unsigned long address)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       BUG_ON(mm != kvm->mm);
+       kvm_unmap_hva(kvm, address);
+}
+
+void kvm_mmu_notifier_invalidate_pages(struct mmu_notifier *mn,
+                                      struct mm_struct *mm,
+                                      unsigned long start, unsigned long end)
+{
+       for (; start < end; start += PAGE_SIZE)
+               kvm_mmu_notifier_invalidate_page(mn, mm, start);
+}
+
+int kvm_mmu_notifier_age_page(struct mmu_notifier *mn,
+                             struct mm_struct *mm,
+                             unsigned long address)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       BUG_ON(mm != kvm->mm);
+       return kvm_age_hva(kvm, address);
+}
+
+static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+       .invalidate_page        = kvm_mmu_notifier_invalidate_page,
+       .invalidate_pages       = kvm_mmu_notifier_invalidate_pages,
+       .age_page               = kvm_mmu_notifier_age_page,
+};
+
 struct  kvm *kvm_arch_create_vm(void)
 {
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -3176,6 +3215,9 @@ struct  kvm *kvm_arch_create_vm(void)
 
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 
+       kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops;
+       mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm);
+
        return kvm;
 }
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index d6db0de..72a7ff4 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -287,6 +288,8 @@ struct kvm_arch{
        int round_robin_prev_vcpu;
        unsigned int tss_addr;
        struct page *apic_access_page;
+
+       struct mmu_notifier mmu_notifier;
 };
 
 struct kvm_vm_stat {
@@ -404,6 +407,8 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 
+void kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
