Hook two callbacks into the rmap code that operate on individual pages:

        invalidate_page()

Called from the inner loop of rmap walks to invalidate pages.

        age_page()

Called to determine the referenced status of a page.

If the page referenced status is of no interest, the age_page callback
may be omitted. Note that in the hunks below the notifier call is combined
with ptep_clear_flush_young() using a bitwise |, not ||, so that
mmu_notifier_age_page() still runs when the pte itself was already young.
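
For illustration only, here is a minimal sketch of how a driver that mirrors
page tables in an external (secondary) MMU might wire up these two callbacks.
It assumes the struct mmu_notifier_ops callback signatures proposed earlier in
this series (notifier, mm, address); the helpers my_flush_remote_tlb() and
my_test_and_clear_remote_young() are hypothetical stand-ins for driver code.

/*
 * Hedged sketch, not part of this patch: assumes the mmu_notifier_ops
 * layout introduced earlier in the series.
 */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical driver-specific helpers for the external MMU. */
static void my_flush_remote_tlb(struct mm_struct *mm, unsigned long address);
static int my_test_and_clear_remote_young(struct mm_struct *mm,
					  unsigned long address);

/* invalidate_page: drop the external mapping of a single page. */
static void my_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
			       unsigned long address)
{
	my_flush_remote_tlb(mm, address);
}

/* age_page: return nonzero if the external MMU referenced the page. */
static int my_age_page(struct mmu_notifier *mn, struct mm_struct *mm,
		       unsigned long address)
{
	return my_test_and_clear_remote_young(mm, address);
}

static struct mmu_notifier_ops my_notifier_ops = {
	.invalidate_page	= my_invalidate_page,
	.age_page		= my_age_page,	/* may be left NULL */
};

A driver would embed a struct mmu_notifier pointing at these ops and register
it against the mm whose mappings it mirrors.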

Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
Signed-off-by: Robin Holt <[EMAIL PROTECTED]>

---
 mm/rmap.c |   13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c    2008-01-31 19:55:45.000000000 -0800
+++ linux-2.6/mm/rmap.c 2008-01-31 20:28:35.000000000 -0800
@@ -49,6 +49,7 @@
 #include <linux/rcupdate.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -284,7 +285,8 @@ static int page_referenced_one(struct pa
        if (!pte)
                goto out;
 
-       if (ptep_clear_flush_young(vma, address, pte))
+       if (ptep_clear_flush_young(vma, address, pte) |
+           mmu_notifier_age_page(mm, address))
                referenced++;
 
        /* Pretend the page is referenced if the task has the
@@ -434,6 +436,7 @@ static int page_mkclean_one(struct page 
 
                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush(vma, address, pte);
+               mmu_notifier(invalidate_page, mm, address);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
@@ -677,7 +680,8 @@ static int try_to_unmap_one(struct page 
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-                       (ptep_clear_flush_young(vma, address, pte)))) {
+                       (ptep_clear_flush_young(vma, address, pte) |
+                               mmu_notifier_age_page(mm, address)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }
@@ -685,6 +689,7 @@ static int try_to_unmap_one(struct page 
        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);
+       mmu_notifier(invalidate_page, mm, address);
 
        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
@@ -809,12 +814,14 @@ static void try_to_unmap_cluster(unsigne
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));
 
-               if (ptep_clear_flush_young(vma, address, pte))
+               if (ptep_clear_flush_young(vma, address, pte) |
+                   mmu_notifier_age_page(mm, address))
                        continue;
 
                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);
+               mmu_notifier(invalidate_page, mm, address);
 
                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
