Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=aee16b3cee2746880e40945a9b5bff4f309cfbc4
Commit:     aee16b3cee2746880e40945a9b5bff4f309cfbc4
Parent:     eb3a1e1145ca8f12372c7c96aa0702d86a9002a9
Author:     Jeremy Fitzhardinge <[EMAIL PROTECTED]>
AuthorDate: Sun May 6 14:48:54 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Mon May 7 12:12:51 2007 -0700

    Add apply_to_page_range() which applies a function to a pte range
    
    Add a new mm function apply_to_page_range() which applies a given function
    to every pte in a given virtual address range in a given mm structure.
    This is a generic alternative to cut-and-pasting the Linux idiomatic
    pagetable walking code in every place that a sequence of PTEs must be
    accessed.
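    
    As an illustration of the new interface (a hypothetical sketch, not part
    of this patch): a pte_fn_t callback receives each pte in turn, the page
    backing the pte table, the virtual address, and an opaque data pointer.
    The names count_pte and nr_present below are invented:
    
        /* Hypothetical pte_fn_t callback: count present PTEs in a range.
         * Returning non-zero from the callback aborts the walk, and the
         * error is passed back to the caller of apply_to_page_range().
         */
        static int count_pte(pte_t *pte, struct page *pmd_page,
                             unsigned long addr, void *data)
        {
                unsigned long *nr_present = data;
    
                if (pte_present(*pte))
                        (*nr_present)++;
                return 0;
        }
    
        unsigned long nr_present = 0;
        int err = apply_to_page_range(mm, start, nr_pages * PAGE_SIZE,
                                      count_pte, &nr_present);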
    
    Although this interface is intended to be useful in a wide range of
    situations, it is currently used specifically by several Xen subsystems, for
    example: to ensure that pagetables have been allocated for a virtual address
    range, and to construct batched special pagetable update requests to map I/O
    memory (in ioremap()).
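    
    For the first of those uses, note that the walk itself allocates any
    missing pud/pmd/pte levels (via pud_alloc/pmd_alloc/pte_alloc_*), so a
    callback that does nothing is enough to populate the pagetables covering
    a range. A hypothetical sketch:
    
        /* Hypothetical no-op callback: pagetable allocation happens as a
         * side effect of walking the range, so there is nothing to do
         * per PTE.
         */
        static int noop_fn(pte_t *pte, struct page *pmd_page,
                           unsigned long addr, void *data)
        {
                return 0;
        }
    
        int err = apply_to_page_range(&init_mm, vstart, vend - vstart,
                                      noop_fn, NULL);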
    
    [EMAIL PROTECTED]: fix warning, unpleasantly]
    Signed-off-by: Ian Pratt <[EMAIL PROTECTED]>
    Signed-off-by: Christian Limpach <[EMAIL PROTECTED]>
    Signed-off-by: Chris Wright <[EMAIL PROTECTED]>
    Signed-off-by: Jeremy Fitzhardinge <[EMAIL PROTECTED]>
    Cc: Christoph Lameter <[EMAIL PROTECTED]>
    Cc: Matt Mackall <[EMAIL PROTECTED]>
    Acked-by: Ingo Molnar <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 include/linux/mm.h |    5 +++
 mm/memory.c        |   94 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+), 0 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60e0e4a..7bf0bd8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1130,6 +1130,11 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET       0x04    /* do get_page on page */
 #define FOLL_ANON      0x08    /* give ZERO_PAGE if no pgtable */
 
+typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
+                       void *data);
+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+                              unsigned long size, pte_fn_t fn, void *data);
+
 #ifdef CONFIG_PROC_FS
 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 #else
diff --git a/mm/memory.c b/mm/memory.c
index e7066e7..044feb7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1448,6 +1448,100 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pte_t *pte;
+       int err;
+       struct page *pmd_page;
+       spinlock_t *ptl = ptl;          /* Suppress gcc warning */
+
+       pte = (mm == &init_mm) ?
+               pte_alloc_kernel(pmd, addr) :
+               pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+               return -ENOMEM;
+
+       BUG_ON(pmd_huge(*pmd));
+
+       pmd_page = pmd_page(*pmd);
+
+       do {
+               err = fn(pte, pmd_page, addr, data);
+               if (err)
+                       break;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (mm != &init_mm)
+               pte_unmap_unlock(pte-1, ptl);
+       return err;
+}
+
+static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pmd_t *pmd;
+       unsigned long next;
+       int err;
+
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+               next = pmd_addr_end(addr, end);
+               err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pmd++, addr = next, addr != end);
+       return err;
+}
+
+static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pud_t *pud;
+       unsigned long next;
+       int err;
+
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+               next = pud_addr_end(addr, end);
+               err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pud++, addr = next, addr != end);
+       return err;
+}
+
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
+ */
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+                       unsigned long size, pte_fn_t fn, void *data)
+{
+       pgd_t *pgd;
+       unsigned long next;
+       unsigned long end = addr + size;
+       int err;
+
+       BUG_ON(addr >= end);
+       pgd = pgd_offset(mm, addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pgd++, addr = next, addr != end);
+       return err;
+}
+EXPORT_SYMBOL_GPL(apply_to_page_range);
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically.  Before making any commitment, on
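
The ioremap-style use mentioned in the commit message reduces to a callback
that constructs and installs one PTE per page. A simplified, hypothetical
sketch follows; the real Xen caller builds batched pagetable update requests
rather than writing PTEs directly, and "struct remap_data" and "remap_pte_fn"
are invented names for illustration:

	/* Hypothetical ioremap-style caller: map a contiguous physical
	 * range at vaddr by installing one PTE per page walked.
	 */
	struct remap_data {
		unsigned long pfn;	/* next page frame to map */
		pgprot_t prot;
	};

	static int remap_pte_fn(pte_t *pte, struct page *pmd_page,
				unsigned long addr, void *data)
	{
		struct remap_data *rd = data;

		set_pte_at(&init_mm, addr, pte, pfn_pte(rd->pfn++, rd->prot));
		return 0;
	}

	struct remap_data rd = { .pfn = phys >> PAGE_SHIFT,
				 .prot = PAGE_KERNEL };
	int err = apply_to_page_range(&init_mm, vaddr, size,
				      remap_pte_fn, &rd);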