The patch titled
smaps: use new ptwalks
has been added to the -mm tree. Its filename is
smaps-use-new-ptwalks.patch
Patches currently in -mm which might be from [EMAIL PROTECTED] are
swap-update-swapfile-i_sem-comment.patch
swap-correct-swapfile-nr_good_pages.patch
swap-move-destroy_swap_extents-calls.patch
swap-swap-extent-list-is-ordered.patch
swap-show-span-of-swap-extents.patch
swap-swap-unsigned-int-consistency.patch
swap-freeing-update-swap_listnext.patch
swap-get_swap_page-drop-swap_list_lock.patch
swap-scan_swap_map-restyled.patch
swap-scan_swap_map-drop-swap_device_lock.patch
swap-scan_swap_map-latency-breaks.patch
swap-swap_lock-replace-listdevice.patch
swap-update-swsusp-use-of-swap_info.patch
delete-from_swap_cache-bug_ons.patch
smaps-say-vma-not-map.patch
smaps-use-new-ptwalks.patch
From: Hugh Dickins <[EMAIL PROTECTED]>
/proc/$pid/smaps code was based on the old ptwalking style just as we
converted over to the p?d_addr_end style: convert it to the new style.
Add an easy cond_resched_lock at the end of each page table: looking at the
struct page of every pte will be heavy on the cache, and others are likely to
hack on this example, so we had better limit its still-poor latency.
Signed-off-by: Hugh Dickins <[EMAIL PROTECTED]>
Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
---
fs/proc/task_mmu.c | 150 ++++++++++++++++++++---------------------------------
1 files changed, 58 insertions(+), 92 deletions(-)
diff -puN fs/proc/task_mmu.c~smaps-use-new-ptwalks fs/proc/task_mmu.c
--- devel/fs/proc/task_mmu.c~smaps-use-new-ptwalks 2005-07-08 22:40:52.000000000 -0700
+++ devel-akpm/fs/proc/task_mmu.c 2005-07-08 22:40:52.000000000 -0700
@@ -158,120 +158,88 @@ struct mem_size_stats
unsigned long private_dirty;
};
-static void smaps_pte_range(pmd_t *pmd,
- unsigned long address,
- unsigned long size,
- struct mem_size_stats *mss)
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
{
- pte_t *ptep, pte;
- unsigned long end;
+ pte_t *pte, ptent;
unsigned long pfn;
struct page *page;
- if (pmd_none(*pmd))
- return;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- ptep = pte_offset_map(pmd, address);
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
+ pte = pte_offset_map(pmd, addr);
do {
- pte = *ptep;
- address += PAGE_SIZE;
- ptep++;
-
- if (pte_none(pte) || (!pte_present(pte)))
+ ptent = *pte;
+ if (pte_none(ptent) || !pte_present(ptent))
continue;
mss->resident += PAGE_SIZE;
- pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (page_count(page) >= 2) {
- if (pte_dirty(pte))
- mss->shared_dirty += PAGE_SIZE;
- else
- mss->shared_clean += PAGE_SIZE;
- } else {
- if (pte_dirty(pte))
- mss->private_dirty += PAGE_SIZE;
- else
- mss->private_clean += PAGE_SIZE;
- }
+ pfn = pte_pfn(ptent);
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+ if (page_count(page) >= 2) {
+ if (pte_dirty(ptent))
+ mss->shared_dirty += PAGE_SIZE;
+ else
+ mss->shared_clean += PAGE_SIZE;
+ } else {
+ if (pte_dirty(ptent))
+ mss->private_dirty += PAGE_SIZE;
+ else
+ mss->private_clean += PAGE_SIZE;
}
- } while (address < end);
- pte_unmap(ptep - 1);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(pte - 1);
+ cond_resched_lock(&vma->vm_mm->page_table_lock);
}
-static void smaps_pmd_range(pud_t *pud,
- unsigned long address,
- unsigned long size,
- struct mem_size_stats *mss)
+static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
{
pmd_t *pmd;
- unsigned long end;
+ unsigned long next;
- if (pud_none(*pud))
- return;
- if (unlikely(pud_bad(*pud))) {
- pud_ERROR(*pud);
- pud_clear(pud);
- return;
- }
- pmd = pmd_offset(pud, address);
- address &= ~PUD_MASK;
- end = address + size;
- if (end > PUD_SIZE)
- end = PUD_SIZE;
+ pmd = pmd_offset(pud, addr);
do {
- smaps_pte_range(pmd, address, end - address, mss);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ smaps_pte_range(vma, pmd, addr, next, mss);
+ } while (pmd++, addr = next, addr != end);
}
-static void smaps_pud_range(pgd_t *pgd,
- unsigned long address,
- unsigned long size,
- struct mem_size_stats *mss)
+static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
{
pud_t *pud;
- unsigned long end;
+ unsigned long next;
- if (pgd_none(*pgd))
- return;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return;
- }
- pud = pud_offset(pgd, address);
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
+ pud = pud_offset(pgd, addr);
do {
- smaps_pmd_range(pud, address, end - address, mss);
- address = (address + PUD_SIZE) & PUD_MASK;
- pud++;
- } while (address < end);
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ smaps_pmd_range(vma, pud, addr, next, mss);
+ } while (pud++, addr = next, addr != end);
}
-static void smaps_pgd_range(pgd_t *pgd,
- unsigned long start_address,
- unsigned long end_address,
- struct mem_size_stats *mss)
+static inline void smaps_pgd_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
{
+ pgd_t *pgd;
+ unsigned long next;
+
+ pgd = pgd_offset(vma->vm_mm, addr);
do {
- smaps_pud_range(pgd, start_address, end_address - start_address, mss);
- start_address = (start_address + PGDIR_SIZE) & PGDIR_MASK;
- pgd++;
- } while (start_address < end_address);
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ smaps_pud_range(vma, pgd, addr, next, mss);
+ } while (pgd++, addr = next, addr != end);
}
static int show_smap(struct seq_file *m, void *v)
@@ -286,10 +254,8 @@ static int show_smap(struct seq_file *m,
memset(&mss, 0, sizeof mss);
if (mm) {
- pgd_t *pgd;
spin_lock(&mm->page_table_lock);
- pgd = pgd_offset(mm, vma->vm_start);
- smaps_pgd_range(pgd, vma->vm_start, vma->vm_end, &mss);
+ smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
spin_unlock(&mm->page_table_lock);
}
_
-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html