This reverts commit 86c4127b5cbdcf9a3d182c53d48159153af40c40.

This commit added mm_struct->nr_ptds, which counts the number of page
table directories used by a process. It was ported from PCS6.

In PCS6 it is used for precharging page tables to UB_KMEMSIZE on fork.
Plus, the counter is reported in /proc/pid/status as VmPTD.

I'm not going to use this counter in the upcoming implementation of page
tables accounting, so I'm reverting the patch that brought it. Regarding
VmPTD, there is a mainstream commit dc6c9a35b66b5 ("mm: account pmd page
tables to the process"), which adds a similar statistic - VmPMD. If we
need it, we can always backport the commit.

Signed-off-by: Vladimir Davydov <vdavy...@parallels.com>
---
 arch/x86/mm/pgtable.c    |  5 -----
 fs/proc/task_mmu.c       |  2 --
 include/linux/mm_types.h |  1 -
 kernel/fork.c            |  1 -
 mm/memory.c              | 18 +++---------------
 5 files changed, 3 insertions(+), 24 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 99b13683c06a..d5aa594e6332 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -246,7 +246,6 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 
                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
-                       mm->nr_ptds--;
                }
        }
 }
@@ -271,7 +270,6 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
                               sizeof(pmd_t) * PTRS_PER_PMD);
 
                pud_populate(mm, pud, pmd);
-               mm->nr_ptds++;
        }
 }
 
@@ -305,8 +303,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
        spin_unlock(&pgd_lock);
 
-       mm->nr_ptds++;
-
        return pgd;
 
 out_free_pmds:
@@ -323,7 +319,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
-       mm->nr_ptds--;
 }
 
 /*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e3d72d35dfef..45179ba7a441 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -53,7 +53,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
-               "VmPTD:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                total_vm << (PAGE_SHIFT-10),
@@ -65,7 +64,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE * sizeof(pte_t) *
                 atomic_long_read(&mm->nr_ptes)) >> 10,
-               mm->nr_ptds << (PAGE_SHIFT-10),
                swap << (PAGE_SHIFT-10));
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b7bd8aac1d39..96adb9428864 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -360,7 +360,6 @@ struct mm_struct {
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        atomic_long_t nr_ptes;                  /* Page table pages */
-       unsigned long nr_ptds;
        int map_count;                          /* number of VMAs */
 
        spinlock_t page_table_lock;             /* Protects page tables and some counters */
diff --git a/kernel/fork.c b/kernel/fork.c
index e2ea938603fd..505fa21d70aa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -577,7 +577,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_state = NULL;
        atomic_long_set(&mm->nr_ptes, 0);
-       mm->nr_ptds = 0;
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
diff --git a/mm/memory.c b/mm/memory.c
index a1c1000bdcef..3fba5739780a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -448,9 +448,6 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
-#ifndef __PAGETABLE_PMD_FOLDED
-       tlb->mm->nr_ptds--;
-#endif
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -484,9 +481,6 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
-#ifndef __PAGETABLE_PUD_FOLDED
-       tlb->mm->nr_ptds--;
-#endif
 }
 
 /*
@@ -3832,10 +3826,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
                pud_free(mm, new);
-       else {
+       else
                pgd_populate(mm, pgd, new);
-               mm->nr_ptds++;
-       }
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
@@ -3858,17 +3850,13 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud))          /* Another has populated it */
                pmd_free(mm, new);
-       else {
+       else
                pud_populate(mm, pud, new);
-               mm->nr_ptds++;
-       }
 #else
        if (pgd_present(*pud))          /* Another has populated it */
                pmd_free(mm, new);
-       else {
+       else
                pgd_populate(mm, pud, new);
-               mm->nr_ptds++;
-       }
 #endif /* __ARCH_HAS_4LEVEL_HACK */
        spin_unlock(&mm->page_table_lock);
        return 0;
-- 
2.1.4

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to