Revert "x86_64: Quicklist support for x86_64"

2007-09-21 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=da8f153e51290e7438ba7da66234a864e5d3e1c1
Commit: da8f153e51290e7438ba7da66234a864e5d3e1c1
Parent: 077a6c26696c63305eebafbb71890c95e2750b6d
Author: Linus Torvalds <[EMAIL PROTECTED]>
AuthorDate: Fri Sep 21 12:09:41 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Fri Sep 21 12:09:41 2007 -0700

Revert "x86_64: Quicklist support for x86_64"

This reverts commit 34feb2c83beb3bdf13535a36770f7e50b47ef299.

Suresh Siddha points out that this one breaks the fundamental
requirement that you cannot free page table pages before the TLB caches
are flushed.  The quicklists do not give the same kinds of guarantees
that the mmu_gather structure does, at least not in NUMA configurations.

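To make that ordering requirement concrete, here is a minimal user-space sketch in
plain C (not kernel code; every function and name in it is a made-up stand-in, not a
kernel API) of the rule the revert restores: a page-table page may only be handed
back for reuse after every CPU has flushed its TLB, which is the guarantee the
mmu_gather path gives and a per-CPU quicklist does not.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* Stand-in for an IPI-driven remote TLB flush: once this returns, no
 * CPU can still reach the old page-table page through a stale TLB entry. */
static void flush_remote_tlbs(void)
{
	printf("remote TLBs flushed\n");
}

/* mmu_gather-style teardown: make the page unreachable, flush, then free. */
static void safe_pt_free(void *pt_page)
{
	flush_remote_tlbs();	/* 1: no CPU can walk the page any more   */
	free(pt_page);		/* 2: only now may the allocator reuse it */
}

/* quicklist-style teardown (the problem): the page goes onto a per-CPU
 * freelist immediately, so another allocation can grab and rewrite it
 * while a remote TLB still caches translations pointing into it. */
static void *quick_freelist;

static void unsafe_pt_free(void *pt_page)
{
	quick_freelist = pt_page;	/* reusable right away - too early */
}

int main(void)
{
	void *pt = calloc(1, PAGE_SZ);	/* pretend page-table page */

	safe_pt_free(pt);
	(void)unsafe_pt_free;		/* kept only for contrast */
	return 0;
}
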
Requested-by: Suresh Siddha <[EMAIL PROTECTED]>
Acked-by: Andi Kleen <[EMAIL PROTECTED]>
Cc: Andrew Morton <[EMAIL PROTECTED]>
Cc: Christoph Lameter <[EMAIL PROTECTED]>
Cc: Asit Mallick <[EMAIL PROTECTED]>
Cc: Tony Luck <[EMAIL PROTECTED]>
Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 arch/x86_64/Kconfig          |    8 --------
 arch/x86_64/kernel/process.c |    1 -
 arch/x86_64/kernel/smp.c     |    2 +-
 include/asm-x86_64/pgalloc.h |   73 ++++++++++----------------------
 include/asm-x86_64/pgtable.h |    1 +
 5 files changed, 26 insertions(+), 59 deletions(-)

diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index ffa0364..b4d9089 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -60,14 +60,6 @@ config ZONE_DMA
bool
default y
 
-config QUICKLIST
-   bool
-   default y
-
-config NR_QUICK
-   int
-   default 2
-
 config ISA
bool
 
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 2842f50..9895655 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -208,7 +208,6 @@ void cpu_idle (void)
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
 
-   check_pgt_cache();
rmb();
idle = pm_idle;
if (!idle)
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 673a300..df4a828 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
}
if (!cpus_empty(cpu_mask))
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-   check_pgt_cache();
+
preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_mm);
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index b467be6..8bb5646 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -4,10 +4,6 @@
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
-#include <linux/quicklist.h>
-
-#define QUICK_PGD 0 /* We preserve special mappings over free */
-#define QUICK_PT 1 /* Other page table pages that are zero on free */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -24,23 +20,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 static inline void pmd_free(pmd_t *pmd)
 {
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-   quicklist_free(QUICK_PT, NULL, pmd);
+   free_page((unsigned long)pmd);
 }
 
 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-   return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+   return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-   return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+   return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pud_free (pud_t *pud)
 {
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-   quicklist_free(QUICK_PT, NULL, pud);
+   free_page((unsigned long)pud);
 }
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -61,57 +57,41 @@ static inline void pgd_list_del(pgd_t *pgd)
spin_unlock(&pgd_lock);
 }
 
-static inline void pgd_ctor(void *x)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
unsigned boundary;
-   pgd_t *pgd = x;
-   struct page *page = virt_to_page(pgd);
-
+   pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+   if (!pgd)
+   return NULL;
+   pgd_list_add(pgd);
/*
 * Copy kernel pointers in from init.
+* Could keep a freelist or slab cache of those because the kernel
+* part never changes.
 */
boundary = pgd_index(__PAGE_OFFSET);
+   memset(pgd, 0, boundary * sizeof(pgd_t));
memcpy(pgd + boundary,
-   

x86_64: Quicklist support for x86_64

2007-07-21 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=34feb2c83beb3bdf13535a36770f7e50b47ef299
Commit: 34feb2c83beb3bdf13535a36770f7e50b47ef299
Parent: f0a7a5c93dfd1c0348dbbdb6f22cb82d99079c93
Author: Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Sat Jul 21 17:10:30 2007 +0200
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Sat Jul 21 18:37:09 2007 -0700

x86_64: Quicklist support for x86_64

This adds caching of pgds, puds, pmds and ptes.  That way we can avoid costly
zeroing and initialization of special mappings in the pgd.

A second quicklist is useful to separate out PGD handling.  We can carry the
initialized pgds over to the next process needing them.

Also clean up the pgd_list handling to use regular list macros.  There is no
need anymore to avoid the lru field.

Move the add/removal of the pgds to the pgdlist into the constructor /
destructor.  That way the implementation is congruent with i386.

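For context on what was added here (and reverted above), the sketch below is an
illustrative user-space model of the quicklist idea only, not the real
<linux/quicklist.h> implementation: freed page-table pages are parked on a small
per-list cache so the next allocation can skip the zeroing and setup work, with one
list for zero-on-free pages and another for pgds whose kernel half is preserved.
Apart from QUICK_PGD/QUICK_PT, every name in it is invented for the example.

#include <stdlib.h>
#include <string.h>

#define PAGE_SZ   4096
#define QUICK_PGD 0	/* pgds: kernel mappings preserved over free */
#define QUICK_PT  1	/* other page-table pages: cached in zeroed state */
#define NR_QUICK  2
#define CACHE_MAX 16

struct quicklist {
	void *page[CACHE_MAX];
	int nr;
};

static struct quicklist qlist[NR_QUICK];

/* Fast path: reuse a cached, already-initialized page.
 * Slow path: fall back to a freshly zeroed allocation. */
static void *ql_alloc(int nr)
{
	struct quicklist *q = &qlist[nr];

	if (q->nr)
		return q->page[--q->nr];
	return calloc(1, PAGE_SZ);
}

/* Cache the page instead of freeing it, so its initialization (zeroing,
 * or the copied kernel pgd entries) carries over to the next ql_alloc()
 * from the same list.  In this model PT pages are re-zeroed as they are
 * cached; the patch's QUICK_PT list assumes they are already zero on free. */
static void ql_free(int nr, void *page)
{
	struct quicklist *q = &qlist[nr];

	if (q->nr < CACHE_MAX) {
		if (nr == QUICK_PT)
			memset(page, 0, PAGE_SZ);
		q->page[q->nr++] = page;
		return;
	}
	free(page);
}

int main(void)
{
	void *pmd = ql_alloc(QUICK_PT);	/* e.g. a pmd page */

	ql_free(QUICK_PT, pmd);		/* parked on the cache, still zero */
	free(ql_alloc(QUICK_PT));	/* comes straight off the cache */
	return 0;
}
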
Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
Cc: "David S. Miller" <[EMAIL PROTECTED]>
Cc: "Luck, Tony" <[EMAIL PROTECTED]>
Acked-by: William Lee Irwin III <[EMAIL PROTECTED]>
Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>
Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 arch/x86_64/Kconfig          |    8 ++++++++
 arch/x86_64/kernel/process.c |    1 +
 arch/x86_64/kernel/smp.c     |    2 +-
 include/asm-x86_64/pgalloc.h |   73 ++++++++++++++++----------------
 include/asm-x86_64/pgtable.h |    1 -
 5 files changed, 59 insertions(+), 26 deletions(-)

diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index fca1a85..45f82ae 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -60,6 +60,14 @@ config ZONE_DMA
bool
default y
 
+config QUICKLIST
+   bool
+   default y
+
+config NR_QUICK
+   int
+   default 2
+
 config ISA
bool
 
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 5909039..180f4c0 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -207,6 +207,7 @@ void cpu_idle (void)
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
 
+   check_pgt_cache();
rmb();
idle = pm_idle;
if (!idle)
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index df4a828..673a300 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
}
if (!cpus_empty(cpu_mask))
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-
+   check_pgt_cache();
preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_mm);
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 8bb5646..b467be6 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -4,6 +4,10 @@
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
+#include <linux/quicklist.h>
+
+#define QUICK_PGD 0 /* We preserve special mappings over free */
+#define QUICK_PT 1 /* Other page table pages that are zero on free */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -20,23 +24,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 static inline void pmd_free(pmd_t *pmd)
 {
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-   free_page((unsigned long)pmd);
+   quicklist_free(QUICK_PT, NULL, pmd);
 }
 
 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-   return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+   return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-   return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+   return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
 }
 
 static inline void pud_free (pud_t *pud)
 {
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-   free_page((unsigned long)pud);
+   quicklist_free(QUICK_PT, NULL, pud);
 }
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -57,41 +61,57 @@ static inline void pgd_list_del(pgd_t *pgd)
spin_unlock(&pgd_lock);
 }
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void pgd_ctor(void *x)
 {
unsigned boundary;
-   pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-   if (!pgd)
-   return NULL;
-   pgd_list_add(pgd);
+   pgd_t *pgd = x;
+   struct page *page = virt_to_page(pgd);
+
/*
 * Copy kernel pointers in from init.
-