Detect whether we need to use 1G or 2M pages and store the result in
page_size_mask.

Probe this only once, instead of recomputing it on every
init_memory_mapping() call.

Suggested-by: Ingo Molnar <mi...@elte.hu>
Signed-off-by: Yinghai Lu <ying...@kernel.org>
---
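
Note for reviewers (not part of the commit message): page_size_mask is a
bitfield keyed by page-table level, using PG_LEVEL_2M/PG_LEVEL_1G from
<asm/pgtable_types.h>. As a minimal sketch of how a caller would consult
the probed mask -- pick_page_size() below is a hypothetical helper for
illustration only, not part of this patch:

	/* Sketch: choose the largest mapping size the probed mask allows. */
	static unsigned long pick_page_size(unsigned long mask)
	{
		if (mask & (1 << PG_LEVEL_1G))
			return PUD_SIZE;	/* 1G mappings usable */
		if (mask & (1 << PG_LEVEL_2M))
			return PMD_SIZE;	/* 2M mappings usable */
		return PAGE_SIZE;		/* fall back to 4K */
	}
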
 arch/x86/include/asm/pgtable.h |    1 +
 arch/x86/kernel/setup.c        |    1 +
 arch/x86/mm/init.c             |   66 +++++++++++++++++++---------------------
 3 files changed, 33 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 49afb3f..e47e4db 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -597,6 +597,7 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
+void probe_page_size_mask(void);
 
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f4b9b80..d6e8c03 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -912,6 +912,7 @@ void __init setup_arch(char **cmdline_p)
        setup_real_mode();
 
        init_gbpages();
+       probe_page_size_mask();
 
        /* max_pfn_mapped is updated here */
        max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e0e6990..838e9bc 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,8 +35,10 @@ struct map_range {
        unsigned page_size_mask;
 };
 
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-                                         int use_pse, int use_gbpages)
+static int page_size_mask;
+
+static void __init find_early_table_space(struct map_range *mr,
+                                         unsigned long end)
 {
        unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
        phys_addr_t base;
@@ -44,7 +46,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 
-       if (use_gbpages) {
+       if (page_size_mask & (1 << PG_LEVEL_1G)) {
                unsigned long extra;
 
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
@@ -54,7 +56,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 
        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-       if (use_pse) {
+       if (page_size_mask & (1 << PG_LEVEL_2M)) {
                unsigned long extra;
 
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -90,6 +92,30 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
                (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
+void probe_page_size_mask(void)
+{
+#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+       /*
+        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+       if (direct_gbpages)
+               page_size_mask |= 1 << PG_LEVEL_1G;
+       if (cpu_has_pse)
+               page_size_mask |= 1 << PG_LEVEL_2M;
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse)
+               set_in_cr4(X86_CR4_PSE);
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __supported_pte_mask |= _PAGE_GLOBAL;
+       }
+}
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
        memblock_reserve(start, end - start);
@@ -125,45 +151,15 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
 {
-       unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
        unsigned long ret = 0;
        unsigned long pos;
-
        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
-       int use_pse, use_gbpages;
 
        printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
               start, end - 1);
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
-       /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-        * This will simplify cpa(), which otherwise needs to support splitting
-        * large pages into small in interrupt context, etc.
-        */
-       use_pse = use_gbpages = 0;
-#else
-       use_pse = cpu_has_pse;
-       use_gbpages = direct_gbpages;
-#endif
-
-       /* Enable PSE if available */
-       if (cpu_has_pse)
-               set_in_cr4(X86_CR4_PSE);
-
-       /* Enable PGE if available */
-       if (cpu_has_pge) {
-               set_in_cr4(X86_CR4_PGE);
-               __supported_pte_mask |= _PAGE_GLOBAL;
-       }
-
-       if (use_gbpages)
-               page_size_mask |= 1 << PG_LEVEL_1G;
-       if (use_pse)
-               page_size_mask |= 1 << PG_LEVEL_2M;
-
        memset(mr, 0, sizeof(mr));
        nr_range = 0;
 
@@ -267,7 +263,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
         * nodes are discovered.
         */
        if (!after_bootmem)
-               find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+               find_early_table_space(&mr[0], end);
 
        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-- 
1.7.7