Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=0889eba5b38f66d7d892a167d88284daddd3d43b
Commit:     0889eba5b38f66d7d892a167d88284daddd3d43b
Parent:     29c71111d0557385328211b130246a90f9223b46
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Tue Oct 16 01:24:15 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Tue Oct 16 09:42:51 2007 -0700

    x86_64: SPARSEMEM_VMEMMAP 2M page size support
    
    x86_64 uses 2M page table entries to map its 1-1 kernel space.  We also
    implement the virtual memmap using 2M page table entries.  So there is no
    additional runtime overhead over FLATMEM, initialisation is slightly more
    complex.  As FLATMEM still references memory to obtain the mem_map pointer and
    SPARSEMEM_VMEMMAP uses a compile time constant, SPARSEMEM_VMEMMAP should be
    superior.
    
    With this SPARSEMEM becomes the most efficient way of handling virt_to_page,
    pfn_to_page and friends for UP, SMP and NUMA on x86_64.
    
    [EMAIL PROTECTED]: code resplit, style fixups]
    [EMAIL PROTECTED]: vmemmap x86_64: ensure end of section memmap is initialised]
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Signed-off-by: Andy Whitcroft <[EMAIL PROTECTED]>
    Acked-by: Mel Gorman <[EMAIL PROTECTED]>
    Cc: Andi Kleen <[EMAIL PROTECTED]>
    Cc: KAMEZAWA Hiroyuki <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 Documentation/x86_64/mm.txt  |    1 +
 arch/x86/mm/init_64.c        |   45 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86_64/Kconfig          |    1 +
 include/asm-x86/page_64.h    |    1 +
 include/asm-x86/pgtable_64.h |    1 +
 5 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/Documentation/x86_64/mm.txt b/Documentation/x86_64/mm.txt
index f42798e..b89b6d2 100644
--- a/Documentation/x86_64/mm.txt
+++ b/Documentation/x86_64/mm.txt
@@ -9,6 +9,7 @@ ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
 ffff810000000000 - ffffc0ffffffffff (=46 bits) direct mapping of all phys. memory
 ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole
 ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space
+ffffe20000000000 - ffffe2ffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffffff80000000 - ffffffff82800000 (=40 MB)   kernel text mapping, from phys 0
 ... unused hole ...
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 458893b..7d4fc63 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -748,3 +748,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
                return "[vsyscall]";
        return NULL;
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
+ */
+int __meminit vmemmap_populate(struct page *start_page,
+                                               unsigned long size, int node)
+{
+       unsigned long addr = (unsigned long)start_page;
+       unsigned long end = (unsigned long)(start_page + size);
+       unsigned long next;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       for (; addr < end; addr = next) {
+               next = pmd_addr_end(addr, end);
+
+               pgd = vmemmap_pgd_populate(addr, node);
+               if (!pgd)
+                       return -ENOMEM;
+               pud = vmemmap_pud_populate(pgd, addr, node);
+               if (!pud)
+                       return -ENOMEM;
+
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(*pmd)) {
+                       pte_t entry;
+                       void *p = vmemmap_alloc_block(PMD_SIZE, node);
+                       if (!p)
+                               return -ENOMEM;
+
+                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+                       mk_pte_huge(entry);
+                       set_pmd(pmd, __pmd(pte_val(entry)));
+
+                       printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
+                               addr, addr + PMD_SIZE - 1, p, node);
+               } else
+                       vmemmap_verify((pte_t *)pmd, node, addr, next);
+       }
+
+       return 0;
+}
+#endif
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index cf013cb..8c83dbe 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -409,6 +409,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        depends on (NUMA || EXPERIMENTAL)
+       select SPARSEMEM_VMEMMAP_ENABLE
 
 config ARCH_MEMORY_PROBE
        def_bool y
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 88adf1a..c3b52bc 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -134,6 +134,7 @@ extern unsigned long __phys_addr(unsigned long);
         VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define __HAVE_ARCH_GATE_AREA 1        
+#define vmemmap ((struct page *)VMEMMAP_START)
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 57dd6b3..a79f535 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -137,6 +137,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define MAXMEM          _AC(0x3fffffffffff, UL)
 #define VMALLOC_START    _AC(0xffffc20000000000, UL)
 #define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START   _AC(0xffffe20000000000, UL)
 #define MODULES_VADDR    _AC(0xffffffff88000000, UL)
 #define MODULES_END      _AC(0xfffffffffff00000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to