ChangeSet 1.2181.1.11, 2005/03/29 20:41:34-08:00, [EMAIL PROTECTED]

        Merge sunset.davemloft.net:/home/davem/src/BK/sparcwork-2.6
        into sunset.davemloft.net:/home/davem/src/BK/sparc-2.6



 arch/sparc64/kernel/sparc64_ksyms.c |   13 ++++++
 arch/sparc64/mm/hugetlbpage.c       |   39 ++++++++++++++++++++
 include/asm-sparc64/pgalloc.h       |   67 +-----------------------------------
 include/asm-sparc64/pgtable.h       |   38 ++++----------------
 include/asm-sparc64/tlb.h           |    4 --
 5 files changed, 64 insertions(+), 97 deletions(-)


diff -Nru a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
--- a/arch/sparc64/kernel/sparc64_ksyms.c       2005-04-03 21:18:03 -07:00
+++ b/arch/sparc64/kernel/sparc64_ksyms.c       2005-04-03 21:18:03 -07:00
@@ -59,6 +59,7 @@
 #include <asm/ns87303.h>
 #include <asm/timer.h>
 #include <asm/cpudata.h>
+#include <asm/rwsem.h>
 
 struct poll {
        int fd;
@@ -174,6 +175,15 @@
 EXPORT_SYMBOL(down_interruptible);
 EXPORT_SYMBOL(up);
 
+/* RW semaphores */
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+
 /* Atomic counter implementation. */
 EXPORT_SYMBOL(atomic_add);
 EXPORT_SYMBOL(atomic_add_ret);
@@ -209,8 +219,11 @@
 EXPORT_SYMBOL(tlb_type);
 EXPORT_SYMBOL(get_fb_unmapped_area);
 EXPORT_SYMBOL(flush_icache_range);
+
 EXPORT_SYMBOL(flush_dcache_page);
+#ifdef DCACHE_ALIASING_POSSIBLE
 EXPORT_SYMBOL(__flush_dcache_range);
+#endif
 
 EXPORT_SYMBOL(mostek_lock);
 EXPORT_SYMBOL(mstk48t02_regs);
diff -Nru a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
--- a/arch/sparc64/mm/hugetlbpage.c     2005-04-03 21:18:03 -07:00
+++ b/arch/sparc64/mm/hugetlbpage.c     2005-04-03 21:18:03 -07:00
@@ -20,6 +20,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
@@ -217,11 +218,49 @@
        flush_tlb_range(vma, start, end);
 }
 
+static void context_reload(void *__data)
+{
+       struct mm_struct *mm = __data;
+
+       if (mm == current->mm)
+               load_secondary_context(mm);
+}
+
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
+
+       /* On UltraSPARC-III+ and later, configure the second half of
+        * the Data-TLB for huge pages.
+        */
+       if (tlb_type == cheetah_plus) {
+               unsigned long ctx;
+
+               spin_lock(&ctx_alloc_lock);
+               ctx = mm->context.sparc64_ctx_val;
+               ctx &= ~CTX_PGSZ_MASK;
+               ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+               ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+               if (ctx != mm->context.sparc64_ctx_val) {
+                       /* When changing the page size fields, we
+                        * must perform a context flush so that no
+                        * stale entries match.  This flush must
+                        * occur with the original context register
+                        * settings.
+                        */
+                       do_flush_tlb_mm(mm);
+
+                       /* Reload the context register of all processors
+                        * also executing in this address space.
+                        */
+                       mm->context.sparc64_ctx_val = ctx;
+                       on_each_cpu(context_reload, mm, 0, 0);
+               }
+               spin_unlock(&ctx_alloc_lock);
+       }
 
        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);
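
For readers of the hunk above: hugetlb_prefault() now rewrites
mm->context.sparc64_ctx_val so that field 0 of the context register keeps the
base page size while field 1 advertises the huge page size, flushing the old
TLB entries first and then reloading the secondary context on every CPU that
is running this mm.  Below is a minimal userspace sketch of just the bit
manipulation; the field positions and size encodings are assumed placeholder
values, not the kernel's real CTX_PGSZ_* constants.

/* Userspace sketch only: mimic how the hunk composes the new context value.
 * Field positions and size encodings are ASSUMED for illustration. */
#include <stdio.h>

#define CTX_PGSZ0_SHIFT   61ULL                               /* assumed */
#define CTX_PGSZ1_SHIFT   58ULL                               /* assumed */
#define CTX_PGSZ_MASK     ((0x7ULL << CTX_PGSZ0_SHIFT) | \
                           (0x7ULL << CTX_PGSZ1_SHIFT))
#define CTX_PGSZ_BASE     0x0ULL                              /* assumed: 8KB */
#define CTX_PGSZ_HUGE     0x4ULL                              /* assumed: 4MB */

/* Clear both page-size fields, then select base pages in field 0 and huge
 * pages in field 1, mirroring the hugetlb_prefault() hunk above. */
static unsigned long long hugepage_ctx(unsigned long long ctx)
{
	ctx &= ~CTX_PGSZ_MASK;
	ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
	ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
	return ctx;
}

int main(void)
{
	unsigned long long old = 0x123ULL;	/* pretend context number */
	printf("%#llx -> %#llx\n", old, hugepage_ctx(old));
	return 0;
}

Running it only prints the before/after context values; the flush in
do_flush_tlb_mm() and the cross-call via on_each_cpu() obviously have no
userspace analogue.
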
diff -Nru a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
--- a/include/asm-sparc64/pgalloc.h     2005-04-03 21:18:03 -07:00
+++ b/include/asm-sparc64/pgalloc.h     2005-04-03 21:18:03 -07:00
@@ -9,6 +9,7 @@
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
+#include <asm/cacheflush.h>
 
 /* Page table allocation/freeing. */
 #ifdef CONFIG_SMP
@@ -19,74 +20,12 @@
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
-       unsigned int pgdcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist          (pgt_quicklists.pgd_cache)
 #define pmd_quicklist          ((unsigned long *)0)
 #define pte_quicklist          (pgt_quicklists.pte_cache)
 #define pgtable_cache_size     (pgt_quicklists.pgcache_size)
-#define pgd_cache_size         (pgt_quicklists.pgdcache_size)
-
-#ifndef CONFIG_SMP
-
-static __inline__ void free_pgd_fast(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-
-       preempt_disable();
-       if (!page->lru.prev) {
-               page->lru.next = (void *) pgd_quicklist;
-               pgd_quicklist = (unsigned long *)page;
-       }
-       page->lru.prev = (void *)
-         (((unsigned long)page->lru.prev) |
-          (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1));
-       pgd_cache_size++;
-       preempt_enable();
-}
-
-static __inline__ pgd_t *get_pgd_fast(void)
-{
-        struct page *ret;
-
-       preempt_disable();
-        if ((ret = (struct page *)pgd_quicklist) != NULL) {
-                unsigned long mask = (unsigned long)ret->lru.prev;
-               unsigned long off = 0;
-
-               if (mask & 1)
-                       mask &= ~1;
-               else {
-                       off = PAGE_SIZE / 2;
-                       mask &= ~2;
-               }
-               ret->lru.prev = (void *) mask;
-               if (!mask)
-                       pgd_quicklist = (unsigned long *)ret->lru.next;
-                ret = (struct page *)(__page_address(ret) + off);
-                pgd_cache_size--;
-               preempt_enable();
-        } else {
-               struct page *page;
-
-               preempt_enable();
-               page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-               if (page) {
-                       ret = (struct page *)page_address(page);
-                       page->lru.prev = (void *) 2UL;
-
-                       preempt_disable();
-                       page->lru.next = (void *) pgd_quicklist;
-                       pgd_quicklist = (unsigned long *)page;
-                       pgd_cache_size++;
-                       preempt_enable();
-               }
-        }
-        return (pgd_t *)ret;
-}
-
-#else /* CONFIG_SMP */
 
 static __inline__ void free_pgd_fast(pgd_t *pgd)
 {
@@ -121,9 +60,7 @@
        free_page((unsigned long)pgd);
 }
 
-#endif /* CONFIG_SMP */
-
-#if (L1DCACHE_SIZE > PAGE_SIZE)                        /* is there D$ aliasing problem */
+#ifdef DCACHE_ALIASING_POSSIBLE
 #define VPTE_COLOR(address)            (((address) >> (PAGE_SHIFT + 10)) & 1UL)
 #define DCACHE_COLOR(address)          (((address) >> PAGE_SHIFT) & 1UL)
 #else
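
The coloring helpers retained under DCACHE_ALIASING_POSSIBLE above reduce to
single-bit tests: bit 0 of the page frame number (or of the VPTE index) picks
one of two colors.  A tiny standalone sketch, assuming sparc64's 8KB base
pages (PAGE_SHIFT of 13):

/* Standalone sketch of the two coloring helpers kept in pgalloc.h. */
#include <stdio.h>

#define PAGE_SHIFT              13
#define DCACHE_COLOR(address)   (((address) >> PAGE_SHIFT) & 1UL)
#define VPTE_COLOR(address)     (((address) >> (PAGE_SHIFT + 10)) & 1UL)

int main(void)
{
	unsigned long a = 0x2000UL;	/* page frame 1 -> color 1 */
	unsigned long b = 0x4000UL;	/* page frame 2 -> color 0 */
	printf("DCACHE colors: %lu %lu\n", DCACHE_COLOR(a), DCACHE_COLOR(b));
	printf("VPTE   colors: %lu %lu\n", VPTE_COLOR(a), VPTE_COLOR(b));
	return 0;
}
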
diff -Nru a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
--- a/include/asm-sparc64/pgtable.h     2005-04-03 21:18:03 -07:00
+++ b/include/asm-sparc64/pgtable.h     2005-04-03 21:18:03 -07:00
@@ -60,44 +60,24 @@
 #define PMD_SHIFT      (PAGE_SHIFT + (PAGE_SHIFT-3))
 #define PMD_SIZE       (1UL << PMD_SHIFT)
 #define PMD_MASK       (~(PMD_SIZE-1))
-#define PMD_BITS       11
+#define PMD_BITS       (PAGE_SHIFT - 2)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #define PGDIR_SHIFT    (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
+#define PGDIR_BITS     (PAGE_SHIFT - 2)
 
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
 
 /* Entries per page directory level. */
-#define PTRS_PER_PTE           (1UL << (PAGE_SHIFT-3))
-
-/* We the first one in this file, what we export to the kernel
- * is different so we can optimize correctly for 32-bit tasks.
- */
-#define REAL_PTRS_PER_PMD      (1UL << PMD_BITS)
-
-/* This is gross, but unless we do this gcc retests the
- * thread flag every interation in pmd traversal loops.
- */
-extern unsigned long __ptrs_per_pmd(void) __attribute_const__;
-#define PTRS_PER_PMD           __ptrs_per_pmd()
-
-/*
- * We cannot use the top address range because VPTE table lives there. This
- * formula finds the total legal virtual space in the processor, subtracts the
- * vpte size, then aligns it to the number of bytes mapped by one pgde, and
- * thus calculates the number of pgdes needed.
- */
-#define PTRS_PER_PGD (((1UL << VA_BITS) - VPTE_SIZE + (1UL << (PAGE_SHIFT + \
-                     (PAGE_SHIFT-3) + PMD_BITS)) - 1) / (1UL << (PAGE_SHIFT + \
-                     (PAGE_SHIFT-3) + PMD_BITS)))
+#define PTRS_PER_PTE   (1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD   (1UL << PMD_BITS)
+#define PTRS_PER_PGD   (1UL << PGDIR_BITS)
 
 /* Kernel has a separate 44bit address space. */
-#define USER_PTRS_PER_PGD      ((const int)(test_thread_flag(TIF_32BIT)) ? \
-                                (1) : (PTRS_PER_PGD))
 #define FIRST_USER_PGD_NR      0
 
 #define pte_ERROR(e)   __builtin_trap()
@@ -236,8 +216,8 @@
 
 /* PFNs are real physical page numbers.  However, mem_map only begins to record
  * per-page information starting at pfn_base.  This is to handle systems where
- * the first physical page in the machine is at some huge physical address, such
- * as 4GB.   This is common on a partitioned E10000, for example.
+ * the first physical page in the machine is at some huge physical address,
+ * such as 4GB.   This is common on a partitioned E10000, for example.
  */
 
 #define pfn_pte(pfn, prot)     \
@@ -308,7 +288,7 @@
 #define pte_mkdirty(pte)       (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
 
 /* to find an entry in a page-table-directory. */
-#define pgd_index(address)     (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD))
+#define pgd_index(address)     (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 #define pgd_offset(mm, address)        ((mm)->pgd + pgd_index(address))
 
 /* to find an entry in a kernel page-table-directory */
@@ -322,7 +302,7 @@
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address)      \
        ((pmd_t *) pud_page(*(pudp)) + \
-        (((address) >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))
+        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
 
 /* Find an entry in the third-level page table.. */
 #define pte_index(dir, address)        \
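
Besides simplifying the geometry, the pgtable.h hunks above carry a subtle
mask fix: pgd_index() previously ANDed with PTRS_PER_PGD instead of
PTRS_PER_PGD - 1.  A small standalone sketch of the new definitions, assuming
sparc64's 8KB base pages (PAGE_SHIFT of 13) and an arbitrary test address,
shows why the old mask loses index bits:

/* Standalone sketch of the simplified page-table geometry and of the
 * pgd_index() mask fix. */
#include <stdio.h>

#define PAGE_SHIFT	13UL
#define PMD_BITS	(PAGE_SHIFT - 2)
#define PGDIR_BITS	(PAGE_SHIFT - 2)
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3) + PMD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

int main(void)
{
	unsigned long addr = 0x70000000000UL;	/* arbitrary user address */
	unsigned long idx  = addr >> PGDIR_SHIFT;

	/* Old macro: '& PTRS_PER_PGD' keeps only bit 11, so most indexes
	 * collapse to 0.  New macro: '& (PTRS_PER_PGD - 1)' keeps bits 0-10. */
	printf("PTRS_PER_PGD=%lu old=%lu new=%lu\n",
	       PTRS_PER_PGD, idx & PTRS_PER_PGD, idx & (PTRS_PER_PGD - 1));
	return 0;
}
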
diff -Nru a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
--- a/include/asm-sparc64/tlb.h 2005-04-03 21:18:03 -07:00
+++ b/include/asm-sparc64/tlb.h 2005-04-03 21:18:03 -07:00
@@ -89,9 +89,7 @@
        tlb_flush_mmu(mp);
 
        if (mp->tlb_frozen) {
-               unsigned long context = mm->context;
-
-               if (CTX_VALID(context))
+               if (CTX_VALID(mm->context))
                        do_flush_tlb_mm(mm);
                mp->tlb_frozen = 0;
        } else