Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=811d50cb43eb730cc325df0c6913556e25739797
Commit:     811d50cb43eb730cc325df0c6913556e25739797
Parent:     379a95d1d2c3e3682e380084c40b6fc01e38fa1f
Author:     Paul Mundt <[EMAIL PROTECTED]>
AuthorDate: Tue Nov 20 17:01:55 2007 +0900
Committer:  Paul Mundt <[EMAIL PROTECTED]>
CommitDate: Mon Jan 28 13:18:50 2008 +0900

    sh: Move in the SH-5 TLB miss.
    
    Signed-off-by: Paul Mundt <[EMAIL PROTECTED]>
---
 arch/sh/mm/Makefile_32 |    2 +-
 arch/sh/mm/Makefile_64 |    2 +-
 arch/sh/mm/fault.c     |  303 ------------------------------------------------
 arch/sh/mm/fault_32.c  |  303 ++++++++++++++++++++++++++++++++++++++++++++++++
 arch/sh/mm/fault_64.c  |  279 ++++++++++++++++++++++++++++++++++++++++++++
 arch/sh64/mm/tlbmiss.c |  279 --------------------------------------------
 6 files changed, 584 insertions(+), 584 deletions(-)
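
[ Note: the moved do_fast_page_fault() hashes EXPEVT into an index for
  expevt_lookup_table.  The standalone sketch below is for illustration
  only and is not part of the patch; it reuses the EXPEVT values quoted
  in the fault_64.c comment (ITLBMISS 0xa40, RTLBMISS 0x040, WTLBMISS
  0x060) and works through the same arithmetic, yielding indices 1, 4
  and 6, i.e. the PRX, PRR and PRW entries of the table. ]

/*
 * Standalone sketch (illustration only, NOT part of this patch): replays
 * the EXPEVT -> expevt_lookup_table index hashing performed by
 * do_fast_page_fault() in the new arch/sh/mm/fault_64.c, using the EXPEVT
 * values quoted in that function's comment.
 */
#include <stdio.h>

int main(void)
{
        /* ITLBMISS, RTLBMISS, WTLBMISS as documented in do_fast_page_fault() */
        static const unsigned long long expevt[] = { 0xa40, 0x040, 0x060 };
        static const char *const name[] = { "ITLBMISS", "RTLBMISS", "WTLBMISS" };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned long long expevt4 = expevt[i] >> 4;
                unsigned long long index = (expevt4 ^ (expevt4 >> 5)) & 7;

                /* Expect 1, 4 and 6, i.e. the PRX, PRR and PRW slots of
                 * expevt_lookup_table.protection_flags[]. */
                printf("%s: EXPEVT=0x%03llx -> index %llu\n",
                       name[i], expevt[i], index);
        }

        return 0;
}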

diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 9ea3793..fc089c0 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -12,7 +12,7 @@ obj-$(CONFIG_SH7705_CACHE_32KB)       += cache-sh7705.o
 endif
 
 mmu-y                  := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)      := fault.o clear_page.o copy_page.o tlb-flush_32.o \
+mmu-$(CONFIG_MMU)      := fault_32.o clear_page.o copy_page.o tlb-flush_32.o \
                           ioremap_32.o
 
 obj-y                  += $(mmu-y)
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
index ec8deaa..82fe907 100644
--- a/arch/sh/mm/Makefile_64
+++ b/arch/sh/mm/Makefile_64
@@ -5,7 +5,7 @@
 obj-y                  := init.o extable_64.o consistent.o
 
 mmu-y                  := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)      := ioremap_64.o tlb-flush_64.o
+mmu-$(CONFIG_MMU)      := fault_64.o ioremap_64.o tlb-flush_64.o
 
 obj-y                  += $(mmu-y)
 
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
deleted file mode 100644
index 60d74f7..0000000
--- a/arch/sh/mm/fault.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Page fault handler for SH with an MMU.
- *
- *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2003 - 2007  Paul Mundt
- *
- *  Based on linux/arch/i386/mm/fault.c:
- *   Copyright (C) 1995  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/hardirq.h>
-#include <linux/kprobes.h>
-#include <asm/system.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/kgdb.h>
-
-/*
- * This routine handles page faults.  It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-                                       unsigned long writeaccess,
-                                       unsigned long address)
-{
-       struct task_struct *tsk;
-       struct mm_struct *mm;
-       struct vm_area_struct * vma;
-       int si_code;
-       int fault;
-       siginfo_t info;
-
-       trace_hardirqs_on();
-       local_irq_enable();
-
-#ifdef CONFIG_SH_KGDB
-       if (kgdb_nofault && kgdb_bus_err_hook)
-               kgdb_bus_err_hook();
-#endif
-
-       tsk = current;
-       mm = tsk->mm;
-       si_code = SEGV_MAPERR;
-
-       if (unlikely(address >= TASK_SIZE)) {
-               /*
-                * Synchronize this task's top level page-table
-                * with the 'reference' page table.
-                *
-                * Do _not_ use "tsk" here. We might be inside
-                * an interrupt in the middle of a task switch..
-                */
-               int offset = pgd_index(address);
-               pgd_t *pgd, *pgd_k;
-               pud_t *pud, *pud_k;
-               pmd_t *pmd, *pmd_k;
-
-               pgd = get_TTB() + offset;
-               pgd_k = swapper_pg_dir + offset;
-
-               /* This will never happen with the folded page table. */
-               if (!pgd_present(*pgd)) {
-                       if (!pgd_present(*pgd_k))
-                               goto bad_area_nosemaphore;
-                       set_pgd(pgd, *pgd_k);
-                       return;
-               }
-
-               pud = pud_offset(pgd, address);
-               pud_k = pud_offset(pgd_k, address);
-               if (pud_present(*pud) || !pud_present(*pud_k))
-                       goto bad_area_nosemaphore;
-               set_pud(pud, *pud_k);
-
-               pmd = pmd_offset(pud, address);
-               pmd_k = pmd_offset(pud_k, address);
-               if (pmd_present(*pmd) || !pmd_present(*pmd_k))
-                       goto bad_area_nosemaphore;
-               set_pmd(pmd, *pmd_k);
-
-               return;
-       }
-
-       /*
-        * If we're in an interrupt or have no user
-        * context, we must not take the fault..
-        */
-       if (in_atomic() || !mm)
-               goto no_context;
-
-       down_read(&mm->mmap_sem);
-
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-       si_code = SEGV_ACCERR;
-       if (writeaccess) {
-               if (!(vma->vm_flags & VM_WRITE))
-                       goto bad_area;
-       } else {
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-                       goto bad_area;
-       }
-
-       /*
-        * If for any reason at all we couldn't handle the fault,
-        * make sure we exit gracefully rather than endlessly redo
-        * the fault.
-        */
-survive:
-       fault = handle_mm_fault(mm, vma, address, writeaccess);
-       if (unlikely(fault & VM_FAULT_ERROR)) {
-               if (fault & VM_FAULT_OOM)
-                       goto out_of_memory;
-               else if (fault & VM_FAULT_SIGBUS)
-                       goto do_sigbus;
-               BUG();
-       }
-       if (fault & VM_FAULT_MAJOR)
-               tsk->maj_flt++;
-       else
-               tsk->min_flt++;
-
-       up_read(&mm->mmap_sem);
-       return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-       up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
-       if (user_mode(regs)) {
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = si_code;
-               info.si_addr = (void *) address;
-               force_sig_info(SIGSEGV, &info, tsk);
-               return;
-       }
-
-no_context:
-       /* Are we prepared to handle this kernel fault?  */
-       if (fixup_exception(regs))
-               return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- *
- */
-
-       bust_spinlocks(1);
-
-       if (oops_may_print()) {
-               __typeof__(pte_val(__pte(0))) page;
-
-               if (address < PAGE_SIZE)
-                       printk(KERN_ALERT "Unable to handle kernel NULL "
-                                         "pointer dereference");
-               else
-                       printk(KERN_ALERT "Unable to handle kernel paging "
-                                         "request");
-               printk(" at virtual address %08lx\n", address);
-               printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-               page = (unsigned long)get_TTB();
-               if (page) {
-                       page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
-                       printk(KERN_ALERT "*pde = %08lx\n", page);
-                       if (page & _PAGE_PRESENT) {
-                               page &= PAGE_MASK;
-                               address &= 0x003ff000;
-                               page = ((__typeof__(page) *)
-                                               __va(page))[address >>
-                                                           PAGE_SHIFT];
-                               printk(KERN_ALERT "*pte = %08lx\n", page);
-                       }
-               }
-       }
-
-       die("Oops", regs, writeaccess);
-       bust_spinlocks(0);
-       do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
-       up_read(&mm->mmap_sem);
-       if (is_global_init(current)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
-       goto no_context;
-
-do_sigbus:
-       up_read(&mm->mmap_sem);
-
-       /*
-        * Send a sigbus, regardless of whether we were in kernel
-        * or user mode.
-        */
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = BUS_ADRERR;
-       info.si_addr = (void *)address;
-       force_sig_info(SIGBUS, &info, tsk);
-
-       /* Kernel mode? Handle exceptions or die */
-       if (!user_mode(regs))
-               goto no_context;
-}
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX            (P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX            P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-                                        unsigned long writeaccess,
-                                        unsigned long address)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t entry;
-
-#ifdef CONFIG_SH_KGDB
-       if (kgdb_nofault && kgdb_bus_err_hook)
-               kgdb_bus_err_hook();
-#endif
-
-       /*
-        * We don't take page faults for P1, P2, and parts of P4, these
-        * are always mapped, whether it be due to legacy behaviour in
-        * 29-bit mode, or due to PMB configuration in 32-bit mode.
-        */
-       if (address >= P3SEG && address < P3_ADDR_MAX) {
-               pgd = pgd_offset_k(address);
-       } else {
-               if (unlikely(address >= TASK_SIZE || !current->mm))
-                       return 1;
-
-               pgd = pgd_offset(current->mm, address);
-       }
-
-       pud = pud_offset(pgd, address);
-       if (pud_none_or_clear_bad(pud))
-               return 1;
-       pmd = pmd_offset(pud, address);
-       if (pmd_none_or_clear_bad(pmd))
-               return 1;
-
-       pte = pte_offset_kernel(pmd, address);
-       entry = *pte;
-       if (unlikely(pte_none(entry) || pte_not_present(entry)))
-               return 1;
-       if (unlikely(writeaccess && !pte_write(entry)))
-               return 1;
-
-       if (writeaccess)
-               entry = pte_mkdirty(entry);
-       entry = pte_mkyoung(entry);
-
-       set_pte(pte, entry);
-       update_mmu_cache(NULL, address, entry);
-
-       return 0;
-}
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
new file mode 100644
index 0000000..60d74f7
--- /dev/null
+++ b/arch/sh/mm/fault_32.c
@@ -0,0 +1,303 @@
+/*
+ * Page fault handler for SH with an MMU.
+ *
+ *  Copyright (C) 1999  Niibe Yutaka
+ *  Copyright (C) 2003 - 2007  Paul Mundt
+ *
+ *  Based on linux/arch/i386/mm/fault.c:
+ *   Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/kgdb.h>
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+                                       unsigned long writeaccess,
+                                       unsigned long address)
+{
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       struct vm_area_struct * vma;
+       int si_code;
+       int fault;
+       siginfo_t info;
+
+       trace_hardirqs_on();
+       local_irq_enable();
+
+#ifdef CONFIG_SH_KGDB
+       if (kgdb_nofault && kgdb_bus_err_hook)
+               kgdb_bus_err_hook();
+#endif
+
+       tsk = current;
+       mm = tsk->mm;
+       si_code = SEGV_MAPERR;
+
+       if (unlikely(address >= TASK_SIZE)) {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int offset = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pud_t *pud, *pud_k;
+               pmd_t *pmd, *pmd_k;
+
+               pgd = get_TTB() + offset;
+               pgd_k = swapper_pg_dir + offset;
+
+               /* This will never happen with the folded page table. */
+               if (!pgd_present(*pgd)) {
+                       if (!pgd_present(*pgd_k))
+                               goto bad_area_nosemaphore;
+                       set_pgd(pgd, *pgd_k);
+                       return;
+               }
+
+               pud = pud_offset(pgd, address);
+               pud_k = pud_offset(pgd_k, address);
+               if (pud_present(*pud) || !pud_present(*pud_k))
+                       goto bad_area_nosemaphore;
+               set_pud(pud, *pud_k);
+
+               pmd = pmd_offset(pud, address);
+               pmd_k = pmd_offset(pud_k, address);
+               if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+                       goto bad_area_nosemaphore;
+               set_pmd(pmd, *pmd_k);
+
+               return;
+       }
+
+       /*
+        * If we're in an interrupt or have no user
+        * context, we must not take the fault..
+        */
+       if (in_atomic() || !mm)
+               goto no_context;
+
+       down_read(&mm->mmap_sem);
+
+       vma = find_vma(mm, address);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (expand_stack(vma, address))
+               goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+       si_code = SEGV_ACCERR;
+       if (writeaccess) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+                       goto bad_area;
+       }
+
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+survive:
+       fault = handle_mm_fault(mm, vma, address, writeaccess);
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+       }
+       if (fault & VM_FAULT_MAJOR)
+               tsk->maj_flt++;
+       else
+               tsk->min_flt++;
+
+       up_read(&mm->mmap_sem);
+       return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+       up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+       if (user_mode(regs)) {
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               info.si_code = si_code;
+               info.si_addr = (void *) address;
+               force_sig_info(SIGSEGV, &info, tsk);
+               return;
+       }
+
+no_context:
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs))
+               return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ *
+ */
+
+       bust_spinlocks(1);
+
+       if (oops_may_print()) {
+               __typeof__(pte_val(__pte(0))) page;
+
+               if (address < PAGE_SIZE)
+                       printk(KERN_ALERT "Unable to handle kernel NULL "
+                                         "pointer dereference");
+               else
+                       printk(KERN_ALERT "Unable to handle kernel paging "
+                                         "request");
+               printk(" at virtual address %08lx\n", address);
+               printk(KERN_ALERT "pc = %08lx\n", regs->pc);
+               page = (unsigned long)get_TTB();
+               if (page) {
+                       page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
+                       printk(KERN_ALERT "*pde = %08lx\n", page);
+                       if (page & _PAGE_PRESENT) {
+                               page &= PAGE_MASK;
+                               address &= 0x003ff000;
+                               page = ((__typeof__(page) *)
+                                               __va(page))[address >>
+                                                           PAGE_SHIFT];
+                               printk(KERN_ALERT "*pte = %08lx\n", page);
+                       }
+               }
+       }
+
+       die("Oops", regs, writeaccess);
+       bust_spinlocks(0);
+       do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+       up_read(&mm->mmap_sem);
+       if (is_global_init(current)) {
+               yield();
+               down_read(&mm->mmap_sem);
+               goto survive;
+       }
+       printk("VM: killing process %s\n", tsk->comm);
+       if (user_mode(regs))
+               do_group_exit(SIGKILL);
+       goto no_context;
+
+do_sigbus:
+       up_read(&mm->mmap_sem);
+
+       /*
+        * Send a sigbus, regardless of whether we were in kernel
+        * or user mode.
+        */
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (void *)address;
+       force_sig_info(SIGBUS, &info, tsk);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!user_mode(regs))
+               goto no_context;
+}
+
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX            (P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX            P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+                                        unsigned long writeaccess,
+                                        unsigned long address)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       pte_t entry;
+
+#ifdef CONFIG_SH_KGDB
+       if (kgdb_nofault && kgdb_bus_err_hook)
+               kgdb_bus_err_hook();
+#endif
+
+       /*
+        * We don't take page faults for P1, P2, and parts of P4, these
+        * are always mapped, whether it be due to legacy behaviour in
+        * 29-bit mode, or due to PMB configuration in 32-bit mode.
+        */
+       if (address >= P3SEG && address < P3_ADDR_MAX) {
+               pgd = pgd_offset_k(address);
+       } else {
+               if (unlikely(address >= TASK_SIZE || !current->mm))
+                       return 1;
+
+               pgd = pgd_offset(current->mm, address);
+       }
+
+       pud = pud_offset(pgd, address);
+       if (pud_none_or_clear_bad(pud))
+               return 1;
+       pmd = pmd_offset(pud, address);
+       if (pmd_none_or_clear_bad(pmd))
+               return 1;
+
+       pte = pte_offset_kernel(pmd, address);
+       entry = *pte;
+       if (unlikely(pte_none(entry) || pte_not_present(entry)))
+               return 1;
+       if (unlikely(writeaccess && !pte_write(entry)))
+               return 1;
+
+       if (writeaccess)
+               entry = pte_mkdirty(entry);
+       entry = pte_mkyoung(entry);
+
+       set_pte(pte, entry);
+       update_mmu_cache(NULL, address, entry);
+
+       return 0;
+}
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c
new file mode 100644
index 0000000..1469aa7
--- /dev/null
+++ b/arch/sh/mm/fault_64.c
@@ -0,0 +1,279 @@
+/*
+ * The SH64 TLB miss.
+ *
+ * Original code from fault.c
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * Fast PTE->TLB refill path
+ * Copyright (C) 2003 [EMAIL PROTECTED]
+ *
+ * IMPORTANT NOTES :
+ * The do_fast_page_fault function is called from a context in entry.S
+ * where very few registers have been saved.  In particular, the code in
+ * this file must be compiled not to use ANY caller-save registers that
+ * are not part of the restricted save set.  Also, it means that code in
+ * this file must not make calls to functions elsewhere in the kernel, or
+ * else the excepting context will see corruption in its caller-save
+ * registers.  Plus, the entry.S save area is non-reentrant, so this code
+ * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
+ * on any exception.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+#include <asm/tlb.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu/registers.h>
+
+/* Callable from fault.c, so not static */
+inline void __do_tlb_refill(unsigned long address,
+                            unsigned long long is_text_not_data, pte_t *pte)
+{
+       unsigned long long ptel;
+       unsigned long long pteh=0;
+       struct tlb_info *tlbp;
+       unsigned long long next;
+
+       /* Get PTEL first */
+       ptel = pte_val(*pte);
+
+       /*
+        * Set PTEH register
+        */
+       pteh = address & MMU_VPN_MASK;
+
+       /* Sign extend based on neff. */
+#if (NEFF == 32)
+       /* Faster sign extension */
+       pteh = (unsigned long long)(signed long long)(signed long)pteh;
+#else
+       /* General case */
+       pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
+#endif
+
+       /* Set the ASID. */
+       pteh |= get_asid() << PTEH_ASID_SHIFT;
+       pteh |= PTEH_VALID;
+
+       /* Set PTEL register, set_pte has performed the sign extension */
+       ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+
+       tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
+       next = tlbp->next;
+       __flush_tlb_slot(next);
+       asm volatile ("putcfg %0,1,%2\n\n\t"
+                     "putcfg %0,0,%1\n"
+                     :  : "r" (next), "r" (pteh), "r" (ptel) );
+
+       next += TLB_STEP;
+       if (next > tlbp->last) next = tlbp->first;
+       tlbp->next = next;
+
+}
+
+static int handle_vmalloc_fault(struct mm_struct *mm,
+                               unsigned long protection_flags,
+                                unsigned long long textaccess,
+                               unsigned long address)
+{
+       pgd_t *dir;
+       pud_t *pud;
+       pmd_t *pmd;
+       static pte_t *pte;
+       pte_t entry;
+
+       dir = pgd_offset_k(address);
+
+       pud = pud_offset(dir, address);
+       if (pud_none_or_clear_bad(pud))
+               return 0;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none_or_clear_bad(pmd))
+               return 0;
+
+       pte = pte_offset_kernel(pmd, address);
+       entry = *pte;
+
+       if (pte_none(entry) || !pte_present(entry))
+               return 0;
+       if ((pte_val(entry) & protection_flags) != protection_flags)
+               return 0;
+
+        __do_tlb_refill(address, textaccess, pte);
+
+       return 1;
+}
+
+static int handle_tlbmiss(struct mm_struct *mm,
+                         unsigned long long protection_flags,
+                         unsigned long long textaccess,
+                         unsigned long address)
+{
+       pgd_t *dir;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       pte_t entry;
+
+       /* NB. The PGD currently only contains a single entry - there is no
+          page table tree stored for the top half of the address space since
+          virtual pages in that region should never be mapped in user mode.
+          (In kernel mode, the only things in that region are the 512Mb super
+          page (locked in), and vmalloc (modules) +  I/O device pages (handled
+          by handle_vmalloc_fault), so no PGD for the upper half is required
+          by kernel mode either).
+
+          See how mm->pgd is allocated and initialised in pgd_alloc to see why
+          the next test is necessary.  - RPC */
+       if (address >= (unsigned long) TASK_SIZE)
+               /* upper half - never has page table entries. */
+               return 0;
+
+       dir = pgd_offset(mm, address);
+       if (pgd_none(*dir) || !pgd_present(*dir))
+               return 0;
+       if (!pgd_present(*dir))
+               return 0;
+
+       pud = pud_offset(dir, address);
+       if (pud_none(*pud) || !pud_present(*pud))
+               return 0;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || !pmd_present(*pmd))
+               return 0;
+
+       pte = pte_offset_kernel(pmd, address);
+       entry = *pte;
+
+       if (pte_none(entry) || !pte_present(entry))
+               return 0;
+
+       /*
+        * If the page doesn't have sufficient protection bits set to
+        * service the kind of fault being handled, there's not much
+        * point doing the TLB refill.  Punt the fault to the general
+        * handler.
+        */
+       if ((pte_val(entry) & protection_flags) != protection_flags)
+               return 0;
+
+        __do_tlb_refill(address, textaccess, pte);
+
+       return 1;
+}
+
+/*
+ * Put all this information into one structure so that everything is just
+ * arithmetic relative to a single base address.  This reduces the number
+ * of movi/shori pairs needed just to load addresses of static data.
+ */
+struct expevt_lookup {
+       unsigned short protection_flags[8];
+       unsigned char  is_text_access[8];
+       unsigned char  is_write_access[8];
+};
+
+#define PRU (1<<9)
+#define PRW (1<<8)
+#define PRX (1<<7)
+#define PRR (1<<6)
+
+#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
+#define YOUNG (_PAGE_ACCESSED)
+
+/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
+   the fault happened in user mode or privileged mode. */
+static struct expevt_lookup expevt_lookup_table = {
+       .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
+       .is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
+};
+
+/*
+   This routine handles page faults that can be serviced just by refilling a
+   TLB entry from an existing page table entry.  (This case represents a very
+   large majority of page faults.) Return 1 if the fault was successfully
+   handled.  Return 0 if the fault could not be handled.  (This leads into the
+   general fault handling in fault.c which deals with mapping file-backed
+   pages, stack growth, segmentation faults, swapping etc etc)
+ */
+asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
+                                 unsigned long long expevt,
+                                 unsigned long address)
+{
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       unsigned long long textaccess;
+       unsigned long long protection_flags;
+       unsigned long long index;
+       unsigned long long expevt4;
+
+       /* The next few lines implement a way of hashing EXPEVT into a
+        * small array index which can be used to lookup parameters
+        * specific to the type of TLBMISS being handled.
+        *
+        * Note:
+        *      ITLBMISS has EXPEVT==0xa40
+        *      RTLBMISS has EXPEVT==0x040
+        *      WTLBMISS has EXPEVT==0x060
+        */
+       expevt4 = (expevt >> 4);
+       /* TODO : xor ssr_md into this expression too. Then we can check
+        * that PRU is set when it needs to be. */
+       index = expevt4 ^ (expevt4 >> 5);
+       index &= 7;
+       protection_flags = expevt_lookup_table.protection_flags[index];
+       textaccess       = expevt_lookup_table.is_text_access[index];
+
+#ifdef CONFIG_SH64_PROC_TLB
+       ++calls_to_do_fast_page_fault;
+#endif
+
+       /* SIM
+        * Note this is now called with interrupts still disabled
+        * This is to cope with being called for a missing IO port
+        * address with interrupts disabled. This should be fixed as
+        * soon as we have a better 'fast path' miss handler.
+        *
+        * Plus take care how you try and debug this stuff.
+        * For example, writing debug data to a port which you
+        * have just faulted on is not going to work.
+        */
+
+       tsk = current;
+       mm = tsk->mm;
+
+       if ((address >= VMALLOC_START && address < VMALLOC_END) ||
+           (address >= IOBASE_VADDR  && address < IOBASE_END)) {
+               if (ssr_md)
+                       /*
+                        * Process-contexts can never have this address
+                        * range mapped
+                        */
+                       if (handle_vmalloc_fault(mm, protection_flags,
+                                                textaccess, address))
+                               return 1;
+       } else if (!in_interrupt() && mm) {
+               if (handle_tlbmiss(mm, protection_flags, textaccess, address))
+                       return 1;
+       }
+
+       return 0;
+}
diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c
deleted file mode 100644
index b767d6c..0000000
--- a/arch/sh64/mm/tlbmiss.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/mm/tlbmiss.c
- *
- * Original code from fault.c
- * Copyright (C) 2000, 2001  Paolo Alberelli
- *
- * Fast PTE->TLB refill path
- * Copyright (C) 2003 [EMAIL PROTECTED]
- *
- * IMPORTANT NOTES :
- * The do_fast_page_fault function is called from a context in entry.S where very few registers
- * have been saved.  In particular, the code in this file must be compiled not to use ANY
- * caller-save registers that are not part of the restricted save set.  Also, it means that
- * code in this file must not make calls to functions elsewhere in the kernel, or else the
- * excepting context will see corruption in its caller-save registers.  Plus, the entry.S save
- * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside
- * it and panic on any exception.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-
-#include <asm/system.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-#include <asm/registers.h>             /* required by inline asm statements */
-
-/* Callable from fault.c, so not static */
-inline void __do_tlb_refill(unsigned long address,
-                            unsigned long long is_text_not_data, pte_t *pte)
-{
-       unsigned long long ptel;
-       unsigned long long pteh=0;
-       struct tlb_info *tlbp;
-       unsigned long long next;
-
-       /* Get PTEL first */
-       ptel = pte_val(*pte);
-
-       /*
-        * Set PTEH register
-        */
-       pteh = address & MMU_VPN_MASK;
-
-       /* Sign extend based on neff. */
-#if (NEFF == 32)
-       /* Faster sign extension */
-       pteh = (unsigned long long)(signed long long)(signed long)pteh;
-#else
-       /* General case */
-       pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
-#endif
-
-       /* Set the ASID. */
-       pteh |= get_asid() << PTEH_ASID_SHIFT;
-       pteh |= PTEH_VALID;
-
-       /* Set PTEL register, set_pte has performed the sign extension */
-       ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
-       tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
-       next = tlbp->next;
-       __flush_tlb_slot(next);
-       asm volatile ("putcfg %0,1,%2\n\n\t"
-                     "putcfg %0,0,%1\n"
-                     :  : "r" (next), "r" (pteh), "r" (ptel) );
-
-       next += TLB_STEP;
-       if (next > tlbp->last) next = tlbp->first;
-       tlbp->next = next;
-
-}
-
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags,
-                                unsigned long long textaccess,
-                               unsigned long address)
-{
-       pgd_t *dir;
-       pmd_t *pmd;
-       static pte_t *pte;
-       pte_t entry;
-
-       dir = pgd_offset_k(address);
-       pmd = pmd_offset(dir, address);
-
-       if (pmd_none(*pmd)) {
-               return 0;
-       }
-
-       if (pmd_bad(*pmd)) {
-               pmd_clear(pmd);
-               return 0;
-       }
-
-       pte = pte_offset_kernel(pmd, address);
-       entry = *pte;
-
-       if (pte_none(entry) || !pte_present(entry)) {
-               return 0;
-       }
-
-       if ((pte_val(entry) & protection_flags) != protection_flags) {
-               return 0;
-       }
-
-        __do_tlb_refill(address, textaccess, pte);
-
-       return 1;
-}
-
-static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags,
-                       unsigned long long textaccess,
-                       unsigned long address)
-{
-       pgd_t *dir;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t entry;
-
-       /* NB. The PGD currently only contains a single entry - there is no
-          page table tree stored for the top half of the address space since
-          virtual pages in that region should never be mapped in user mode.
-          (In kernel mode, the only things in that region are the 512Mb super
-          page (locked in), and vmalloc (modules) +  I/O device pages (handled
-          by handle_vmalloc_fault), so no PGD for the upper half is required
-          by kernel mode either).
-
-          See how mm->pgd is allocated and initialised in pgd_alloc to see why
-          the next test is necessary.  - RPC */
-       if (address >= (unsigned long) TASK_SIZE) {
-               /* upper half - never has page table entries. */
-               return 0;
-       }
-       dir = pgd_offset(mm, address);
-       if (pgd_none(*dir)) {
-               return 0;
-       }
-       if (!pgd_present(*dir)) {
-               return 0;
-       }
-
-       pmd = pmd_offset(dir, address);
-       if (pmd_none(*pmd)) {
-               return 0;
-       }
-       if (!pmd_present(*pmd)) {
-               return 0;
-       }
-       pte = pte_offset_kernel(pmd, address);
-       entry = *pte;
-       if (pte_none(entry)) {
-               return 0;
-       }
-       if (!pte_present(entry)) {
-               return 0;
-       }
-
-       /* If the page doesn't have sufficient protection bits set to service the
-          kind of fault being handled, there's not much point doing the TLB refill.
-          Punt the fault to the general handler. */
-       if ((pte_val(entry) & protection_flags) != protection_flags) {
-               return 0;
-       }
-
-        __do_tlb_refill(address, textaccess, pte);
-
-       return 1;
-}
-
-/* Put all this information into one structure so that everything is just arithmetic
-   relative to a single base address.  This reduces the number of movi/shori pairs needed
-   just to load addresses of static data. */
-struct expevt_lookup {
-       unsigned short protection_flags[8];
-       unsigned char  is_text_access[8];
-       unsigned char  is_write_access[8];
-};
-
-#define PRU (1<<9)
-#define PRW (1<<8)
-#define PRX (1<<7)
-#define PRR (1<<6)
-
-#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
-#define YOUNG (_PAGE_ACCESSED)
-
-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
-   the fault happened in user mode or privileged mode. */
-static struct expevt_lookup expevt_lookup_table = {
-       .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
-       .is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
-};
-
-/*
-   This routine handles page faults that can be serviced just by refilling a
-   TLB entry from an existing page table entry.  (This case represents a very
-   large majority of page faults.) Return 1 if the fault was successfully
-   handled.  Return 0 if the fault could not be handled.  (This leads into the
-   general fault handling in fault.c which deals with mapping file-backed
-   pages, stack growth, segmentation faults, swapping etc etc)
- */
-asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
-                                 unsigned long address)
-{
-       struct task_struct *tsk;
-       struct mm_struct *mm;
-       unsigned long long textaccess;
-       unsigned long long protection_flags;
-       unsigned long long index;
-       unsigned long long expevt4;
-
-       /* The next few lines implement a way of hashing EXPEVT into a small array index
-          which can be used to lookup parameters specific to the type of TLBMISS being
-          handled.  Note:
-          ITLBMISS has EXPEVT==0xa40
-          RTLBMISS has EXPEVT==0x040
-          WTLBMISS has EXPEVT==0x060
-       */
-
-       expevt4 = (expevt >> 4);
-       /* TODO : xor ssr_md into this expression too.  Then we can check that PRU is set
-          when it needs to be. */
-       index = expevt4 ^ (expevt4 >> 5);
-       index &= 7;
-       protection_flags = expevt_lookup_table.protection_flags[index];
-       textaccess       = expevt_lookup_table.is_text_access[index];
-
-#ifdef CONFIG_SH64_PROC_TLB
-       ++calls_to_do_fast_page_fault;
-#endif
-
-       /* SIM
-        * Note this is now called with interrupts still disabled
-        * This is to cope with being called for a missing IO port
-        * address with interrupts disabled. This should be fixed as
-        * soon as we have a better 'fast path' miss handler.
-        *
-        * Plus take care how you try and debug this stuff.
-        * For example, writing debug data to a port which you
-        * have just faulted on is not going to work.
-        */
-
-       tsk = current;
-       mm = tsk->mm;
-
-       if ((address >= VMALLOC_START && address < VMALLOC_END) ||
-           (address >= IOBASE_VADDR  && address < IOBASE_END)) {
-               if (ssr_md) {
-                       /* Process-contexts can never have this address range mapped */
-                       if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) {
-                               return 1;
-                       }
-               }
-       } else if (!in_interrupt() && mm) {
-               if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-