Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=959f7d587e236a2d218f527771f156c336409d11
Commit:     959f7d587e236a2d218f527771f156c336409d11
Parent:     256b22ca66987c537064dc25b0b267966189b5ba
Author:     Paul Mundt <[EMAIL PROTECTED]>
AuthorDate: Sat Nov 10 20:35:53 2007 +0900
Committer:  Paul Mundt <[EMAIL PROTECTED]>
CommitDate: Mon Jan 28 13:18:44 2008 +0900

    sh: Move over the SH-5 head.S and tlb.h.
    
    Signed-off-by: Paul Mundt <[EMAIL PROTECTED]>
---
 arch/sh/kernel/Makefile_32 |    2 +-
 arch/sh/kernel/Makefile_64 |    2 +-
 arch/sh/kernel/head.S      |  120 --------------
 arch/sh/kernel/head_32.S   |  120 ++++++++++++++
 arch/sh/kernel/head_64.S   |  367 +++++++++++++++++++++++++++++++++++++++++++
 arch/sh64/kernel/head.S    |  372 --------------------------------------------
 include/asm-sh/tlb.h       |   10 +-
 include/asm-sh/tlb_64.h    |   69 ++++++++
 include/asm-sh64/tlb.h     |   92 -----------
 9 files changed, 567 insertions(+), 587 deletions(-)
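
Note on the Makefile hunks below: the head.o -> head_32.o/head_64.o rename works
because arch/sh builds only one of the two per-width makefile fragments for a
given configuration.  As a rough sketch (the selection logic lives in
arch/sh/kernel/Makefile and is assumed here, not part of this patch), the
fragment is picked along these lines:

    ifdef CONFIG_SUPERH64                    # illustrative condition only
    include ${srctree}/arch/sh/kernel/Makefile_64
    else
    include ${srctree}/arch/sh/kernel/Makefile_32
    endif

so each fragment's extra-y only has to name its own head object, as the first
two hunks below do.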

diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 29b44eb..35be5e9 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the Linux/SuperH kernel.
 #
 
-extra-y        := head.o init_task.o vmlinux.lds
+extra-y        := head_32.o init_task.o vmlinux.lds
 
 obj-y  := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
           ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o syscalls.o \
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index fb87d64..6e72ed4 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -1,4 +1,4 @@
-extra-y        := head.o init_task.o vmlinux.lds
+extra-y        := head_64.o init_task.o vmlinux.lds
 
 obj-y  := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
           ptrace_64.o semaphore.o setup.o signal_64.o sys_sh64.o syscalls.o \
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
deleted file mode 100644
index 3338239..0000000
--- a/arch/sh/kernel/head.S
+++ /dev/null
@@ -1,120 +0,0 @@
-/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
- *
- *  arch/sh/kernel/head.S
- *
- *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Head.S contains the SH exception handlers and startup code.
- */
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_CPU_SH4A
-#define SYNCO()                synco
-
-#define PREFI(label, reg)      \
-       mov.l   label, reg;     \
-       prefi   @reg
-#else
-#define SYNCO()
-#define PREFI(label, reg)
-#endif
-
-       .section        .empty_zero_page, "aw"
-ENTRY(empty_zero_page)
-       .long   1               /* MOUNT_ROOT_RDONLY */
-       .long   0               /* RAMDISK_FLAGS */
-       .long   0x0200          /* ORIG_ROOT_DEV */
-       .long   1               /* LOADER_TYPE */
-       .long   0x00360000      /* INITRD_START */
-       .long   0x000a0000      /* INITRD_SIZE */
-       .long   0
-1:
-       .skip   PAGE_SIZE - empty_zero_page - 1b
-
-       .section        .text.head, "ax"
-
-/*
- * Condition at the entry of _stext:
- *
- *   BSC has already been initialized.
- *   INTC may or may not be initialized.
- *   VBR may or may not be initialized.
- *   MMU may or may not be initialized.
- *   Cache may or may not be initialized.
- *   Hardware (including on-chip modules) may or may not be initialized. 
- *
- */
-ENTRY(_stext)
-       !                       Initialize Status Register
-       mov.l   1f, r0          ! MD=1, RB=0, BL=0, IMASK=0xF
-       ldc     r0, sr
-       !                       Initialize global interrupt mask
-#ifdef CONFIG_CPU_HAS_SR_RB
-       mov     #0, r0
-       ldc     r0, r6_bank
-#endif
-       
-       /*
-        * Prefetch if possible to reduce cache miss penalty.
-        *
-        * We do this early on for SH-4A as a micro-optimization,
-        * as later on we will have speculative execution enabled
-        * and this will become less of an issue.
-        */
-       PREFI(5f, r0)
-       PREFI(6f, r0)
-
-       !
-       mov.l   2f, r0
-       mov     r0, r15         ! Set initial r15 (stack pointer)
-#ifdef CONFIG_CPU_HAS_SR_RB
-       mov.l   7f, r0
-       ldc     r0, r7_bank     ! ... and initial thread_info
-#endif
-       
-       !                       Clear BSS area
-#ifdef CONFIG_SMP      
-       mov.l   3f, r0
-       cmp/eq  #0, r0          ! skip clear if set to zero
-       bt      10f
-#endif
-       
-       mov.l   3f, r1
-       add     #4, r1
-       mov.l   4f, r2
-       mov     #0, r0
-9:     cmp/hs  r2, r1
-       bf/s    9b              ! while (r1 < r2)
-        mov.l  r0,@-r2
-
-10:            
-       !                       Additional CPU initialization
-       mov.l   6f, r0
-       jsr     @r0
-        nop
-
-       SYNCO()                 ! Wait for pending instructions..
-       
-       !                       Start kernel
-       mov.l   5f, r0
-       jmp     @r0
-        nop
-
-       .balign 4
-#if defined(CONFIG_CPU_SH2)
-1:     .long   0x000000F0              ! IMASK=0xF
-#else
-1:     .long   0x400080F0              ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
-#endif
-ENTRY(stack_start)
-2:     .long   init_thread_union+THREAD_SIZE
-3:     .long   __bss_start
-4:     .long   _end
-5:     .long   start_kernel
-6:     .long   sh_cpu_init
-7:     .long   init_thread_union
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
new file mode 100644
index 0000000..3338239
--- /dev/null
+++ b/arch/sh/kernel/head_32.S
@@ -0,0 +1,120 @@
+/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
+ *
+ *  arch/sh/kernel/head.S
+ *
+ *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Head.S contains the SH exception handlers and startup code.
+ */
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_CPU_SH4A
+#define SYNCO()                synco
+
+#define PREFI(label, reg)      \
+       mov.l   label, reg;     \
+       prefi   @reg
+#else
+#define SYNCO()
+#define PREFI(label, reg)
+#endif
+
+       .section        .empty_zero_page, "aw"
+ENTRY(empty_zero_page)
+       .long   1               /* MOUNT_ROOT_RDONLY */
+       .long   0               /* RAMDISK_FLAGS */
+       .long   0x0200          /* ORIG_ROOT_DEV */
+       .long   1               /* LOADER_TYPE */
+       .long   0x00360000      /* INITRD_START */
+       .long   0x000a0000      /* INITRD_SIZE */
+       .long   0
+1:
+       .skip   PAGE_SIZE - empty_zero_page - 1b
+
+       .section        .text.head, "ax"
+
+/*
+ * Condition at the entry of _stext:
+ *
+ *   BSC has already been initialized.
+ *   INTC may or may not be initialized.
+ *   VBR may or may not be initialized.
+ *   MMU may or may not be initialized.
+ *   Cache may or may not be initialized.
+ *   Hardware (including on-chip modules) may or may not be initialized. 
+ *
+ */
+ENTRY(_stext)
+       !                       Initialize Status Register
+       mov.l   1f, r0          ! MD=1, RB=0, BL=0, IMASK=0xF
+       ldc     r0, sr
+       !                       Initialize global interrupt mask
+#ifdef CONFIG_CPU_HAS_SR_RB
+       mov     #0, r0
+       ldc     r0, r6_bank
+#endif
+       
+       /*
+        * Prefetch if possible to reduce cache miss penalty.
+        *
+        * We do this early on for SH-4A as a micro-optimization,
+        * as later on we will have speculative execution enabled
+        * and this will become less of an issue.
+        */
+       PREFI(5f, r0)
+       PREFI(6f, r0)
+
+       !
+       mov.l   2f, r0
+       mov     r0, r15         ! Set initial r15 (stack pointer)
+#ifdef CONFIG_CPU_HAS_SR_RB
+       mov.l   7f, r0
+       ldc     r0, r7_bank     ! ... and initial thread_info
+#endif
+       
+       !                       Clear BSS area
+#ifdef CONFIG_SMP      
+       mov.l   3f, r0
+       cmp/eq  #0, r0          ! skip clear if set to zero
+       bt      10f
+#endif
+       
+       mov.l   3f, r1
+       add     #4, r1
+       mov.l   4f, r2
+       mov     #0, r0
+9:     cmp/hs  r2, r1
+       bf/s    9b              ! while (r1 < r2)
+        mov.l  r0,@-r2
+
+10:            
+       !                       Additional CPU initialization
+       mov.l   6f, r0
+       jsr     @r0
+        nop
+
+       SYNCO()                 ! Wait for pending instructions..
+       
+       !                       Start kernel
+       mov.l   5f, r0
+       jmp     @r0
+        nop
+
+       .balign 4
+#if defined(CONFIG_CPU_SH2)
+1:     .long   0x000000F0              ! IMASK=0xF
+#else
+1:     .long   0x400080F0              ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+#endif
+ENTRY(stack_start)
+2:     .long   init_thread_union+THREAD_SIZE
+3:     .long   __bss_start
+4:     .long   _end
+5:     .long   start_kernel
+6:     .long   sh_cpu_init
+7:     .long   init_thread_union
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
new file mode 100644
index 0000000..8015af6
--- /dev/null
+++ b/arch/sh/kernel/head_64.S
@@ -0,0 +1,367 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * arch/sh64/kernel/head.S
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ *
+ *
+ * [EMAIL PROTECTED]:   2nd May 2002
+ *    Moved definition of empty_zero_page to its own section allowing
+ *    it to be placed at an absolute address known at load time.
+ *
+ * [EMAIL PROTECTED]:          9th May 2003
+ *    Kill off GLOBAL_NAME() usage.
+ *
+ * [EMAIL PROTECTED]:          8th May 2004
+ *    Add early SCIF console DTLB mapping.
+ */
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/tlb.h>
+#include <asm/cpu/registers.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * MMU defines: TLB boundaries.
+ */
+
+#define MMUIR_FIRST    ITLB_FIXED
+#define MMUIR_END      ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUIR_STEP     TLB_STEP
+
+#define MMUDR_FIRST    DTLB_FIXED
+#define MMUDR_END      DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUDR_STEP     TLB_STEP
+
+/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
+#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
+#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
+#endif
+
+/*
+ * MMU defines: Fixed TLBs.
+ */
+/* Deal safely with the case where the base of RAM is not 512Mb aligned */
+
+#define ALIGN_512M_MASK (0xffffffffe0000000)
+#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
+#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
+
+#define MMUIR_TEXT_H   (0x0000000000000003 | ALIGNED_EFFECTIVE)
+                       /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+
+#define MMUIR_TEXT_L   (0x000000000000009a | ALIGNED_PHYSICAL)
+                       /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
+
+#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
+                       /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
+                       /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
+
+#ifdef CONFIG_ICACHE_DISABLED
+#define        ICCR0_INIT_VAL  ICCR0_OFF                       /* ICACHE off */
+#else
+#define        ICCR0_INIT_VAL  ICCR0_ON | ICCR0_ICI            /* ICE + ICI */
+#endif
+#define        ICCR1_INIT_VAL  ICCR1_NOLOCK                    /* No locking */
+
+#if defined (CONFIG_DCACHE_DISABLED)
+#define        OCCR0_INIT_VAL  OCCR0_OFF                          /* D-cache: off  */
+#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
+#define        OCCR0_INIT_VAL  OCCR0_ON | OCCR0_OCI | OCCR0_WT    /* D-cache: on,   */
+                                                          /* WT, invalidate */
+#elif defined (CONFIG_DCACHE_WRITE_BACK)
+#define        OCCR0_INIT_VAL  OCCR0_ON | OCCR0_OCI | OCCR0_WB    /* D-cache: on,   */
+                                                          /* WB, invalidate */
+#else
+#error preprocessor flag CONFIG_DCACHE_... not recognized!
+#endif
+
+#define        OCCR1_INIT_VAL  OCCR1_NOLOCK                       /* No locking     */
+
+       .section        .empty_zero_page, "aw"
+       .global empty_zero_page
+
+empty_zero_page:
+       .long   1               /* MOUNT_ROOT_RDONLY */
+       .long   0               /* RAMDISK_FLAGS */
+       .long   0x0200          /* ORIG_ROOT_DEV */
+       .long   1               /* LOADER_TYPE */
+       .long   0x00800000      /* INITRD_START */
+       .long   0x00800000      /* INITRD_SIZE */
+       .long   0
+
+       .text
+       .balign 4096,0,4096
+
+       .section        .data, "aw"
+       .balign PAGE_SIZE
+
+       .section        .data, "aw"
+       .balign PAGE_SIZE
+
+       .global swapper_pg_dir
+swapper_pg_dir:
+       .space PAGE_SIZE, 0
+
+       .global empty_bad_page
+empty_bad_page:
+       .space PAGE_SIZE, 0
+
+       .global empty_bad_pte_table
+empty_bad_pte_table:
+       .space PAGE_SIZE, 0
+
+       .global fpu_in_use
+fpu_in_use:    .quad   0
+
+
+       .section        .text.head, "ax"
+       .balign L1_CACHE_BYTES
+/*
+ * Condition at the entry of __stext:
+ * . Reset state:
+ *   . SR.FD    = 1            (FPU disabled)
+ *   . SR.BL    = 1            (Exceptions disabled)
+ *   . SR.MD    = 1            (Privileged Mode)
+ *   . SR.MMU   = 0            (MMU Disabled)
+ *   . SR.CD    = 0            (CTC User Visible)
+ *   . SR.IMASK = Undefined    (Interrupt Mask)
+ *
+ * Operations supposed to be performed by __stext:
+ * . prevent speculative fetch onto device memory while MMU is off
+ * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
+ * . first, save CPU state and set it to something harmless
+ * . any CPU detection and/or endianness settings (?)
+ * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
+ * . set initial TLB entries for cached and uncached regions
+ *   (no fine granularity paging)
+ * . set initial cache state
+ * . enable MMU and caches
+ * . set CPU to a consistent state
+ *   . registers (including stack pointer and current/KCR0)
+ *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
+ *     at this stage. This is all to later Linux initialization steps.
+ *   . initialize FPU
+ * . clear BSS
+ * . jump into start_kernel()
+ * . be prepared to hopeless start_kernel() returns.
+ *
+ */
+       .global _stext
+_stext:
+       /*
+        * Prevent speculative fetch on device memory due to
+        * uninitialized target registers.
+        */
+       ptabs/u ZERO, tr0
+       ptabs/u ZERO, tr1
+       ptabs/u ZERO, tr2
+       ptabs/u ZERO, tr3
+       ptabs/u ZERO, tr4
+       ptabs/u ZERO, tr5
+       ptabs/u ZERO, tr6
+       ptabs/u ZERO, tr7
+       synci
+
+       /*
+        * Read/Set CPU state. After this block:
+        * r29 = Initial SR
+        */
+       getcon  SR, r29
+       movi    SR_HARMLESS, r20
+       putcon  r20, SR
+
+       /*
+        * Initialize EMI/LMI. To Be Done.
+        */
+
+       /*
+        * CPU detection and/or endianness settings (?). To Be Done.
+        * Pure PIC code here, please ! Just save state into r30.
+         * After this block:
+        * r30 = CPU type/Platform Endianness
+        */
+
+       /*
+        * Set initial TLB entries for cached and uncached regions.
+        * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
+        */
+       /* Clear ITLBs */
+       pta     clear_ITLB, tr1
+       movi    MMUIR_FIRST, r21
+       movi    MMUIR_END, r22
+clear_ITLB:
+       putcfg  r21, 0, ZERO            /* Clear MMUIR[n].PTEH.V */
+       addi    r21, MMUIR_STEP, r21
+        bne    r21, r22, tr1
+
+       /* Clear DTLBs */
+       pta     clear_DTLB, tr1
+       movi    MMUDR_FIRST, r21
+       movi    MMUDR_END, r22
+clear_DTLB:
+       putcfg  r21, 0, ZERO            /* Clear MMUDR[n].PTEH.V */
+       addi    r21, MMUDR_STEP, r21
+        bne    r21, r22, tr1
+
+       /* Map one big (512Mb) page for ITLB */
+       movi    MMUIR_FIRST, r21
+       movi    MMUIR_TEXT_L, r22       /* PTEL first */
+       add.l   r22, r63, r22           /* Sign extend */
+       putcfg  r21, 1, r22             /* Set MMUIR[0].PTEL */
+       movi    MMUIR_TEXT_H, r22       /* PTEH last */
+       add.l   r22, r63, r22           /* Sign extend */
+       putcfg  r21, 0, r22             /* Set MMUIR[0].PTEH */
+
+       /* Map one big CACHED (512Mb) page for DTLB */
+       movi    MMUDR_FIRST, r21
+       movi    MMUDR_CACHED_L, r22     /* PTEL first */
+       add.l   r22, r63, r22           /* Sign extend */
+       putcfg  r21, 1, r22             /* Set MMUDR[0].PTEL */
+       movi    MMUDR_CACHED_H, r22     /* PTEH last */
+       add.l   r22, r63, r22           /* Sign extend */
+       putcfg  r21, 0, r22             /* Set MMUDR[0].PTEH */
+
+#ifdef CONFIG_EARLY_PRINTK
+       /*
+        * Setup a DTLB translation for SCIF phys.
+        */
+       addi    r21, MMUDR_STEP, r21
+       movi    0x0a03, r22     /* SCIF phys */
+       shori   0x0148, r22
+       putcfg  r21, 1, r22     /* PTEL first */
+       movi    0xfa03, r22     /* 0xfa030000, fixed SCIF virt */
+       shori   0x0003, r22
+       putcfg  r21, 0, r22     /* PTEH last */
+#endif
+
+       /*
+        * Set cache behaviours.
+        */
+       /* ICache */
+       movi    ICCR_BASE, r21
+       movi    ICCR0_INIT_VAL, r22
+       movi    ICCR1_INIT_VAL, r23
+       putcfg  r21, ICCR_REG0, r22
+       putcfg  r21, ICCR_REG1, r23
+
+       /* OCache */
+       movi    OCCR_BASE, r21
+       movi    OCCR0_INIT_VAL, r22
+       movi    OCCR1_INIT_VAL, r23
+       putcfg  r21, OCCR_REG0, r22
+       putcfg  r21, OCCR_REG1, r23
+
+
+       /*
+        * Enable Caches and MMU. Do the first non-PIC jump.
+         * Now head.S global variables, constants and externs
+        * can be used.
+        */
+       getcon  SR, r21
+       movi    SR_ENABLE_MMU, r22
+       or      r21, r22, r21
+       putcon  r21, SSR
+       movi    hyperspace, r22
+       ori     r22, 1, r22         /* Make it SHmedia, not required but..*/
+       putcon  r22, SPC
+       synco
+       rte                         /* And now go into the hyperspace ... */
+hyperspace:                        /* ... that's the next instruction !  */
+
+       /*
+        * Set CPU to a consistent state.
+        * r31 = FPU support flag
+        * tr0/tr7 in use. Others give a chance to loop somewhere safe
+        */
+       movi    start_kernel, r32
+       ori     r32, 1, r32
+
+       ptabs   r32, tr0                    /* r32 = _start_kernel address        */
+       pta/u   hopeless, tr1
+       pta/u   hopeless, tr2
+       pta/u   hopeless, tr3
+       pta/u   hopeless, tr4
+       pta/u   hopeless, tr5
+       pta/u   hopeless, tr6
+       pta/u   hopeless, tr7
+       gettr   tr1, r28                        /* r28 = hopeless address */
+
+       /* Set initial stack pointer */
+       movi    init_thread_union, SP
+       putcon  SP, KCR0                /* Set current to init_task */
+       movi    THREAD_SIZE, r22        /* Point to the end */
+       add     SP, r22, SP
+
+       /*
+        * Initialize FPU.
+        * Keep FPU flag in r31. After this block:
+        * r31 = FPU flag
+        */
+       movi fpu_in_use, r31    /* Temporary */
+
+#ifdef CONFIG_SH_FPU
+       getcon  SR, r21
+       movi    SR_ENABLE_FPU, r22
+       and     r21, r22, r22
+       putcon  r22, SR                 /* Try to enable */
+       getcon  SR, r22
+       xor     r21, r22, r21
+       shlri   r21, 15, r21            /* Supposedly 0/1 */
+       st.q    r31, 0 , r21            /* Set fpu_in_use */
+#else
+       movi    0, r21
+       st.q    r31, 0 , r21            /* Set fpu_in_use */
+#endif
+       or      r21, ZERO, r31          /* Set FPU flag at last */
+
+#ifndef CONFIG_SH_NO_BSS_INIT
+/* Don't clear BSS if running on slow platforms such as an RTL simulation,
+   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
+   to be all zero on boot anyway. */
+       /*
+        * Clear bss
+        */
+       pta     clear_quad, tr1
+       movi    __bss_start, r22
+       movi    _end, r23
+clear_quad:
+       st.q    r22, 0, ZERO
+       addi    r22, 8, r22
+       bne     r22, r23, tr1           /* Both quad aligned, see vmlinux.lds.S */
+#endif
+       pta/u   hopeless, tr1
+
+       /* Say bye to head.S but be prepared to wrongly get back ... */
+       blink   tr0, LINK
+
+       /* If we ever get back here through LINK/tr1-tr7 */
+       pta/u   hopeless, tr7
+
+hopeless:
+       /*
+        * Something's badly wrong here. Loop endlessly,
+         * there's nothing more we can do about it.
+        *
+        * Note on hopeless: it can be jumped into invariably
+        * before or after jumping into hyperspace. The only
+        * requirement is to be PIC called (PTA) before and
+        * any way (PTA/PTABS) after. According to Virtual
+        * to Physical mapping a simulator/emulator can easily
+        * tell where we came here from just looking at hopeless
+        * (PC) address.
+        *
+        * For debugging purposes:
+        * (r28) hopeless/loop address
+        * (r29) Original SR
+        * (r30) CPU type/Platform endianness
+        * (r31) FPU Support
+        * (r32) _start_kernel address
+        */
+       blink   tr7, ZERO
diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
deleted file mode 100644
index 186406d..0000000
--- a/arch/sh64/kernel/head.S
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/kernel/head.S
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003, 2004  Paul Mundt
- *
- *
- * [EMAIL PROTECTED]:   2nd May 2002
- *    Moved definition of empty_zero_page to its own section allowing
- *    it to be placed at an absolute address known at load time.
- *
- * [EMAIL PROTECTED]:          9th May 2003
- *    Kill off GLOBAL_NAME() usage.
- *
- * [EMAIL PROTECTED]:          8th May 2004
- *    Add early SCIF console DTLB mapping.
- */
-
-
-#include <asm/page.h>
-#include <asm/mmu_context.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/registers.h>
-#include <asm/thread_info.h>
-
-/*
- * MMU defines: TLB boundaries.
- */
-
-#define MMUIR_FIRST    ITLB_FIXED
-#define MMUIR_END      ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUIR_STEP     TLB_STEP
-
-#define MMUDR_FIRST    DTLB_FIXED
-#define MMUDR_END      DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUDR_STEP     TLB_STEP
-
-/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
-#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
-#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
-#endif
-
-/*
- * MMU defines: Fixed TLBs.
- */
-/* Deal safely with the case where the base of RAM is not 512Mb aligned */
-
-#define ALIGN_512M_MASK (0xffffffffe0000000)
-#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
-#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
-
-#define MMUIR_TEXT_H   (0x0000000000000003 | ALIGNED_EFFECTIVE)
-                       /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-
-#define MMUIR_TEXT_L   (0x000000000000009a | ALIGNED_PHYSICAL)
-                       /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
-
-#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
-                       /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
-                       /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
-
-#ifdef CONFIG_ICACHE_DISABLED
-#define        ICCR0_INIT_VAL  ICCR0_OFF                       /* ICACHE off */
-#else
-#define        ICCR0_INIT_VAL  ICCR0_ON | ICCR0_ICI            /* ICE + ICI */
-#endif
-#define        ICCR1_INIT_VAL  ICCR1_NOLOCK                    /* No locking */
-
-#if defined (CONFIG_DCACHE_DISABLED)
-#define        OCCR0_INIT_VAL  OCCR0_OFF                          /* D-cache: off  */
-#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
-#define        OCCR0_INIT_VAL  OCCR0_ON | OCCR0_OCI | OCCR0_WT    /* D-cache: on,   */
-                                                          /* WT, invalidate */
-#elif defined (CONFIG_DCACHE_WRITE_BACK)
-#define        OCCR0_INIT_VAL  OCCR0_ON | OCCR0_OCI | OCCR0_WB    /* D-cache: on,   */
-                                                          /* WB, invalidate */
-#else
-#error preprocessor flag CONFIG_DCACHE_... not recognized!
-#endif
-
-#define        OCCR1_INIT_VAL  OCCR1_NOLOCK                       /* No locking     */
-
-       .section        .empty_zero_page, "aw"
-       .global empty_zero_page
-
-empty_zero_page:
-       .long   1               /* MOUNT_ROOT_RDONLY */
-       .long   0               /* RAMDISK_FLAGS */
-       .long   0x0200          /* ORIG_ROOT_DEV */
-       .long   1               /* LOADER_TYPE */
-       .long   0x00800000      /* INITRD_START */
-       .long   0x00800000      /* INITRD_SIZE */
-       .long   0
-
-       .text
-       .balign 4096,0,4096
-
-       .section        .data, "aw"
-       .balign PAGE_SIZE
-
-       .section        .data, "aw"
-       .balign PAGE_SIZE
-
-       .global swapper_pg_dir
-swapper_pg_dir:
-       .space PAGE_SIZE, 0
-
-       .global empty_bad_page
-empty_bad_page:
-       .space PAGE_SIZE, 0
-
-       .global empty_bad_pte_table
-empty_bad_pte_table:
-       .space PAGE_SIZE, 0
-
-       .global fpu_in_use
-fpu_in_use:    .quad   0
-
-
-       .section        .text.head, "ax"
-       .balign L1_CACHE_BYTES
-/*
- * Condition at the entry of __stext:
- * . Reset state:
- *   . SR.FD    = 1            (FPU disabled)
- *   . SR.BL    = 1            (Exceptions disabled)
- *   . SR.MD    = 1            (Privileged Mode)
- *   . SR.MMU   = 0            (MMU Disabled)
- *   . SR.CD    = 0            (CTC User Visible)
- *   . SR.IMASK = Undefined    (Interrupt Mask)
- *
- * Operations supposed to be performed by __stext:
- * . prevent speculative fetch onto device memory while MMU is off
- * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
- * . first, save CPU state and set it to something harmless
- * . any CPU detection and/or endianness settings (?)
- * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
- * . set initial TLB entries for cached and uncached regions
- *   (no fine granularity paging)
- * . set initial cache state
- * . enable MMU and caches
- * . set CPU to a consistent state
- *   . registers (including stack pointer and current/KCR0)
- *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
- *     at this stage. This is all to later Linux initialization steps.
- *   . initialize FPU
- * . clear BSS
- * . jump into start_kernel()
- * . be prepared to hopeless start_kernel() returns.
- *
- */
-       .global _stext
-_stext:
-       /*
-        * Prevent speculative fetch on device memory due to
-        * uninitialized target registers.
-        */
-       ptabs/u ZERO, tr0
-       ptabs/u ZERO, tr1
-       ptabs/u ZERO, tr2
-       ptabs/u ZERO, tr3
-       ptabs/u ZERO, tr4
-       ptabs/u ZERO, tr5
-       ptabs/u ZERO, tr6
-       ptabs/u ZERO, tr7
-       synci
-
-       /*
-        * Read/Set CPU state. After this block:
-        * r29 = Initial SR
-        */
-       getcon  SR, r29
-       movi    SR_HARMLESS, r20
-       putcon  r20, SR
-
-       /*
-        * Initialize EMI/LMI. To Be Done.
-        */
-
-       /*
-        * CPU detection and/or endianness settings (?). To Be Done.
-        * Pure PIC code here, please ! Just save state into r30.
-         * After this block:
-        * r30 = CPU type/Platform Endianness
-        */
-
-       /*
-        * Set initial TLB entries for cached and uncached regions.
-        * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
-        */
-       /* Clear ITLBs */
-       pta     clear_ITLB, tr1
-       movi    MMUIR_FIRST, r21
-       movi    MMUIR_END, r22
-clear_ITLB:
-       putcfg  r21, 0, ZERO            /* Clear MMUIR[n].PTEH.V */
-       addi    r21, MMUIR_STEP, r21
-        bne    r21, r22, tr1
-
-       /* Clear DTLBs */
-       pta     clear_DTLB, tr1
-       movi    MMUDR_FIRST, r21
-       movi    MMUDR_END, r22
-clear_DTLB:
-       putcfg  r21, 0, ZERO            /* Clear MMUDR[n].PTEH.V */
-       addi    r21, MMUDR_STEP, r21
-        bne    r21, r22, tr1
-
-       /* Map one big (512Mb) page for ITLB */
-       movi    MMUIR_FIRST, r21
-       movi    MMUIR_TEXT_L, r22       /* PTEL first */
-       add.l   r22, r63, r22           /* Sign extend */
-       putcfg  r21, 1, r22             /* Set MMUIR[0].PTEL */
-       movi    MMUIR_TEXT_H, r22       /* PTEH last */
-       add.l   r22, r63, r22           /* Sign extend */
-       putcfg  r21, 0, r22             /* Set MMUIR[0].PTEH */
-
-       /* Map one big CACHED (512Mb) page for DTLB */
-       movi    MMUDR_FIRST, r21
-       movi    MMUDR_CACHED_L, r22     /* PTEL first */
-       add.l   r22, r63, r22           /* Sign extend */
-       putcfg  r21, 1, r22             /* Set MMUDR[0].PTEL */
-       movi    MMUDR_CACHED_H, r22     /* PTEH last */
-       add.l   r22, r63, r22           /* Sign extend */
-       putcfg  r21, 0, r22             /* Set MMUDR[0].PTEH */
-
-#ifdef CONFIG_EARLY_PRINTK
-       /*
-        * Setup a DTLB translation for SCIF phys.
-        */
-       addi    r21, MMUDR_STEP, r21
-       movi    0x0a03, r22     /* SCIF phys */
-       shori   0x0148, r22
-       putcfg  r21, 1, r22     /* PTEL first */
-       movi    0xfa03, r22     /* 0xfa030000, fixed SCIF virt */
-       shori   0x0003, r22
-       putcfg  r21, 0, r22     /* PTEH last */
-#endif
-
-       /*
-        * Set cache behaviours.
-        */
-       /* ICache */
-       movi    ICCR_BASE, r21
-       movi    ICCR0_INIT_VAL, r22
-       movi    ICCR1_INIT_VAL, r23
-       putcfg  r21, ICCR_REG0, r22
-       putcfg  r21, ICCR_REG1, r23
-
-       /* OCache */
-       movi    OCCR_BASE, r21
-       movi    OCCR0_INIT_VAL, r22
-       movi    OCCR1_INIT_VAL, r23
-       putcfg  r21, OCCR_REG0, r22
-       putcfg  r21, OCCR_REG1, r23
-
-
-       /*
-        * Enable Caches and MMU. Do the first non-PIC jump.
-         * Now head.S global variables, constants and externs
-        * can be used.
-        */
-       getcon  SR, r21
-       movi    SR_ENABLE_MMU, r22
-       or      r21, r22, r21
-       putcon  r21, SSR
-       movi    hyperspace, r22
-       ori     r22, 1, r22         /* Make it SHmedia, not required but..*/
-       putcon  r22, SPC
-       synco
-       rte                         /* And now go into the hyperspace ... */
-hyperspace:                        /* ... that's the next instruction !  */
-
-       /*
-        * Set CPU to a consistent state.
-        * r31 = FPU support flag
-        * tr0/tr7 in use. Others give a chance to loop somewhere safe
-        */
-       movi    start_kernel, r32
-       ori     r32, 1, r32
-
-       ptabs   r32, tr0                    /* r32 = _start_kernel address        */
-       pta/u   hopeless, tr1
-       pta/u   hopeless, tr2
-       pta/u   hopeless, tr3
-       pta/u   hopeless, tr4
-       pta/u   hopeless, tr5
-       pta/u   hopeless, tr6
-       pta/u   hopeless, tr7
-       gettr   tr1, r28                        /* r28 = hopeless address */
-
-       /* Set initial stack pointer */
-       movi    init_thread_union, SP
-       putcon  SP, KCR0                /* Set current to init_task */
-       movi    THREAD_SIZE, r22        /* Point to the end */
-       add     SP, r22, SP
-
-       /*
-        * Initialize FPU.
-        * Keep FPU flag in r31. After this block:
-        * r31 = FPU flag
-        */
-       movi fpu_in_use, r31    /* Temporary */
-
-#ifdef CONFIG_SH_FPU
-       getcon  SR, r21
-       movi    SR_ENABLE_FPU, r22
-       and     r21, r22, r22
-       putcon  r22, SR                 /* Try to enable */
-       getcon  SR, r22
-       xor     r21, r22, r21
-       shlri   r21, 15, r21            /* Supposedly 0/1 */
-       st.q    r31, 0 , r21            /* Set fpu_in_use */
-#else
-       movi    0, r21
-       st.q    r31, 0 , r21            /* Set fpu_in_use */
-#endif
-       or      r21, ZERO, r31          /* Set FPU flag at last */
-
-#ifndef CONFIG_SH_NO_BSS_INIT
-/* Don't clear BSS if running on slow platforms such as an RTL simulation,
-   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
-   to be all zero on boot anyway. */
-       /*
-        * Clear bss
-        */
-       pta     clear_quad, tr1
-       movi    __bss_start, r22
-       movi    _end, r23
-clear_quad:
-       st.q    r22, 0, ZERO
-       addi    r22, 8, r22
-       bne     r22, r23, tr1           /* Both quad aligned, see vmlinux.lds.S */
-#endif
-       pta/u   hopeless, tr1
-
-       /* Say bye to head.S but be prepared to wrongly get back ... */
-       blink   tr0, LINK
-
-       /* If we ever get back here through LINK/tr1-tr7 */
-       pta/u   hopeless, tr7
-
-hopeless:
-       /*
-        * Something's badly wrong here. Loop endlessly,
-         * there's nothing more we can do about it.
-        *
-        * Note on hopeless: it can be jumped into invariably
-        * before or after jumping into hyperspace. The only
-        * requirement is to be PIC called (PTA) before and
-        * any way (PTA/PTABS) after. According to Virtual
-        * to Physical mapping a simulator/emulator can easily
-        * tell where we came here from just looking at hopeless
-        * (PC) address.
-        *
-        * For debugging purposes:
-        * (r28) hopeless/loop address
-        * (r29) Original SR
-        * (r30) CPU type/Platform endianness
-        * (r31) FPU Support
-        * (r32) _start_kernel address
-        */
-       blink   tr7, ZERO
-
-
diff --git a/include/asm-sh/tlb.h b/include/asm-sh/tlb.h
index 53d185b..56ad1fb 100644
--- a/include/asm-sh/tlb.h
+++ b/include/asm-sh/tlb.h
@@ -1,6 +1,12 @@
 #ifndef __ASM_SH_TLB_H
 #define __ASM_SH_TLB_H
 
+#ifdef CONFIG_SUPERH64
+# include "tlb_64.h"
+#endif
+
+#ifndef __ASSEMBLY__
+
 #define tlb_start_vma(tlb, vma) \
        flush_cache_range(vma, vma->vm_start, vma->vm_end)
 
@@ -15,4 +21,6 @@
 #define tlb_flush(tlb)                         flush_tlb_mm((tlb)->mm)
 
 #include <asm-generic/tlb.h>
-#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SH_TLB_H */
diff --git a/include/asm-sh/tlb_64.h b/include/asm-sh/tlb_64.h
new file mode 100644
index 0000000..0308e05
--- /dev/null
+++ b/include/asm-sh/tlb_64.h
@@ -0,0 +1,69 @@
+/*
+ * include/asm-sh/tlb_64.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_SH_TLB_64_H
+#define __ASM_SH_TLB_64_H
+
+/* ITLB defines */
+#define ITLB_FIXED     0x00000000      /* First fixed ITLB, see head.S */
+#define ITLB_LAST_VAR_UNRESTRICTED     0x000003F0      /* Last ITLB */
+
+/* DTLB defines */
+#define DTLB_FIXED     0x00800000      /* First fixed DTLB, see head.S */
+#define DTLB_LAST_VAR_UNRESTRICTED     0x008003F0      /* Last DTLB */
+
+#ifndef __ASSEMBLY__
+
+/**
+ * for_each_dtlb_entry
+ *
+ * @tlb:       TLB entry
+ *
+ * Iterate over free (non-wired) DTLB entries
+ */
+#define for_each_dtlb_entry(tlb)               \
+       for (tlb  = cpu_data->dtlb.first;       \
+            tlb <= cpu_data->dtlb.last;        \
+            tlb += cpu_data->dtlb.step)
+
+/**
+ * for_each_itlb_entry
+ *
+ * @tlb:       TLB entry
+ *
+ * Iterate over free (non-wired) ITLB entries
+ */
+#define for_each_itlb_entry(tlb)               \
+       for (tlb  = cpu_data->itlb.first;       \
+            tlb <= cpu_data->itlb.last;        \
+            tlb += cpu_data->itlb.step)
+
+/**
+ * __flush_tlb_slot
+ *
+ * @slot:      Address of TLB slot.
+ *
+ * Flushes TLB slot @slot.
+ */
+static inline void __flush_tlb_slot(unsigned long long slot)
+{
+       __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
+}
+
+/* arch/sh64/mm/tlb.c */
+int sh64_tlb_init(void);
+unsigned long long sh64_next_free_dtlb_entry(void);
+unsigned long long sh64_get_wired_dtlb_entry(void);
+int sh64_put_wired_dtlb_entry(unsigned long long entry);
+void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
+                        unsigned long asid, unsigned long paddr);
+void sh64_teardown_tlb_slot(unsigned long long config_addr);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SH_TLB_64_H */
diff --git a/include/asm-sh64/tlb.h b/include/asm-sh64/tlb.h
deleted file mode 100644
index 4979408..0000000
--- a/include/asm-sh64/tlb.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * include/asm-sh64/tlb.h
- *
- * Copyright (C) 2003  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#ifndef __ASM_SH64_TLB_H
-#define __ASM_SH64_TLB_H
-
-/*
- * Note! These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED
- * for head.S! Once this limitation is gone, we can clean the rest of this up.
- */
-
-/* ITLB defines */
-#define ITLB_FIXED     0x00000000      /* First fixed ITLB, see head.S */
-#define ITLB_LAST_VAR_UNRESTRICTED     0x000003F0      /* Last ITLB */
-
-/* DTLB defines */
-#define DTLB_FIXED     0x00800000      /* First fixed DTLB, see head.S */
-#define DTLB_LAST_VAR_UNRESTRICTED     0x008003F0      /* Last DTLB */
-
-#ifndef __ASSEMBLY__
-
-/**
- * for_each_dtlb_entry
- *
- * @tlb:       TLB entry
- *
- * Iterate over free (non-wired) DTLB entries
- */
-#define for_each_dtlb_entry(tlb)               \
-       for (tlb  = cpu_data->dtlb.first;       \
-            tlb <= cpu_data->dtlb.last;        \
-            tlb += cpu_data->dtlb.step)
-
-/**
- * for_each_itlb_entry
- *
- * @tlb:       TLB entry
- *
- * Iterate over free (non-wired) ITLB entries
- */
-#define for_each_itlb_entry(tlb)               \
-       for (tlb  = cpu_data->itlb.first;       \
-            tlb <= cpu_data->itlb.last;        \
-            tlb += cpu_data->itlb.step)
-
-/**
- * __flush_tlb_slot
- *
- * @slot:      Address of TLB slot.
- *
- * Flushes TLB slot @slot.
- */
-static inline void __flush_tlb_slot(unsigned long long slot)
-{
-       __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
-}
-
-/* arch/sh64/mm/tlb.c */
-extern int sh64_tlb_init(void);
-extern unsigned long long sh64_next_free_dtlb_entry(void);
-extern unsigned long long sh64_get_wired_dtlb_entry(void);
-extern int sh64_put_wired_dtlb_entry(unsigned long long entry);
-
-extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr);
-extern void sh64_teardown_tlb_slot(unsigned long long config_addr);
-
-#define tlb_start_vma(tlb, vma) \
-       flush_cache_range(vma, vma->vm_start, vma->vm_end)
-
-#define tlb_end_vma(tlb, vma)  \
-       flush_tlb_range(vma, vma->vm_start, vma->vm_end)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address)      do { } while (0)
-
-/*
- * Flush whole TLBs for MM
- */
-#define tlb_flush(tlb)         flush_tlb_mm((tlb)->mm)
-
-#include <asm-generic/tlb.h>
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_SH64_TLB_H */
-
-