Early ARM CPUs had the exception vector table at address 0.  That's a
terrible idea, since that overlaps with the address space most kernels
use for userland.  This was fixed in later versions of the
architecture, which allow putting the vectors somewhere high up in the
address space (where the kernel lives) or even remapping them to an
arbitrary address.  All ARMv7 CPUs have this feature, so we use it
unconditionally.  Therefore the code in cpuswitch() that tries to
switch around the vector page is useless.

ok?


Index: cpuswitch7.S
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/cpuswitch7.S,v
retrieving revision 1.10
diff -u -p -r1.10 cpuswitch7.S
--- cpuswitch7.S        25 Apr 2016 12:16:20 -0000      1.10
+++ cpuswitch7.S        9 Aug 2016 18:07:13 -0000
@@ -87,13 +87,6 @@
 
 /* LINTSTUB: include <sys/param.h> */
        
-/*
- * These are used for switching the translation table/DACR.
- * Since the vector page can be invalid for a short time, we must
- * disable both regular IRQs *and* FIQs.
- *
- * XXX: This is not necessary if the vector table is relocated.
- */
 #define IRQdisableALL \
        cpsid   if
 
@@ -263,8 +256,6 @@ ENTRY(cpu_switchto)
        /* rem: r10 = old L1 */
        /* rem: r11 = new L1 */
 
-       ldr     r7, [r9, #(PCB_PL1VEC)]
-
        /*
         * At this point we need to kill IRQ's again.
         *
@@ -272,36 +263,11 @@ ENTRY(cpu_switchto)
         */
        IRQdisableALL
 
-       /*
-        * Ensure the vector table is accessible by fixing up the L1
-        */
-       cmp     r7, #0                  /* No need to fixup vector table? */
-       ldrne   r2, [r7]                /* But if yes, fetch current value */
-       ldrne   r0, [r9, #(PCB_L1VEC)]  /* Fetch new vector_page value */
        mcr     CP15_DACR(r1)           /* Update DACR for new context */
-       cmpne   r2, r0                  /* Stuffing the same value? */
-#ifndef PMAP_INCLUDE_PTE_SYNC
-       strne   r0, [r7]                /* Nope, update it */
-#else
-       beq     .Lcs_same_vector
-       str     r0, [r7]                /* Otherwise, update it */
-
-       /*
-        * Need to sync the cache to make sure that last store is
-        * visible to the MMU.
-        */
-       ldr     r2, .Lcpufuncs
-       mov     r0, r7
-       mov     r1, #4
-       mov     lr, pc
-       ldr     pc, [r2, #CF_DCACHE_WB_RANGE]
-
-.Lcs_same_vector:
-#endif /* PMAP_INCLUDE_PTE_SYNC */
 
        cmp     r10, r11                /* Switching to the same L1? */
        ldr     r10, .Lcpufuncs
-       beq     .Lcs_same_l1            /* Yup. */
+       beq     .Lcs_context_switched   /* Yup. */
 
        /*
         * Do a full context switch, including full TLB flush.
@@ -309,19 +275,6 @@ ENTRY(cpu_switchto)
        mov     r0, r11
        mov     lr, pc
        ldr     pc, [r10, #CF_CONTEXT_SWITCH]
-
-       b       .Lcs_context_switched
-
-       /*
-        * We're switching to a different process in the same L1.
-        * In this situation, we only need to flush the TLB for the
-        * vector_page mapping, and even then only if r7 is non-NULL.
-        */
-.Lcs_same_l1:
-       cmp     r7, #0
-       movne   r0, #0                  /* We *know* vector_page's VA is 0x0 */
-       movne   lr, pc
-       ldrne   pc, [r10, #CF_TLB_FLUSHID_SE]
 
 .Lcs_context_switched:
 

Reply via email to