Module Name:    src
Committed By:   matt
Date:           Wed Jun 12 21:34:12 UTC 2013

Modified Files:
        src/sys/arch/arm/arm: fiq.c vectors.S
        src/sys/arch/arm/arm32: arm32_kvminit.c arm32_machdep.c pmap.c
        src/sys/arch/arm/conf: files.arm
        src/sys/arch/arm/include/arm32: pmap.h

Log Message:
Add an ARM_HAS_VBAR option which forces the use of the VBAR register.  This
allows much of the code dealing with vector_page mappings to be eliminated.  On a
BEAGLEBONE kernel, this saves 8KB of text and instructions that never have
to be executed.  (The PJ4B has VBAR but doesn't implement the security
extensions it is part of, so a method was needed to allow it to use VBAR without
relying on the default test for the security extensions.)


To generate a diff of this commit:
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/arm/arm/fiq.c \
    src/sys/arch/arm/arm/vectors.S
cvs rdiff -u -r1.19 -r1.20 src/sys/arch/arm/arm32/arm32_kvminit.c
cvs rdiff -u -r1.93 -r1.94 src/sys/arch/arm/arm32/arm32_machdep.c
cvs rdiff -u -r1.256 -r1.257 src/sys/arch/arm/arm32/pmap.c
cvs rdiff -u -r1.118 -r1.119 src/sys/arch/arm/conf/files.arm
cvs rdiff -u -r1.119 -r1.120 src/sys/arch/arm/include/arm32/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm/fiq.c
diff -u src/sys/arch/arm/arm/fiq.c:1.6 src/sys/arch/arm/arm/fiq.c:1.7
--- src/sys/arch/arm/arm/fiq.c:1.6	Wed Nov 19 06:29:48 2008
+++ src/sys/arch/arm/arm/fiq.c	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $	*/
+/*	$NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -62,6 +62,7 @@ extern char fiq_nullhandler[], fiq_nullh
 #define	FIQ_BIT		R15_FIQ_DISABLE
 #endif /* __PROG32 */
 
+#ifndef ARM_HAS_VBAR
 /*
  * fiq_installhandler:
  *
@@ -176,3 +177,4 @@ fiq_release(struct fiqhandler *fh)
 	oldirqstate &= ~FIQ_BIT;
 	restore_interrupts(oldirqstate);
 }
+#endif /* !ARM_HAS_VBAR */
Index: src/sys/arch/arm/arm/vectors.S
diff -u src/sys/arch/arm/arm/vectors.S:1.6 src/sys/arch/arm/arm/vectors.S:1.7
--- src/sys/arch/arm/arm/vectors.S:1.6	Wed Jun 12 15:10:13 2013
+++ src/sys/arch/arm/arm/vectors.S	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: vectors.S,v 1.6 2013/06/12 15:10:13 matt Exp $	*/
+/*	$NetBSD: vectors.S,v 1.7 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright (C) 1994-1997 Mark Brinicombe
@@ -33,6 +33,7 @@
 
 #include "assym.h"
 #include "opt_cputypes.h"
+#include "opt_cpuoptions.h"
 #include <machine/asm.h>
 
 /*
@@ -44,11 +45,9 @@
  */
 
 	.text
-	.align	0
-	.global	_C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
 	.global _C_LABEL(fiqvector)
 
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
 	/*
 	 * ARMv[67] processors with the Security Extension have the VBAR
 	 * which redirects the low vector to any 32-byte aligned address. 
@@ -67,11 +66,14 @@ _C_LABEL(page0rel):
 	b	irq_entry
 #ifdef __ARM_FIQ_INDIRECT
 	b	_C_LABEL(fiqvector)
-#else
+#elif !defined(ARM_HAS_VBAR)
 	b	.Lfiqvector
 #endif
-#endif
+#endif /* CPU_ARMV7 || CPU_ARM11 || ARM_HAS_VBAR */
 
+#ifndef ARM_HAS_VBAR
+	.global	_C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
+	.align	0
 _C_LABEL(page0):
 	ldr	pc, .Lreset_target
 	ldr	pc, .Lundefined_target
@@ -82,13 +84,20 @@ _C_LABEL(page0):
 	ldr	pc, .Lirq_target
 #ifdef __ARM_FIQ_INDIRECT
 	ldr	pc, .Lfiq_target
-#else
+#endif
+#endif /* !ARM_HAS_VBAR */
+#ifndef __ARM_FIQ_INDIRECT
 .Lfiqvector:
+#ifdef ARM_HAS_VBAR
+	.set	_C_LABEL(fiqvector), . - _C_LABEL(page0rel)
+#else
 	.set	_C_LABEL(fiqvector), . - _C_LABEL(page0)
+#endif
 	subs	pc, lr, #4
 	.org	.Lfiqvector + 0x100
 #endif
 
+#ifndef ARM_HAS_VBAR
 _C_LABEL(page0_data):
 .Lreset_target:
 	.word	reset_entry
@@ -118,6 +127,7 @@ _C_LABEL(page0_data):
 	.word	0	/* pad it out */
 #endif
 _C_LABEL(page0_end):
+#endif /* ARM_HAS_VBAR */
 
 #ifdef __ARM_FIQ_INDIRECT
 	.data

Index: src/sys/arch/arm/arm32/arm32_kvminit.c
diff -u src/sys/arch/arm/arm32/arm32_kvminit.c:1.19 src/sys/arch/arm/arm32/arm32_kvminit.c:1.20
--- src/sys/arch/arm/arm32/arm32_kvminit.c:1.19	Wed Jun 12 17:13:05 2013
+++ src/sys/arch/arm/arm32/arm32_kvminit.c	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $	*/
+/*	$NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
@@ -122,7 +122,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
@@ -372,7 +372,9 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 #else
 	const size_t cpu_num = 1;
 #endif
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#ifdef ARM_HAS_VBAR
+	const bool map_vectors_p = false;
+#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
 	const bool map_vectors_p = vectors == ARM_VECTORS_LOW
 	    && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK);
 #else

Index: src/sys/arch/arm/arm32/arm32_machdep.c
diff -u src/sys/arch/arm/arm32/arm32_machdep.c:1.93 src/sys/arch/arm/arm32/arm32_machdep.c:1.94
--- src/sys/arch/arm/arm32/arm32_machdep.c:1.93	Wed Jun 12 17:13:05 2013
+++ src/sys/arch/arm/arm32/arm32_machdep.c	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $	*/
+/*	$NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $");
 
 #include "opt_modular.h"
 #include "opt_md.h"
@@ -130,14 +130,16 @@ extern void configure(void);
 void
 arm32_vector_init(vaddr_t va, int which)
 {
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
 	/*
 	 * If this processor has the security extension, don't bother
 	 * to move/map the vector page.  Simply point VBAR to the copy
 	 * that exists in the .text segment.
 	 */
+#ifndef ARM_HAS_VBAR
 	if (va == ARM_VECTORS_LOW
 	    && (armreg_pfr1_read() && ARM_PFR1_SEC_MASK) != 0) {
+#endif
 		extern const uint32_t page0rel[];
 		vector_page = (vaddr_t)page0rel;
 		KASSERT((vector_page & 0x1f) == 0);
@@ -147,8 +149,11 @@ arm32_vector_init(vaddr_t va, int which)
 #endif
 		cpu_control(CPU_CONTROL_VECRELOC, 0);
 		return;
+#ifndef ARM_HAS_VBAR
 	}
 #endif
+#endif
+#ifndef ARM_HAS_VBAR
 	if (CPU_IS_PRIMARY(curcpu())) {
 		extern unsigned int page0[], page0_data[];
 		unsigned int *vectors = (int *) va;
@@ -193,6 +198,7 @@ arm32_vector_init(vaddr_t va, int which)
 		 */
 		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
 	}
+#endif
 }
 
 /*
@@ -257,8 +263,10 @@ cpu_startup(void)
 	/* Set the CPU control register */
 	cpu_setup(boot_args);
 
+#ifndef ARM_HAS_VBAR
 	/* Lock down zero page */
 	vector_page_setprot(VM_PROT_READ);
+#endif
 
 	/*
 	 * Give pmap a chance to set up a few more things now the vm

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.256 src/sys/arch/arm/arm32/pmap.c:1.257
--- src/sys/arch/arm/arm32/pmap.c:1.256	Wed Jun 12 07:13:18 2013
+++ src/sys/arch/arm/arm32/pmap.c	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -1554,6 +1554,7 @@ pmap_pmap_ctor(void *arg, void *v, int f
 static void
 pmap_pinit(pmap_t pm)
 {
+#ifndef ARM_HAS_VBAR
 	struct l2_bucket *l2b;
 
 	if (vector_page < KERNEL_BASE) {
@@ -1571,6 +1572,7 @@ pmap_pinit(pmap_t pm)
 		    L1_C_DOM(pm->pm_domain);
 	} else
 		pm->pm_pl1vec = NULL;
+#endif
 }
 
 #ifdef PMAP_CACHE_VIVT
@@ -2823,6 +2825,11 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 	pt_entry_t *ptep, npte, opte;
 	u_int nflags;
 	u_int oflags;
+#ifdef ARM_HAS_VBAR
+	const bool vector_page_p = false;
+#else
+	const bool vector_page_p = (va == vector_page);
+#endif
 
 	NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
 
@@ -3014,8 +3021,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 		/*
 		 * Make sure the vector table is mapped cacheable
 		 */
-		if ((pm != pmap_kernel() && va == vector_page) ||
-		    (flags & ARM32_MMAP_CACHEABLE)) {
+		if ((vector_page_p && pm != pmap_kernel())
+		    || (flags & ARM32_MMAP_CACHEABLE)) {
 			npte |= pte_l2_s_cache_mode;
 		} else if (flags & ARM32_MMAP_WRITECOMBINE) {
 			npte |= pte_l2_s_wc_mode;
@@ -3053,8 +3060,9 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 	/*
 	 * Make sure userland mappings get the right permissions
 	 */
-	if (pm != pmap_kernel() && va != vector_page)
+	if (!vector_page_p && pm != pmap_kernel()) {
 		npte |= L2_S_PROT_U;
+	}
 
 	/*
 	 * Keep the stats up to date
@@ -3081,7 +3089,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 			 * We only need to frob the cache/tlb if this pmap
 			 * is current
 			 */
-			if (va != vector_page && l2pte_valid(npte)) {
+			if (!vector_page_p && l2pte_valid(npte)) {
 				/*
 				 * This mapping is likely to be accessed as
 				 * soon as we return to userland. Fix up the
@@ -4282,6 +4290,7 @@ pmap_activate(struct lwp *l)
 	/* No interrupts while we frob the TTB/DACR */
 	oldirqstate = disable_interrupts(IF32_bits);
 
+#ifndef ARM_HAS_VBAR
 	/*
 	 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
 	 * entry corresponding to 'vector_page' in the incoming L1 table
@@ -4294,6 +4303,7 @@ pmap_activate(struct lwp *l)
 		*npm->pm_pl1vec = npm->pm_l1vec;
 		PTE_SYNC(npm->pm_pl1vec);
 	}
+#endif
 
 	cpu_domains(ndacr);
 
@@ -4439,6 +4449,7 @@ pmap_destroy(pmap_t pm)
 	 * reference count is zero, free pmap resources and then free pmap.
 	 */
 
+#ifndef ARM_HAS_VBAR
 	if (vector_page < KERNEL_BASE) {
 		KDASSERT(!pmap_is_current(pm));
 
@@ -4446,6 +4457,7 @@ pmap_destroy(pmap_t pm)
 		pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
 		pmap_update(pm);
 	}
+#endif
 
 	LIST_REMOVE(pm, pm_list);
 
@@ -5124,6 +5136,7 @@ out:
 
 /************************ Utility routines ****************************/
 
+#ifndef ARM_HAS_VBAR
 /*
  * vector_page_setprot:
  *
@@ -5156,6 +5169,7 @@ vector_page_setprot(int prot)
 	cpu_tlb_flushD_SE(vector_page);
 	cpu_cpwait();
 }
+#endif
 
 /*
  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
@@ -5438,6 +5452,7 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
 	TAILQ_INIT(&l1_lru_list);
 	pmap_init_l1(l1, l1pt);
 
+#ifndef ARM_HAS_VBAR
 	/* Set up vector page L1 details, if necessary */
 	if (vector_page < KERNEL_BASE) {
 		pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
@@ -5447,6 +5462,7 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
 		    L1_C_DOM(pm->pm_domain);
 	} else
 		pm->pm_pl1vec = NULL;
+#endif
 
 	/*
 	 * Initialize the pmap cache

Index: src/sys/arch/arm/conf/files.arm
diff -u src/sys/arch/arm/conf/files.arm:1.118 src/sys/arch/arm/conf/files.arm:1.119
--- src/sys/arch/arm/conf/files.arm:1.118	Wed Jun 12 07:12:10 2013
+++ src/sys/arch/arm/conf/files.arm	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-#	$NetBSD: files.arm,v 1.118 2013/06/12 07:12:10 matt Exp $
+#	$NetBSD: files.arm,v 1.119 2013/06/12 21:34:12 matt Exp $
 
 # temporary define to allow easy moving to ../arch/arm/arm32
 defflag				ARM32
@@ -36,6 +36,7 @@ defflag  opt_cpuoptions.h	TPIDRPRW_IS_CU
 defflag  opt_cpuoptions.h	ARM11_PMC CORTEX_PMC
 defflag  opt_cpuoptions.h	ARM11_CACHE_WRITE_THROUGH
 defflag	 opt_cpuoptions.h	ARM11MPCORE_COMPAT_MMU
+defflag	 opt_cpuoptions.h	ARM_USE_VBAR
 # use extended small page in compatible MMU mode for ARMv6
 defflag  opt_cpuoptions.h	ARMV6_EXTENDED_SMALL_PAGE
 

Index: src/sys/arch/arm/include/arm32/pmap.h
diff -u src/sys/arch/arm/include/arm32/pmap.h:1.119 src/sys/arch/arm/include/arm32/pmap.h:1.120
--- src/sys/arch/arm/include/arm32/pmap.h:1.119	Wed Dec 12 15:09:37 2012
+++ src/sys/arch/arm/include/arm32/pmap.h	Wed Jun 12 21:34:12 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.119 2012/12/12 15:09:37 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.120 2013/06/12 21:34:12 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -186,7 +186,9 @@ struct pmap {
 	bool			pm_remove_all;
 	bool			pm_activated;
 	struct l1_ttable	*pm_l1;
+#ifndef ARM_HAS_VBAR
 	pd_entry_t		*pm_pl1vec;
+#endif
 	pd_entry_t		pm_l1vec;
 	union pmap_cache_state	pm_cstate;
 	struct uvm_object	pm_obj;

Reply via email to