Module Name:    src
Committed By:   matt
Date:           Wed Jun 12 17:13:05 UTC 2013

Modified Files:
        src/sys/arch/arm/arm32: arm32_kvminit.c arm32_machdep.c

Log Message:
Add support for the VBAR (vector base address register) to remap the vectors
into the kernel text segment.  This register is available on arm1176 and
all cortex processors since they all have the arm security (trustzone)
extension.  We avoid having to specially map either vector page (0x00000000
or 0xffff0000) and instead use VBAR to point both to page0rel in the text
segment.  This vector group differs from the normal page0 in that, since it
lives in the kernel text, it can branch directly to each exception routine
instead of loading the address into the PC.  This should result in a tiny
improvement in speed since we eliminate a TLB entry mapping the vector page
and a load on every exception.

XXX Add a __HAVE_ARM_TRUSTZONE cpp define and use it to eliminate all code
having to do with manipulating the vector page.


To generate a diff of this commit:
cvs rdiff -u -r1.18 -r1.19 src/sys/arch/arm/arm32/arm32_kvminit.c
cvs rdiff -u -r1.92 -r1.93 src/sys/arch/arm/arm32/arm32_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm32/arm32_kvminit.c
diff -u src/sys/arch/arm/arm32/arm32_kvminit.c:1.18 src/sys/arch/arm/arm32/arm32_kvminit.c:1.19
--- src/sys/arch/arm/arm32/arm32_kvminit.c:1.18	Wed Feb 27 22:15:46 2013
+++ src/sys/arch/arm/arm32/arm32_kvminit.c	Wed Jun 12 17:13:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_kvminit.c,v 1.18 2013/02/27 22:15:46 matt Exp $	*/
+/*	$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
@@ -122,7 +122,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.18 2013/02/27 22:15:46 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
@@ -372,6 +372,12 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 #else
 	const size_t cpu_num = 1;
 #endif
+#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+	const bool map_vectors_p = vectors == ARM_VECTORS_LOW
+	    && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK);
+#else
+	const bool map_vectors_p = true;
+#endif
 
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
 	KASSERT(mapallmem_p);
@@ -448,15 +454,18 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	 * allocate the L2 page.
 	 */
 
-	/*
-	 * First allocate L2 page for the vectors.
-	 */
+	if (map_vectors_p) {
+		/*
+		 * First allocate L2 page for the vectors.
+		 */
 #ifdef VERBOSE_INIT_ARM
-	printf(" vector");
+		printf(" vector");
 #endif
-	valloc_pages(bmi, &bmi->bmi_vector_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
-	add_pages(bmi, &bmi->bmi_vector_l2pt);
+		valloc_pages(bmi, &bmi->bmi_vector_l2pt,
+		    L2_TABLE_SIZE / PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
+		    PTE_PAGETABLE, true);
+		add_pages(bmi, &bmi->bmi_vector_l2pt);
+	}
 
 	/*
 	 * Now allocate L2 pages for the kernel
@@ -525,13 +534,15 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	add_pages(bmi, &msgbuf);
 	msgbufphys = msgbuf.pv_pa;
 
-	/*
-	 * Allocate a page for the system vector page.
-	 * This page will just contain the system vectors and can be
-	 * shared by all processes.
-	 */
-	valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
-	    PTE_CACHE, true);
+	if (map_vectors_p) {
+		/*
+		 * Allocate a page for the system vector page.
+		 * This page will just contain the system vectors and can be
+		 * shared by all processes.
+		 */
+		valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
+		    PTE_CACHE, true);
+	}
 	systempage.pv_va = vectors;
 
 	/*
@@ -568,14 +579,17 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	vaddr_t l1pt_va = kernel_l1pt.pv_va;
 	paddr_t l1pt_pa = kernel_l1pt.pv_pa;
 
-	/* Map the L2 pages tables in the L1 page table */
-	pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
-	    &bmi->bmi_vector_l2pt);
-#ifdef VERBOSE_INIT_ARM
-	printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx\n (vectors)",
-	    __func__, bmi->bmi_vector_l2pt.pv_va, bmi->bmi_vector_l2pt.pv_pa,
-	    systempage.pv_va);
+	if (map_vectors_p) {
+		/* Map the L2 pages tables in the L1 page table */
+		pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
+		    &bmi->bmi_vector_l2pt);
+#ifdef VERBOSE_INIT_ARM
+		printf("%s: adding L2 pt (VA %#lx, PA %#lx) "
+		    "for VA %#lx\n (vectors)",
+		    __func__, bmi->bmi_vector_l2pt.pv_va,
+		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
 #endif
+	}
 
 	const vaddr_t kernel_base =
 	    KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
@@ -772,9 +786,11 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	 * Now we map the stuff that isn't directly after the kernel
 	 */
 
-	/* Map the vector page. */
-	pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+	if (map_vectors_p) {
+		/* Map the vector page. */
+		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+	}
 
 	/* Map the Mini-Data cache clean area. */ 
 #if ARM_MMU_XSCALE == 1
@@ -851,10 +867,12 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
 	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
 	    (int)msgbuf_pgs);
-	printf(mem_fmt, "Exception Vectors",
-	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
-	    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
-	    1);
+	if (map_vectors_p) {
+		printf(mem_fmt, "Exception Vectors",
+		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
+		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
+		    1);
+	}
 	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
 		pv = &bmi->bmi_freeblocks[i];
 

Index: src/sys/arch/arm/arm32/arm32_machdep.c
diff -u src/sys/arch/arm/arm32/arm32_machdep.c:1.92 src/sys/arch/arm/arm32/arm32_machdep.c:1.93
--- src/sys/arch/arm/arm32/arm32_machdep.c:1.92	Sun Feb  3 15:57:09 2013
+++ src/sys/arch/arm/arm32/arm32_machdep.c	Wed Jun 12 17:13:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_machdep.c,v 1.92 2013/02/03 15:57:09 matt Exp $	*/
+/*	$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $	*/
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.92 2013/02/03 15:57:09 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $");
 
 #include "opt_modular.h"
 #include "opt_md.h"
@@ -130,6 +130,25 @@ extern void configure(void);
 void
 arm32_vector_init(vaddr_t va, int which)
 {
+#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+	/*
+	 * If this processor has the security extension, don't bother
+	 * to move/map the vector page.  Simply point VBAR to the copy
+	 * that exists in the .text segment.
+	 */
+	if (va == ARM_VECTORS_LOW
+	    && (armreg_pfr1_read() && ARM_PFR1_SEC_MASK) != 0) {
+		extern const uint32_t page0rel[];
+		vector_page = (vaddr_t)page0rel;
+		KASSERT((vector_page & 0x1f) == 0);
+		armreg_vbar_write(vector_page);
+#ifdef VERBOSE_INIT_ARM
+		printf(" vbar=%p", page0rel);
+#endif
+		cpu_control(CPU_CONTROL_VECRELOC, 0);
+		return;
+	}
+#endif
 	if (CPU_IS_PRIMARY(curcpu())) {
 		extern unsigned int page0[], page0_data[];
 		unsigned int *vectors = (int *) va;

Reply via email to