Module Name:    src
Committed By:   matt
Date:           Wed Jun 12 07:17:23 UTC 2013

Modified Files:
        src/sys/arch/arm/arm: vectors.S

Log Message:
If we are using VBAR to access the system vectors, we can simply branch
to the exception routines, avoiding a load for each exception.  (The
traditional vector page at 0x00000000 or 0xffff0000 sits far from the
kernel text, so each of its entries must load the handler's absolute
address; a table placed inside the kernel image can reach the handlers
with plain PC-relative branches.)  VBAR exists only on processors that
implement the Security Extensions.
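
For context, a minimal sketch (not part of this commit) of how a kernel
might switch over to the new table; the CP15 access below is the
architectural VBAR encoding, and page0rel is the symbol introduced in
the diff further down:

	ldr	r0, =_C_LABEL(page0rel)	/* 32-byte aligned branch table */
	mcr	p15, 0, r0, c12, c0, 0	/* write VBAR */
	isb				/* ARMv7; ARMv6 uses the CP15 ISB op */

Note that VBAR is only consulted while SCTLR.V is clear, i.e. while the
low (0x00000000) vector base is selected.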


To generate a diff of this commit:
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/arm/arm/vectors.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm/vectors.S
diff -u src/sys/arch/arm/arm/vectors.S:1.4 src/sys/arch/arm/arm/vectors.S:1.5
--- src/sys/arch/arm/arm/vectors.S:1.4	Sat Aug 17 16:36:32 2002
+++ src/sys/arch/arm/arm/vectors.S	Wed Jun 12 07:17:23 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: vectors.S,v 1.4 2002/08/17 16:36:32 thorpej Exp $	*/
+/*	$NetBSD: vectors.S,v 1.5 2013/06/12 07:17:23 matt Exp $	*/
 
 /*
  * Copyright (C) 1994-1997 Mark Brinicombe
@@ -32,6 +32,7 @@
  */
 
 #include "assym.h"
+#include "opt_cputypes.h"
 #include <machine/asm.h>
 
 /*
@@ -47,6 +48,26 @@
 	.global	_C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
 	.global _C_LABEL(fiqvector)
 
+#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+	/*
+	 * ARMv[67] processors with the Security Extension have the VBAR
+	 * which redirects the low vector to any 32-byte aligned address. 
+	 * Since we are in kernel, we can just do a relative branch to the
+	 * exception code and avoid the intermediate load.
+	 */
+	.global	_C_LABEL(page0rel)
+	.p2align 5
+_C_LABEL(page0rel):
+	b	reset_entry
+	b	undefined_entry
+	b	swi_entry
+	b	prefetch_abort_entry
+	b	data_abort_entry
+	b	address_exception_entry
+	b	irq_entry
+	b	_C_LABEL(fiqvector)
+#endif
+
 _C_LABEL(page0):
 	ldr	pc, .Lreset_target
 	ldr	pc, .Lundefined_target
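
Since VBAR is tied to the Security Extensions, a kernel that must also
run on cores without them would presumably check for the feature at run
time before taking this path; a sketch, assuming the standard ID_PFR1
feature register layout:

	mrc	p15, 0, r0, c0, c1, 1	/* read ID_PFR1 */
	ands	r0, r0, #0xf0		/* Security field, bits [7:4] */
	/* non-zero here means the Security Extensions (and VBAR) exist */

(This commit itself gates the table at compile time instead, via the
CPU_ARMV7/CPU_ARM11 options from opt_cputypes.h.)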
