Module Name:    src
Committed By:   matt
Date:           Mon Aug 20 13:03:41 UTC 2012

Modified Files:
        src/sys/arch/arm/arm32: pmap.c
        src/sys/arch/arm/include/arm32: pmap.h pte.h

Log Message:
Add support for mapping SuperSection on armv6 and armv7.  These always
use a domain of 0 so move the kernel from domain 15 to domain 0.


To generate a diff of this commit:
cvs rdiff -u -r1.229 -r1.230 src/sys/arch/arm/arm32/pmap.c
cvs rdiff -u -r1.102 -r1.103 src/sys/arch/arm/include/arm32/pmap.h
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/arm/include/arm32/pte.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.229 src/sys/arch/arm/arm32/pmap.c:1.230
--- src/sys/arch/arm/arm32/pmap.c:1.229	Fri Jul 13 06:02:58 2012
+++ src/sys/arch/arm/arm32/pmap.c	Mon Aug 20 13:03:41 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.229 2012/07/13 06:02:58 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.230 2012/08/20 13:03:41 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.229 2012/07/13 06:02:58 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.230 2012/08/20 13:03:41 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -1149,7 +1149,7 @@ pmap_alloc_l1(pmap_t pm)
 	 * Fix up the relevant bits in the pmap structure
 	 */
 	pm->pm_l1 = l1;
-	pm->pm_domain = domain;
+	pm->pm_domain = domain + 1;
 }
 
 /*
@@ -1172,8 +1172,8 @@ pmap_free_l1(pmap_t pm)
 	/*
 	 * Free up the domain number which was allocated to the pmap
 	 */
-	l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
-	l1->l1_domain_first = pm->pm_domain;
+	l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
+	l1->l1_domain_first = pm->pm_domain - 1;
 	l1->l1_domain_use_count--;
 
 	/*
@@ -5692,7 +5692,7 @@ vsize_t
 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
     int prot, int cache)
 {
-	pd_entry_t *pde = (pd_entry_t *) l1pt;
+	pd_entry_t *pdep = (pd_entry_t *) l1pt;
 	pt_entry_t *pte, f1, f2s, f2l;
 	vsize_t resid;  
 	int i;
@@ -5731,15 +5731,37 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 	size = resid;
 
 	while (resid > 0) {
+#ifdef _ARM_ARCH_6
+		/* See if we can use a supersection mapping. */
+		if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
+			/* Supersection are always domain 0 */
+			pd_entry_t pde = L1_SS_PROTO | pa |
+			    L1_S_PROT(PTE_KERNEL, prot) | f1;
+#ifdef VERBOSE_INIT_ARM
+			printf("sS");
+#endif
+			for (size_t s = va >> L1_S_SHIFT,
+			     e = s + L1_SS_SIZE / L1_S_SIZE;
+			     s < e;
+			     s++) {
+				pdep[s] = pde;
+				PTE_SYNC(&pdep[s]);
+			}
+			va += L1_SS_SIZE;
+			pa += L1_SS_SIZE;
+			resid -= L1_SS_SIZE;
+			continue;
+		}
+#endif
 		/* See if we can use a section mapping. */
 		if (L1_S_MAPPABLE_P(va, pa, resid)) {
 #ifdef VERBOSE_INIT_ARM
 			printf("S");
 #endif
-			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+			pdep[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
 			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
-			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+			PTE_SYNC(&pdep[va >> L1_S_SHIFT]);
 			va += L1_S_SIZE;
 			pa += L1_S_SIZE;
 			resid -= L1_S_SIZE;
@@ -5751,15 +5773,15 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 		 * one is actually in the corresponding L1 slot
 		 * for the current VA.
 		 */
-		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+		if ((pdep[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
 
 #ifndef ARM32_NEW_VM_LAYOUT
 		pte = (pt_entry_t *)
-		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+		    kernel_pt_lookup(pdep[va >> L1_S_SHIFT] & L2_S_FRAME);
 #else
 		pte = (pt_entry_t *) kernel_pt_lookup(
-		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+		    pdep[L1_IDX(va)] & L1_C_ADDR_MASK);
 #endif
 		if (pte == NULL)
 			panic("pmap_map_chunk: can't find L2 table for VA"
@@ -5935,6 +5957,7 @@ pt_entry_t	pte_l2_l_prot_w;
 pt_entry_t	pte_l2_l_prot_ro;
 pt_entry_t	pte_l2_l_prot_mask;
 
+pt_entry_t	pte_l1_ss_proto;
 pt_entry_t	pte_l1_s_proto;
 pt_entry_t	pte_l1_c_proto;
 pt_entry_t	pte_l2_s_proto;
@@ -5962,22 +5985,28 @@ pmap_pte_init_generic(void)
 	/*
 	 * If we have a write-through cache, set B and C.  If
 	 * we have a write-back cache, then we assume setting
-	 * only C will make those pages write-through.
+	 * only C will make those pages write-through (except for those
+	 * Cortex CPUs which can read the L1 caches).
 	 */
-	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
+#if ARM_MMU_V7 > 1
+	    || (CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
+		&& !CPU_ID_CORTEX_A8_P(curcpu()->ci_arm_cpuid))
+#endif
+	    || false) {
 		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
 		pte_l2_l_cache_mode_pt = L2_B|L2_C;
 		pte_l2_s_cache_mode_pt = L2_B|L2_C;
-	} else {
 #if ARM_MMU_V6 > 1
+	} else if (CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
 		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; /* arm116 errata 399234 */
 		pte_l2_l_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */
 		pte_l2_s_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */
-#else
-		pte_l1_s_cache_mode_pt = L1_S_C;
-		pte_l2_l_cache_mode_pt = L2_C;
-		pte_l2_s_cache_mode_pt = L2_C;
 #endif
+	} else {
+		pte_l1_s_cache_mode_pt = L1_S_C;	/* write through */
+		pte_l2_l_cache_mode_pt = L2_C;		/* write through */
+		pte_l2_s_cache_mode_pt = L2_C;		/* write through */
 	}
 
 	pte_l1_s_prot_u = L1_S_PROT_U_generic;
@@ -5995,6 +6024,7 @@ pmap_pte_init_generic(void)
 	pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
 	pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
 
+	pte_l1_ss_proto = L1_SS_PROTO_generic;
 	pte_l1_s_proto = L1_S_PROTO_generic;
 	pte_l1_c_proto = L1_C_PROTO_generic;
 	pte_l2_s_proto = L2_S_PROTO_generic;
@@ -6219,6 +6249,7 @@ pmap_pte_init_xscale(void)
 	pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
 	pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;
 
+	pte_l1_ss_proto = L1_SS_PROTO_xscale;
 	pte_l1_s_proto = L1_S_PROTO_xscale;
 	pte_l1_c_proto = L1_C_PROTO_xscale;
 	pte_l2_s_proto = L2_S_PROTO_xscale;
@@ -6403,6 +6434,7 @@ pmap_pte_init_arm11mpcore(void)
 	pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
 	pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
 
+	pte_l1_ss_proto = L1_SS_PROTO_armv6;
 	pte_l1_s_proto = L1_S_PROTO_armv6;
 	pte_l1_c_proto = L1_C_PROTO_armv6;
 	pte_l2_s_proto = L2_S_PROTO_armv6c;
@@ -6412,6 +6444,7 @@ pmap_pte_init_arm11mpcore(void)
 	pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
 	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;
 
+	pte_l1_ss_proto = L1_SS_PROTO_armv6;
 	pte_l1_s_proto = L1_S_PROTO_armv6;
 	pte_l1_c_proto = L1_C_PROTO_armv6;
 	pte_l2_s_proto = L2_S_PROTO_armv6n;
@@ -6455,6 +6488,7 @@ pmap_pte_init_armv7(void)
 	pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
 	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;
 
+	pte_l1_ss_proto = L1_SS_PROTO_armv7;
 	pte_l1_s_proto = L1_S_PROTO_armv7;
 	pte_l1_c_proto = L1_C_PROTO_armv7;
 	pte_l2_s_proto = L2_S_PROTO_armv7;

Index: src/sys/arch/arm/include/arm32/pmap.h
diff -u src/sys/arch/arm/include/arm32/pmap.h:1.102 src/sys/arch/arm/include/arm32/pmap.h:1.103
--- src/sys/arch/arm/include/arm32/pmap.h:1.102	Sun Jul 29 00:07:10 2012
+++ src/sys/arch/arm/include/arm32/pmap.h	Mon Aug 20 13:03:41 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.102 2012/07/29 00:07:10 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.103 2012/08/20 13:03:41 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -207,16 +207,18 @@ typedef struct pv_addr {
 	paddr_t pv_pa;
 	vaddr_t pv_va;
 	vsize_t pv_size;
+	uint8_t pv_cache;
+	uint8_t pv_prot;
 } pv_addr_t;
 typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
 
 extern pv_addrqh_t pmap_freeq;
-extern pv_addr_t kernelpages;
 extern pv_addr_t kernelstack;
 extern pv_addr_t abtstack;
 extern pv_addr_t fiqstack;
 extern pv_addr_t irqstack;
 extern pv_addr_t undstack;
+extern pv_addr_t idlestack;
 extern pv_addr_t systempage;
 extern pv_addr_t kernel_l1pt;
 
@@ -472,12 +474,15 @@ void	pmap_pte_init_arm9(void);
 #if defined(CPU_ARM10)
 void	pmap_pte_init_arm10(void);
 #endif /* CPU_ARM10 */
-#if defined(CPU_ARM11)
+#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
 void	pmap_pte_init_arm11(void);
 #endif /* CPU_ARM11 */
-#if defined(CPU_ARM11MPCORE)
+#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
 void	pmap_pte_init_arm11mpcore(void);
 #endif
+#if ARM_MMU_V7 == 1
+void	pmap_pte_init_armv7(void);
+#endif /* ARM_MMU_V7 */
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
 
 #if ARM_MMU_SA1 == 1
@@ -496,10 +501,6 @@ void	xscale_setup_minidata(vaddr_t, vadd
 void	pmap_uarea(vaddr_t);
 #endif /* ARM_MMU_XSCALE == 1 */
 
-#if ARM_MMU_V7 == 1
-void	pmap_pte_init_armv7(void);
-#endif /* ARM_MMU_V7 */
-
 extern pt_entry_t		pte_l1_s_cache_mode;
 extern pt_entry_t		pte_l1_s_cache_mask;
 
@@ -532,6 +533,7 @@ extern pt_entry_t		pte_l2_l_prot_w;
 extern pt_entry_t		pte_l2_l_prot_ro;
 extern pt_entry_t		pte_l2_l_prot_mask;
 
+extern pt_entry_t		pte_l1_ss_proto;
 extern pt_entry_t		pte_l1_s_proto;
 extern pt_entry_t		pte_l1_c_proto;
 extern pt_entry_t		pte_l2_s_proto;
@@ -546,8 +548,8 @@ extern void (*pmap_zero_page_func)(paddr
 /*
  * Definitions for MMU domains
  */
-#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
-#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */
+#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
+#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */
 
 /*
  * These macros define the various bit masks in the PTE.
@@ -642,6 +644,11 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L1_S_PROTO_armv6	(L1_TYPE_S)
 #define	L1_S_PROTO_armv7	(L1_TYPE_S)
 
+#define	L1_SS_PROTO_generic	0
+#define	L1_SS_PROTO_xscale	0
+#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
+#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)
+
 #define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
 #define	L1_C_PROTO_xscale	(L1_TYPE_C)
 #define	L1_C_PROTO_armv6	(L1_TYPE_C)
@@ -684,6 +691,7 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
 #define	L2_S_CACHE_MASK		pte_l2_s_cache_mask
 
+#define	L1_SS_PROTO		pte_l1_ss_proto
 #define	L1_S_PROTO		pte_l1_s_proto
 #define	L1_C_PROTO		pte_l1_c_proto
 #define	L2_S_PROTO		pte_l2_s_proto
@@ -710,6 +718,7 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
 
+#define	L1_SS_PROTO		L1_SS_PROTO_generic
 #define	L1_S_PROTO		L1_S_PROTO_generic
 #define	L1_C_PROTO		L1_C_PROTO_generic
 #define	L2_S_PROTO		L2_S_PROTO_generic
@@ -738,6 +747,7 @@ extern void (*pmap_zero_page_func)(paddr
 
 /* These prototypes make writeable mappings, while the other MMU types
  * make read-only mappings. */
+#define	L1_SS_PROTO		L1_SS_PROTO_armv6
 #define	L1_S_PROTO		L1_S_PROTO_armv6
 #define	L1_C_PROTO		L1_C_PROTO_armv6
 #define	L2_S_PROTO		L2_S_PROTO_armv6n
@@ -764,6 +774,7 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
 
+#define	L1_SS_PROTO		L1_SS_PROTO_generic
 #define	L1_S_PROTO		L1_S_PROTO_generic
 #define	L1_C_PROTO		L1_C_PROTO_generic
 #define	L2_S_PROTO		L2_S_PROTO_generic
@@ -790,6 +801,7 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale
 
+#define	L1_SS_PROTO		L1_SS_PROTO_xscale
 #define	L1_S_PROTO		L1_S_PROTO_xscale
 #define	L1_C_PROTO		L1_C_PROTO_xscale
 #define	L2_S_PROTO		L2_S_PROTO_xscale
@@ -818,6 +830,7 @@ extern void (*pmap_zero_page_func)(paddr
 
 /* These prototypes make writeable mappings, while the other MMU types
  * make read-only mappings. */
+#define	L1_SS_PROTO		L1_SS_PROTO_armv7
 #define	L1_S_PROTO		L1_S_PROTO_armv7
 #define	L1_C_PROTO		L1_C_PROTO_armv7
 #define	L2_S_PROTO		L2_S_PROTO_armv7
@@ -852,9 +865,12 @@ extern void (*pmap_zero_page_func)(paddr
 				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
 
 /*
- * Macros to test if a mapping is mappable with an L1 Section mapping
- * or an L2 Large Page mapping.
+ * Macros to test if a mapping is mappable with an L1 SuperSection,
+ * L1 Section, or an L2 Large Page mapping.
  */
+#define	L1_SS_MAPPABLE_P(va, pa, size)					\
+	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
+
 #define	L1_S_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
 

Index: src/sys/arch/arm/include/arm32/pte.h
diff -u src/sys/arch/arm/include/arm32/pte.h:1.11 src/sys/arch/arm/include/arm32/pte.h:1.12
--- src/sys/arch/arm/include/arm32/pte.h:1.11	Thu Aug 16 07:26:23 2012
+++ src/sys/arch/arm/include/arm32/pte.h	Mon Aug 20 13:03:41 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pte.h,v 1.11 2012/08/16 07:26:23 matt Exp $	*/
+/*	$NetBSD: pte.h,v 1.12 2012/08/20 13:03:41 matt Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -167,6 +167,8 @@ typedef uint32_t	pt_entry_t;	/* L2 table
 #define	L1_S_V6_APX	0x00008000	/* ARMv6 AP eXtension */
 #define	L1_S_V6_S	0x00010000	/* ARMv6 Shared */
 #define	L1_S_V6_nG	0x00020000	/* ARMv6 not-Global */
+#define	L1_S_V6_SS	0x00040000	/* ARMv6 SuperSection */
+#define	L1_S_V6_NS	0x00080000	/* ARMv6 Not Secure */
 
 /* L1 Coarse Descriptor */
 #define	L1_C_IMP0	0x00000004	/* implementation defined */
@@ -298,7 +300,7 @@ typedef uint32_t	pt_entry_t;	/* L2 table
 #define	TEX_XSCALE_X	0x01		/* X modifies C and B */
 
 /*
- * Type Extension bits for ARM V6 MMU
+ * Type Extension bits for ARM V6 and V7 MMU
  *
  * TEX C B                                    Shared
  * 000 0 0  Strong order                      yes

Reply via email to