Module Name:    src
Committed By:   skrll
Date:           Thu Jul 16 11:36:35 UTC 2020

Modified Files:
        src/sys/arch/aarch64/aarch64: aarch64_machdep.c efi_machdep.c locore.S
            pmap.c pmapboot.c start.S
        src/sys/arch/aarch64/include: asan.h pmap.h
        src/sys/arch/evbarm/conf: GENERIC64

Log Message:
pmapboot_enter simplification
- bootpage_alloc in asm becomes pmapboot_pagealloc in C
- PMAPBOOT_ENTER_NOBLOCK is removed as it's not used
- PMAPBOOT_ENTER_NOOVERWRITE is removed as it's now always on
- physpage_allocator argument is removed as it's always
  pmapboot_pagealloc
- Support for EARLYCONS without CONSADDR is removed so that the identity
  map for CONSADDR is always known.

For the assembly files:
 2 files changed, 40 insertions(+), 89 deletions(-)

LGTM ryo


To generate a diff of this commit:
cvs rdiff -u -r1.44 -r1.45 src/sys/arch/aarch64/aarch64/aarch64_machdep.c
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/aarch64/aarch64/efi_machdep.c
cvs rdiff -u -r1.66 -r1.67 src/sys/arch/aarch64/aarch64/locore.S
cvs rdiff -u -r1.83 -r1.84 src/sys/arch/aarch64/aarch64/pmap.c
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/aarch64/aarch64/pmapboot.c \
    src/sys/arch/aarch64/aarch64/start.S
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/aarch64/include/asan.h
cvs rdiff -u -r1.40 -r1.41 src/sys/arch/aarch64/include/pmap.h
cvs rdiff -u -r1.159 -r1.160 src/sys/arch/evbarm/conf/GENERIC64

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/aarch64_machdep.c
diff -u src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.44 src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.45
--- src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.44	Wed Jul  1 08:01:07 2020
+++ src/sys/arch/aarch64/aarch64/aarch64_machdep.c	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: aarch64_machdep.c,v 1.44 2020/07/01 08:01:07 ryo Exp $ */
+/* $NetBSD: aarch64_machdep.c,v 1.45 2020/07/16 11:36:35 skrll Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.44 2020/07/01 08:01:07 ryo Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.45 2020/07/16 11:36:35 skrll Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_cpuoptions.h"
@@ -161,9 +161,9 @@ cpu_kernel_vm_init(uint64_t memory_start
 		start = trunc_page(bootconfig.dram[blk].address);
 		end = round_page(bootconfig.dram[blk].address +
 		    (uint64_t)bootconfig.dram[blk].pages * PAGE_SIZE);
+
 		pmapboot_enter_range(AARCH64_PA_TO_KVA(start), start,
-		    end - start, ksegattr, PMAPBOOT_ENTER_NOOVERWRITE,
-		    bootpage_alloc, printf);
+		    end - start, ksegattr, printf);
 	}
 	aarch64_dcache_wbinv_all();
 

Index: src/sys/arch/aarch64/aarch64/efi_machdep.c
diff -u src/sys/arch/aarch64/aarch64/efi_machdep.c:1.5 src/sys/arch/aarch64/aarch64/efi_machdep.c:1.6
--- src/sys/arch/aarch64/aarch64/efi_machdep.c:1.5	Mon Dec 16 00:03:50 2019
+++ src/sys/arch/aarch64/aarch64/efi_machdep.c	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: efi_machdep.c,v 1.5 2019/12/16 00:03:50 jmcneill Exp $ */
+/* $NetBSD: efi_machdep.c,v 1.6 2020/07/16 11:36:35 skrll Exp $ */
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: efi_machdep.c,v 1.5 2019/12/16 00:03:50 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: efi_machdep.c,v 1.6 2020/07/16 11:36:35 skrll Exp $");
 
 #include <sys/param.h>
 #include <uvm/uvm_extern.h>
@@ -70,7 +70,7 @@ arm_efirt_md_map_range(vaddr_t va, paddr
 		panic("arm_efirt_md_map_range: unsupported type %d", type);
 	}
 
-	pmapboot_enter(va, pa, sz, L3_SIZE, attr, 0, bootpage_alloc, NULL);
+	pmapboot_enter(va, pa, sz, L3_SIZE, attr, NULL);
 	while (sz >= PAGE_SIZE) {
 		aarch64_tlbi_by_va(va);
 		va += PAGE_SIZE;

Index: src/sys/arch/aarch64/aarch64/locore.S
diff -u src/sys/arch/aarch64/aarch64/locore.S:1.66 src/sys/arch/aarch64/aarch64/locore.S:1.67
--- src/sys/arch/aarch64/aarch64/locore.S:1.66	Sun Jul 12 06:37:39 2020
+++ src/sys/arch/aarch64/aarch64/locore.S	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.66 2020/07/12 06:37:39 skrll Exp $	*/
+/*	$NetBSD: locore.S,v 1.67 2020/07/16 11:36:35 skrll Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org>
@@ -38,7 +38,7 @@
 #include <aarch64/hypervisor.h>
 #include "assym.h"
 
-RCSID("$NetBSD: locore.S,v 1.66 2020/07/12 06:37:39 skrll Exp $")
+RCSID("$NetBSD: locore.S,v 1.67 2020/07/16 11:36:35 skrll Exp $")
 
 #ifdef AARCH64_DEVICE_MEM_STRONGLY_ORDERED
 #define	MAIR_DEVICE_MEM		MAIR_DEVICE_nGnRnE
@@ -54,9 +54,12 @@ RCSID("$NetBSD: locore.S,v 1.66 2020/07/
 #define LOCORE_EL2
 
 #define BOOT_AP_STACKSIZE	256	/* size of temporally stack for APs */
-#define BOOTPAGE_ALLOC_MAX	(1024 * 1024)	/* reserved size from _end[] */
+#define PMAPBOOT_PAGEALLOCMAX	(1024 * 1024)	/* reserved size from _end[] */
 
 #if (defined(VERBOSE_INIT_ARM) || defined(DEBUG_LOCORE)) && defined(EARLYCONS)
+#if !defined(CONSADDR)
+#error CONSADDR required with EARLYCONS
+#endif
 #define VERBOSE_LOCORE
 #endif
 
@@ -774,11 +777,11 @@ init_mmutable:
 	stp	x26, lr, [sp, #-16]!
 
 	/* first allocated page must be kernel l0pt = ARM_BOOTSTRAP_LxPT */
-	bl	bootpage_alloc
+	bl	pmapboot_pagealloc
 	cbz	x0, init_mmutable_error
 	msr	ttbr1_el1, x0
 
-	bl	bootpage_alloc
+	bl	pmapboot_pagealloc
 	cbz	x0, init_mmutable_error
 	msr	ttbr0_el1, x0
 
@@ -799,85 +802,62 @@ init_mmutable:
 	 *     x2: psize_t size,
 	 *     x3: psize_t blocksize,  // L[123]_SIZE
 	 *     x4: pt_entry_t attr,    // pte attributes. LX_BLKPAG_*
-	 *     x5: flags,
-	 *     x6: pd_entry_t *(*physpage_allocator)(void),
-	 *     x7: void (*pr)(const char *, ...)
+	 *     x5: void (*pr)(const char *, ...)
 	 *  );
 	 */
 
 #ifdef CONSADDR
-	VPRINT("Creating VA=PA tables for CONSADDR\n")
-	mov	x7, x26				/* pr func */
-	adr	x6, bootpage_alloc		/* allocator */
-	mov	x5, xzr				/* flags = 0 */
-	mov	x4, #LX_BLKPAG_ATTR_DEVICE_MEM | LX_BLKPAG_AP_RW	/* attr */
-	orr	x4, x4, #LX_BLKPAG_UXN | LX_BLKPAG_PXN
-	mov	x3, #L2_SIZE			/* blocksize */
+	VPRINT("Creating identity mapping for CONSADDR\n")
+	ldr	x0, =CONSADDR			/* va = CONADDR (physical) */
+	mov	x1, x0				/* pa = va */
 	mov	x2, #L2_SIZE			/* size */
-	ldr	x1, =CONSADDR			/* pa */
-	mov	x0, x1				/* va */
-	bl	pmapboot_enter
-	cbnz	x0, init_mmutable_error
-#elif defined(EARLYCONS)
-	/* CONSADDR is unknown, but need to map UART */
-	VPRINT("Creating VA=PA tables (0x00000000-0xffffffff)\n")
-	mov	x7, x26				/* pr func */
-	adr	x6, bootpage_alloc		/* allocator */
-	mov	x5, xzr				/* flags = 0 */
-	mov	x4, #LX_BLKPAG_ATTR_DEVICE_MEM | LX_BLKPAG_AP_RW	/* attr */
-	orr	x4, x4, #LX_BLKPAG_UXN | LX_BLKPAG_PXN
 	mov	x3, #L2_SIZE			/* blocksize */
-	mov	x2, #(1024*1024*1024*4)		/* size */
-	mov	x1, xzr				/* pa */
-	mov	x0, xzr				/* va */
+	mov	x4, #LX_BLKPAG_ATTR_DEVICE_MEM | LX_BLKPAG_AP_RW
+	orr	x4, x4, #LX_BLKPAG_UXN | LX_BLKPAG_PXN	/* attr */
+	mov	x5, x26				/* pr func */
 	bl	pmapboot_enter
 	cbnz	x0, init_mmutable_error
 #endif
 
 	/* identity mapping for kernel image */
-	VPRINT("Creating VA=PA tables for kernel image\n")
-	mov	x7, x26				/* pr func */
-	adr	x6, bootpage_alloc		/* allocator */
-	mov	x5, xzr				/* flags = 0 */
-	mov	x4, #LX_BLKPAG_ATTR_NORMAL_NC | LX_BLKPAG_AP_RW	/* attr */
-	mov	x3, #L2_SIZE			/* blocksize */
-	adr	x0, start			/* va = start */
+	VPRINT("Creating identity mapping for kernel image\n")
+	adrl	x0, start			/* va = start (physical) */
+
+	mov	x1, x0				/* pa = va */
 	adrl	x2, _end
-	sub	x2, x2, x0			/* size = _end - start */
-	add	x2, x2, #BOOTPAGE_ALLOC_MAX	/* for bootpage_alloc() */
-	mov	x1, x0				/* pa */
+	sub	x2, x2, x1			/* size = _end - start */
+	add	x2, x2, #PMAPBOOT_PAGEALLOCMAX	/* for pmapboot_pagealloc() */
+	mov	x3, #L2_SIZE			/* blocksize */
+	mov	x4, #LX_BLKPAG_ATTR_NORMAL_NC | LX_BLKPAG_AP_RW	/* attr */
+	mov	x5, x26				/* pr func */
 	bl	pmapboot_enter
 	cbnz	x0, init_mmutable_error
 
 #ifdef FDT
-	VPRINT("Creating VA=PA tables for FDT\n")
+	VPRINT("Creating identity mapping for FDT\n")
 	adrl	x8, _C_LABEL(fdt_addr_r)
 	ldr	x8, [x8]
 
-	mov	x7, x26				/* pr func */
-	adr	x6, bootpage_alloc		/* allocator */
-	mov	x5, xzr				/* flags = 0 */
-	mov	x4, #LX_BLKPAG_ATTR_NORMAL_NC | LX_BLKPAG_AP_RW	/* attr */
-	orr	x4, x4, #LX_BLKPAG_UXN | LX_BLKPAG_PXN
-	mov	x3, #L2_SIZE			/* blocksize */
-	mov	x2, #L2_SIZE			/* size */
-	mov	x1, x8				/* pa */
 	mov	x0, x8				/* va */
+	mov	x1, x8				/* pa */
+	mov	x2, #L2_SIZE			/* size */
+	mov	x3, #L2_SIZE			/* blocksize */
+	mov	x4, #LX_BLKPAG_ATTR_NORMAL_NC | LX_BLKPAG_AP_RW
+	orr	x4, x4, #LX_BLKPAG_UXN | LX_BLKPAG_PXN	/* attr */
+	mov	x5, x26				/* pr func */
 	bl	pmapboot_enter
 	cbnz	x0, init_mmutable_error
 #endif
 
 	VPRINT("Creating KVA=PA tables\n")
-	mov	x7, x26				/* pr func */
-	adr	x6, bootpage_alloc		/* allocator */
-	mov	x5, xzr				/* flags = 0 */
-	mov	x4, #LX_BLKPAG_ATTR_NORMAL_WB | LX_BLKPAG_AP_RW	/* attr */
-	orr	x4, x4, #LX_BLKPAG_UXN
-	mov	x3, #L2_SIZE			/* blocksize */
-	adr	x1, start			/* pa = start */
+	ldr	x0, =start			/* va */
+	adrl	x1, start			/* pa = start (physical) */
 	adrl	x2, _end
 	sub	x2, x2, x1			/* size = _end - start */
-	ldr	x0, =start			/* va */
+	mov	x3, #L2_SIZE			/* blocksize */
+	mov	x4, #LX_BLKPAG_ATTR_NORMAL_WB | LX_BLKPAG_AP_RW	/* attr */
+	orr	x4, x4, #LX_BLKPAG_UXN
+	mov	x5, x26				/* pr func */
 	bl	pmapboot_enter
 	cbnz	x0, init_mmutable_error
 
@@ -890,40 +870,6 @@ init_mmutable_done:
 	ldp	x26, lr, [sp], #16
 	ret
 
-/* return PA of allocated page */
-ENTRY_NP(bootpage_alloc)
-	/* x2 = kernend_extra */
-	adrl	x3, kernend_extra
-	ldr	x2, [x3]
-	/* if (kernend_extra < 0) return NULL */
-	mov	x0, xzr
-	cmp	x2, xzr
-	bmi	bootpage_alloc_done
-
-	/* x0 = PA of _end[] */
-	adrl	x1, kern_vtopdiff
-	ldr	x1, [x1]
-	ldr	x0, =ARM_BOOTSTRAP_LxPT
-	sub	x0, x0, x1
-
-	/* x0 = ARM_BOOTSTRAP_LxPT + kernend_extra */
-	add	x0, x0, x2
-
-	/* kernend_extra += PAGE_SIZE; */
-	add	x2, x2, #PAGE_SIZE
-	str	x2, [x3]
-
-	/* clear allocated page */
-	mov	x1, x0
-	add	x2, x1, #PAGE_SIZE
-1:	stp	xzr, xzr, [x1], #16
-	cmp	x1, x2
-	bcc	1b
-bootpage_alloc_done:
-	ret
-END(bootpage_alloc)
-
-
 mmu_disable:
 	dsb	sy
 	mrs	x0, sctlr_el1

Index: src/sys/arch/aarch64/aarch64/pmap.c
diff -u src/sys/arch/aarch64/aarch64/pmap.c:1.83 src/sys/arch/aarch64/aarch64/pmap.c:1.84
--- src/sys/arch/aarch64/aarch64/pmap.c:1.83	Sat Jul  4 16:58:11 2020
+++ src/sys/arch/aarch64/aarch64/pmap.c	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $	*/
+/*	$NetBSD: pmap.c,v 1.84 2020/07/16 11:36:35 skrll Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.84 2020/07/16 11:36:35 skrll Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
@@ -322,8 +322,7 @@ pmap_map_chunk(vaddr_t va, paddr_t pa, v
 
 	attr = _pmap_pte_adjust_prot(0, prot, VM_PROT_ALL, false);
 	attr = _pmap_pte_adjust_cacheflags(attr, flags);
-	pmapboot_enter_range(va, pa, resid, attr,
-	    PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, printf);
+	pmapboot_enter_range(va, pa, resid, attr, printf);
 	aarch64_tlbi_all();
 
 	return resid;

Index: src/sys/arch/aarch64/aarch64/pmapboot.c
diff -u src/sys/arch/aarch64/aarch64/pmapboot.c:1.7 src/sys/arch/aarch64/aarch64/pmapboot.c:1.8
--- src/sys/arch/aarch64/aarch64/pmapboot.c:1.7	Mon Apr 13 05:40:25 2020
+++ src/sys/arch/aarch64/aarch64/pmapboot.c	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmapboot.c,v 1.7 2020/04/13 05:40:25 maxv Exp $	*/
+/*	$NetBSD: pmapboot.c,v 1.8 2020/07/16 11:36:35 skrll Exp $	*/
 
 /*
  * Copyright (c) 2018 Ryo Shimizu <r...@nerv.org>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmapboot.c,v 1.7 2020/04/13 05:40:25 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmapboot.c,v 1.8 2020/07/16 11:36:35 skrll Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
@@ -41,6 +41,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmapboot.c,v
 
 #include <aarch64/armreg.h>
 #include <aarch64/cpufunc.h>
+#include <aarch64/machdep.h>
 #include <aarch64/pmap.h>
 #include <aarch64/pte.h>
 
@@ -194,14 +195,12 @@ tlb_contiguous_p(vaddr_t addr, vaddr_t s
  */
 int
 pmapboot_enter(vaddr_t va, paddr_t pa, psize_t size, psize_t blocksize,
-    pt_entry_t attr, uint64_t flags, pd_entry_t *(*physpage_allocator)(void),
-    void (*pr)(const char *, ...) __printflike(1, 2))
+    pt_entry_t attr, void (*pr)(const char *, ...) __printflike(1, 2))
 {
 	int level, idx0, idx1, idx2, idx3, nskip = 0;
 	int ttbr __unused;
 	vaddr_t va_end;
 	pd_entry_t *l0, *l1, *l2, *l3, pte;
-	bool noblock, nooverwrite;
 #ifdef OPTIMIZE_TLB_CONTIG
 	vaddr_t va_start;
 	pd_entry_t *ll;
@@ -222,17 +221,13 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		return -1;
 	}
 
-	noblock = flags & PMAPBOOT_ENTER_NOBLOCK;
-	nooverwrite = flags & PMAPBOOT_ENTER_NOOVERWRITE;
-
 	VPRINTF("pmapboot_enter: va=0x%lx, pa=0x%lx, size=0x%lx, "
-	    "blocksize=0x%lx, attr=0x%016lx, "
-	    "noblock=%d, nooverwrite=%d\n",
-	    va, pa, size, blocksize, attr, noblock, nooverwrite);
+	    "blocksize=0x%lx, attr=0x%016lx\n",
+	    va, pa, size, blocksize, attr);
 
-	va_end = (va + size - 1) & ~(blocksize - 1);
 	pa &= ~(blocksize - 1);
 	va &= ~(blocksize - 1);
+	va_end = (va + size + blocksize- 1) & ~(blocksize - 1);
 #ifdef OPTIMIZE_TLB_CONTIG
 	va_start = va;
 #endif
@@ -254,7 +249,7 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		return -1;
 	}
 
-	while (va <= va_end) {
+	while (va < va_end) {
 #ifdef OPTIMIZE_TLB_CONTIG
 		ll = NULL;
 		llidx = -1;
@@ -262,7 +257,7 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 
 		idx0 = l0pde_index(va);
 		if (l0[idx0] == 0) {
-			l1 = physpage_allocator();
+			l1 = pmapboot_pagealloc();
 			if (l1 == NULL) {
 				VPRINTF("pmapboot_enter: cannot allocate L1 page\n");
 				return -1;
@@ -278,13 +273,6 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 
 		idx1 = l1pde_index(va);
 		if (level == 1) {
-			if (noblock)
-				goto nextblk;
-			if (nooverwrite && l1pde_valid(l1[idx1])) {
-				nskip++;
-				goto nextblk;
-			}
-
 			pte = pa |
 			    L1_BLOCK |
 			    LX_BLKPAG_AF |
@@ -298,6 +286,12 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 			ll = l1;
 			llidx = idx1;
 #endif
+
+			if (l1pde_valid(l1[idx1]) && l1[idx1] != pte) {
+				nskip++;
+				goto nextblk;
+			}
+
 			l1[idx1] = pte;
 			VPRINTF("TTBR%d[%d][%d]\t= %016lx:", ttbr,
 			    idx0, idx1, pte);
@@ -306,7 +300,7 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		}
 
 		if (!l1pde_valid(l1[idx1])) {
-			l2 = physpage_allocator();
+			l2 = pmapboot_pagealloc();
 			if (l2 == NULL) {
 				VPRINTF("pmapboot_enter: cannot allocate L2 page\n");
 				return -1;
@@ -323,13 +317,6 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 
 		idx2 = l2pde_index(va);
 		if (level == 2) {
-			if (noblock)
-				goto nextblk;
-			if (nooverwrite && l2pde_valid(l2[idx2])) {
-				nskip++;
-				goto nextblk;
-			}
-
 			pte = pa |
 			    L2_BLOCK |
 			    LX_BLKPAG_AF |
@@ -343,6 +330,11 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 			ll = l2;
 			llidx = idx2;
 #endif
+			if (l2pde_valid(l2[idx2]) && l2[idx2] != pte) {
+				nskip++;
+				goto nextblk;
+			}
+
 			l2[idx2] = pte;
 			VPRINTF("TTBR%d[%d][%d][%d]\t= %016lx:", ttbr,
 			    idx0, idx1, idx2, pte);
@@ -351,7 +343,7 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		}
 
 		if (!l2pde_valid(l2[idx2])) {
-			l3 = physpage_allocator();
+			l3 = pmapboot_pagealloc();
 			if (l3 == NULL) {
 				VPRINTF("pmapboot_enter: cannot allocate L3 page\n");
 				return -1;
@@ -367,12 +359,6 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		}
 
 		idx3 = l3pte_index(va);
-		if (noblock)
-			goto nextblk;
-		if (nooverwrite && l3pte_valid(l3[idx3])) {
-			nskip++;
-			goto nextblk;
-		}
 
 		pte = pa |
 		    L3_PAGE |
@@ -387,11 +373,15 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 		ll = l3;
 		llidx = idx3;
 #endif
+		if (l3pte_valid(l3[idx3]) && l3[idx3] != pte) {
+			nskip++;
+			goto nextblk;
+		}
+
 		l3[idx3] = pte;
 		VPRINTF("TTBR%d[%d][%d][%d][%d]\t= %lx:", ttbr,
 		    idx0, idx1, idx2, idx3, pte);
 		VPRINT_PTE(pte, 3);
-
  nextblk:
 #ifdef OPTIMIZE_TLB_CONTIG
 		/*
@@ -432,9 +422,30 @@ pmapboot_enter(vaddr_t va, paddr_t pa, p
 	return nskip;
 }
 
+paddr_t pmapboot_pagebase __attribute__((__section__(".data")));
+
+pd_entry_t *
+pmapboot_pagealloc(void)
+{
+	extern long kernend_extra;
+
+	if (kernend_extra < 0)
+		return NULL;
+
+	paddr_t pa = pmapboot_pagebase + kernend_extra;
+	kernend_extra += PAGE_SIZE;
+
+	char *s = (char *)pa;
+	char *e = s + PAGE_SIZE;
+
+	while (s < e)
+	    *s++ = 0;
+
+	return (pd_entry_t *)pa;
+}
+
 int
 pmapboot_enter_range(vaddr_t va, paddr_t pa, psize_t size, pt_entry_t attr,
-    uint64_t flags, pd_entry_t *(*physpage_allocator)(void),
     void (*pr)(const char *, ...) __printflike(1, 2))
 {
 	vaddr_t vend;
@@ -452,8 +463,7 @@ pmapboot_enter_range(vaddr_t va, paddr_t
 		mapsize = nblocks * L3_SIZE;
 		VPRINTF("Creating L3 tables: %016lx-%016lx : %016lx-%016lx\n",
 		    va, va + mapsize - 1, pa, pa + mapsize - 1);
-		nskip += pmapboot_enter(va, pa, mapsize, L3_SIZE, attr, flags,
-		    physpage_allocator, NULL);
+		nskip += pmapboot_enter(va, pa, mapsize, L3_SIZE, attr, pr);
 		va += mapsize;
 		pa += mapsize;
 		left -= mapsize;
@@ -466,8 +476,7 @@ pmapboot_enter_range(vaddr_t va, paddr_t
 		mapsize = nblocks * L2_SIZE;
 		VPRINTF("Creating L2 tables: %016lx-%016lx : %016lx-%016lx\n",
 		    va, va + mapsize - 1, pa, pa + mapsize - 1);
-		nskip += pmapboot_enter(va, pa, mapsize, L2_SIZE, attr, flags,
-		    physpage_allocator, NULL);
+		nskip += pmapboot_enter(va, pa, mapsize, L2_SIZE, attr, pr);
 		va += mapsize;
 		pa += mapsize;
 		left -= mapsize;
@@ -478,8 +487,7 @@ pmapboot_enter_range(vaddr_t va, paddr_t
 		mapsize = nblocks * L1_SIZE;
 		VPRINTF("Creating L1 tables: %016lx-%016lx : %016lx-%016lx\n",
 		    va, va + mapsize - 1, pa, pa + mapsize - 1);
-		nskip += pmapboot_enter(va, pa, mapsize, L1_SIZE, attr, flags,
-		    physpage_allocator, NULL);
+		nskip += pmapboot_enter(va, pa, mapsize, L1_SIZE, attr, pr);
 		va += mapsize;
 		pa += mapsize;
 		left -= mapsize;
@@ -490,8 +498,7 @@ pmapboot_enter_range(vaddr_t va, paddr_t
 		mapsize = nblocks * L2_SIZE;
 		VPRINTF("Creating L2 tables: %016lx-%016lx : %016lx-%016lx\n",
 		    va, va + mapsize - 1, pa, pa + mapsize - 1);
-		nskip += pmapboot_enter(va, pa, mapsize, L2_SIZE, attr, flags,
-		    physpage_allocator, NULL);
+		nskip += pmapboot_enter(va, pa, mapsize, L2_SIZE, attr, pr);
 		va += mapsize;
 		pa += mapsize;
 		left -= mapsize;
@@ -502,8 +509,7 @@ pmapboot_enter_range(vaddr_t va, paddr_t
 		mapsize = nblocks * L3_SIZE;
 		VPRINTF("Creating L3 tables: %016lx-%016lx : %016lx-%016lx\n",
 		    va, va + mapsize - 1, pa, pa + mapsize - 1);
-		nskip += pmapboot_enter(va, pa, mapsize, L3_SIZE, attr, flags,
-		    physpage_allocator, NULL);
+		nskip += pmapboot_enter(va, pa, mapsize, L3_SIZE, attr, pr);
 		va += mapsize;
 		pa += mapsize;
 		left -= mapsize;
Index: src/sys/arch/aarch64/aarch64/start.S
diff -u src/sys/arch/aarch64/aarch64/start.S:1.7 src/sys/arch/aarch64/aarch64/start.S:1.8
--- src/sys/arch/aarch64/aarch64/start.S:1.7	Sun Jan 19 16:12:56 2020
+++ src/sys/arch/aarch64/aarch64/start.S	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: start.S,v 1.7 2020/01/19 16:12:56 skrll Exp $	*/
+/*	$NetBSD: start.S,v 1.8 2020/07/16 11:36:35 skrll Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org>
@@ -32,7 +32,7 @@
 
 #include <aarch64/asm.h>
 
-RCSID("$NetBSD: start.S,v 1.7 2020/01/19 16:12:56 skrll Exp $")
+RCSID("$NetBSD: start.S,v 1.8 2020/07/16 11:36:35 skrll Exp $")
 
 /*
  * Padding at start of kernel image to make room for 64-byte header
@@ -71,6 +71,11 @@ start:
 	adrl	x8, fdt_addr_r
 	str	x0, [x8]
 
+	adrl	x8, pmapboot_pagebase
+	ldr	x9, =ARM_BOOTSTRAP_LxPT
+	sub	x9, x9, x10
+	str	x9, [x8]
+
 	b	aarch64_start		/* aarch64_start() @ aarch64/locore.S */
 
 ENTRY_NP(uartputc)

Index: src/sys/arch/aarch64/include/asan.h
diff -u src/sys/arch/aarch64/include/asan.h:1.7 src/sys/arch/aarch64/include/asan.h:1.8
--- src/sys/arch/aarch64/include/asan.h:1.7	Tue Jun 23 17:21:55 2020
+++ src/sys/arch/aarch64/include/asan.h	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: asan.h,v 1.7 2020/06/23 17:21:55 maxv Exp $	*/
+/*	$NetBSD: asan.h,v 1.8 2020/07/16 11:36:35 skrll Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -68,7 +68,7 @@ __md_palloc(void)
 	paddr_t pa;
 
 	if (__predict_false(__md_early))
-		pa = (paddr_t)bootpage_alloc();
+		pa = (paddr_t)pmapboot_pagealloc();
 	else
 		pa = pmap_alloc_pdp(pmap_kernel(), NULL, 0, false);
 

Index: src/sys/arch/aarch64/include/pmap.h
diff -u src/sys/arch/aarch64/include/pmap.h:1.40 src/sys/arch/aarch64/include/pmap.h:1.41
--- src/sys/arch/aarch64/include/pmap.h:1.40	Sun Jun 14 21:47:15 2020
+++ src/sys/arch/aarch64/include/pmap.h	Thu Jul 16 11:36:35 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.40 2020/06/14 21:47:15 ad Exp $ */
+/* $NetBSD: pmap.h,v 1.41 2020/07/16 11:36:35 skrll Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -173,17 +173,12 @@ void pmap_db_ttbrdump(bool, vaddr_t, voi
 pt_entry_t *kvtopte(vaddr_t);
 pt_entry_t pmap_kvattr(vaddr_t, vm_prot_t);
 
-/* locore.S */
-pd_entry_t *bootpage_alloc(void);
-
-/* pmap_locore.c */
-int pmapboot_enter(vaddr_t, paddr_t, psize_t, psize_t,
-    pt_entry_t, uint64_t, pd_entry_t *(*)(void),
+/* pmapboot.c */
+pd_entry_t *pmapboot_pagealloc(void);
+int pmapboot_enter(vaddr_t, paddr_t, psize_t, psize_t, pt_entry_t,
     void (*pr)(const char *, ...) __printflike(1, 2));
-#define PMAPBOOT_ENTER_NOBLOCK		0x00000001
-#define PMAPBOOT_ENTER_NOOVERWRITE	0x00000002
-int pmapboot_enter_range(vaddr_t, paddr_t, psize_t, pt_entry_t, uint64_t,
-    pd_entry_t *(*)(void), void (*)(const char *, ...) __printflike(1, 2));
+int pmapboot_enter_range(vaddr_t, paddr_t, psize_t, pt_entry_t,
+    void (*)(const char *, ...) __printflike(1, 2));
 int pmapboot_protect(vaddr_t, vaddr_t, vm_prot_t);
 void pmap_db_pte_print(pt_entry_t, int,
     void (*pr)(const char *, ...) __printflike(1, 2));

Index: src/sys/arch/evbarm/conf/GENERIC64
diff -u src/sys/arch/evbarm/conf/GENERIC64:1.159 src/sys/arch/evbarm/conf/GENERIC64:1.160
--- src/sys/arch/evbarm/conf/GENERIC64:1.159	Mon May 18 21:19:35 2020
+++ src/sys/arch/evbarm/conf/GENERIC64	Thu Jul 16 11:36:35 2020
@@ -1,5 +1,5 @@
 #
-#	$NetBSD: GENERIC64,v 1.159 2020/05/18 21:19:35 jmcneill Exp $
+#	$NetBSD: GENERIC64,v 1.160 2020/07/16 11:36:35 skrll Exp $
 #
 #	GENERIC ARM (aarch64) kernel
 #
@@ -36,15 +36,15 @@ options 	INCLUDE_CONFIG_FILE
 #options 	UVMHIST_PRINT,KERNHIST_DELAY=0
 
 # EARLYCONS is required for early init messages from VERBOSE_INIT_ARM.
-#options 	EARLYCONS=bcm2837
-#options 	EARLYCONS=bcm2711
+#options 	EARLYCONS=bcm2837, CONSADDR=0x3f215040
+#options 	EARLYCONS=bcm2711, CONSADDR=0xfe215040
 #options 	EARLYCONS=meson, CONSADDR=0xc81004c0
 #options 	EARLYCONS=rk3328, CONSADDR=0xff130000
 #options 	EARLYCONS=rk3399, CONSADDR=0xff1a0000
 #options 	EARLYCONS=sunxi, CONSADDR=0x01c28000
 #options 	EARLYCONS=tegra, CONSADDR=0x70006000
 #options 	EARLYCONS=thunderx, CONSADDR=0x87e024000000
-#options 	EARLYCONS=virt
+#options 	EARLYCONS=virt, CONSADDR=0x09000000
 
 # Pointer Authentication (PAC).
 #makeoptions 	ARMV83_PAC=1

Reply via email to