Module Name:    src
Committed By:   ryo
Date:           Wed Aug 15 11:10:45 UTC 2018

Modified Files:
        src/sys/arch/aarch64/aarch64: aarch64_machdep.c
Added Files:
        src/sys/arch/aarch64/aarch64: kobj_machdep.c

Log Message:
Add MODULAR (loadable kernel module) support for aarch64.


To generate a diff of this commit:
cvs rdiff -u -r1.8 -r1.9 src/sys/arch/aarch64/aarch64/aarch64_machdep.c
cvs rdiff -u -r0 -r1.1 src/sys/arch/aarch64/aarch64/kobj_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/aarch64_machdep.c
diff -u src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.8 src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.9
--- src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.8	Sun Aug  5 06:48:50 2018
+++ src/sys/arch/aarch64/aarch64/aarch64_machdep.c	Wed Aug 15 11:10:45 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: aarch64_machdep.c,v 1.8 2018/08/05 06:48:50 skrll Exp $ */
+/* $NetBSD: aarch64_machdep.c,v 1.9 2018/08/15 11:10:45 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,16 +30,18 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.8 2018/08/05 06:48:50 skrll Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.9 2018/08/15 11:10:45 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
 #include "opt_kernhist.h"
+#include "opt_modular.h"
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/bus.h>
 #include <sys/kauth.h>
+#include <sys/module.h>
 #include <sys/msgbuf.h>
 #include <sys/sysctl.h>
 
@@ -87,6 +89,11 @@ const pcu_ops_t * const pcu_ops_md_defs[
 
 struct vm_map *phys_map;
 
+#ifdef MODULAR
+vaddr_t module_start, module_end;
+static struct vm_map module_map_store;
+#endif
+
 /* XXX */
 vaddr_t physical_start;
 vaddr_t physical_end;
@@ -158,6 +165,7 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	psize_t memsize_total;
 	vaddr_t kernstart, kernend;
 	vaddr_t kernstart_l2, kernend_l2;	/* L2 table 2MB aligned */
+	vaddr_t kernelvmstart;
 	int i;
 
 	aarch64_getcacheinfo();
@@ -168,6 +176,25 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	kernend = round_page((vaddr_t)_end);
 	kernstart_l2 = kernstart & -L2_SIZE;		/* trunk L2_SIZE(2M) */
 	kernend_l2 = (kernend + L2_SIZE - 1) & -L2_SIZE;/* round L2_SIZE(2M) */
+	kernelvmstart = kernend_l2;
+
+#ifdef MODULAR
+	/*
+	 * The aarch64 compilers (gcc and llvm) use R_AARCH_CALL26/R_AARCH_JUMP26
+	 * relocations for function calls and jumps.
+	 * (at this time, neither compiler supports -mlong-calls)
+	 * Therefore kernel modules must be loaded within the reach of a signed
+	 * 26-bit word offset, i.e. within +-128MB of the kernel.
+	 */
+#define MODULE_RESERVED_MAX	(1024 * 1024 * 128)
+#define MODULE_RESERVED_SIZE	(1024 * 1024 * 32)	/* good enough? */
+	module_start = kernelvmstart;
+	module_end = kernend_l2 + MODULE_RESERVED_SIZE;
+	if (module_end >= kernstart_l2 + MODULE_RESERVED_MAX)
+		module_end = kernstart_l2 + MODULE_RESERVED_MAX;
+	KASSERT(module_end > kernend_l2);
+	kernelvmstart = module_end;
+#endif /* MODULAR */
 
 	paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
 	paddr_t kernend_phys = KERN_VTOPHYS(kernend);
@@ -188,6 +215,10 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	    "kernel_start          = 0x%016lx\n"
 	    "kernel_end            = 0x%016lx\n"
 	    "kernel_end_l2         = 0x%016lx\n"
+#ifdef MODULAR
+	    "module_start          = 0x%016lx\n"
+	    "module_end            = 0x%016lx\n"
+#endif
 	    "(kernel va area)\n"
 	    "(devmap va area)\n"
 	    "VM_MAX_KERNEL_ADDRESS = 0x%016lx\n"
@@ -202,6 +233,10 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	    kernstart,
 	    kernend,
 	    kernend_l2,
+#ifdef MODULAR
+	    module_start,
+	    module_end,
+#endif
 	    VM_MAX_KERNEL_ADDRESS);
 
 	/*
@@ -273,7 +308,7 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	 * kernel image is mapped on L2 table (2MB*n) by locore.S
 	 * virtual space start from 2MB aligned kernend
 	 */
-	pmap_bootstrap(kernend_l2, VM_MAX_KERNEL_ADDRESS);
+	pmap_bootstrap(kernelvmstart, VM_MAX_KERNEL_ADDRESS);
 
 	/*
 	 * setup lwp0
@@ -428,6 +463,14 @@ machdep_init(void)
 	cpu_reset_address0 = NULL;
 }
 
+#ifdef MODULAR
+/* Push any modules loaded by the boot loader */
+void
+module_init_md(void)
+{
+}
+#endif /* MODULAR */
+
 bool
 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
 {
@@ -465,6 +508,12 @@ cpu_startup(void)
 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 	   VM_PHYS_SIZE, 0, FALSE, NULL);
 
+#ifdef MODULAR
+	uvm_map_setup(&module_map_store, module_start, module_end, 0);
+	module_map_store.pmap = pmap_kernel();
+	module_map = &module_map_store;
+#endif
+
 	/* Hello! */
 	banner();
 }
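
[Editor's note] For readers following the VA layout change above, here is a
minimal, standalone user-space sketch (not part of the commit) of the module
window clamping that initarm_common() now performs. The constants mirror the
MODULE_RESERVED_MAX/MODULE_RESERVED_SIZE definitions in the diff; the kernel
addresses and the 32MB kernel size are purely hypothetical:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L2_SIZE			(2UL * 1024 * 1024)
#define MODULE_RESERVED_MAX	(1024UL * 1024 * 128)
#define MODULE_RESERVED_SIZE	(1024UL * 1024 * 32)	/* good enough? */

int
main(void)
{
	/* hypothetical 2MB-aligned kernel start/end addresses */
	uint64_t kernstart_l2 = 0xffffc00000000000ULL;
	uint64_t kernend_l2 = kernstart_l2 + 16 * L2_SIZE;	/* 32MB kernel */

	/* reserve module VA right after the kernel, clamped to start+128MB */
	uint64_t module_start = kernend_l2;
	uint64_t module_end = kernend_l2 + MODULE_RESERVED_SIZE;
	if (module_end >= kernstart_l2 + MODULE_RESERVED_MAX)
		module_end = kernstart_l2 + MODULE_RESERVED_MAX;
	assert(module_end > kernend_l2);

	printf("module VA window: 0x%016jx - 0x%016jx (%ju MB)\n",
	    (uintmax_t)module_start, (uintmax_t)module_end,
	    (uintmax_t)((module_end - module_start) >> 20));
	return 0;
}

The kernel VM then starts at module_end instead of kernend_l2, which is why
pmap_bootstrap() is now called with kernelvmstart.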

Added files:

Index: src/sys/arch/aarch64/aarch64/kobj_machdep.c
diff -u /dev/null src/sys/arch/aarch64/aarch64/kobj_machdep.c:1.1
--- /dev/null	Wed Aug 15 11:10:45 2018
+++ src/sys/arch/aarch64/aarch64/kobj_machdep.c	Wed Aug 15 11:10:45 2018
@@ -0,0 +1,364 @@
+/*	$NetBSD: kobj_machdep.c,v 1.1 2018/08/15 11:10:45 ryo Exp $	*/
+
+/*
+ * Copyright (c) 2018 Ryo Shimizu <r...@nerv.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: kobj_machdep.c,v 1.1 2018/08/15 11:10:45 ryo Exp $");
+
+#define ELFSIZE		ARCH_ELFSIZE
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/kobj.h>
+#include <sys/exec.h>
+#include <sys/exec_elf.h>
+#include <sys/errno.h>
+#include <sys/queue.h>
+#include <sys/tree.h>
+
+#include <aarch64/cpufunc.h>
+
+/* #define KOBJ_MACHDEP_DEBUG */
+
+#ifdef KOBJ_MACHDEP_DEBUG
+#ifdef DDB
+#include <aarch64/db_machdep.h>	/* for strdisasm() */
+#endif
+
+struct rtypeinfo {
+	Elf_Word rtype;
+	const char *name;
+};
+
+static const struct rtypeinfo rtypetbl[] = {
+	{ R_AARCH64_ABS64,		"R_AARCH64_ABS64"		},
+	{ R_AARCH64_ADD_ABS_LO12_NC,	"R_AARCH64_ADD_ABS_LO12_NC"	},
+	{ R_AARCH_LDST64_ABS_LO12_NC,	"R_AARCH64_LDST64_ABS_LO12_NC"	},
+	{ R_AARCH_LDST32_ABS_LO12_NC,	"R_AARCH64_LDST32_ABS_LO12_NC"	},
+	{ R_AARCH_LDST16_ABS_LO12_NC,	"R_AARCH64_LDST16_ABS_LO12_NC"	},
+	{ R_AARCH64_LDST8_ABS_LO12_NC,	"R_AARCH64_LDST8_ABS_LO12_NC"	},
+	{ R_AARCH64_ADR_PREL_PG_HI21_NC, "R_AARCH64_ADR_PREL_PG_HI21_NC"},
+	{ R_AARCH64_ADR_PREL_PG_HI21,	"R_AARCH64_ADR_PREL_PG_HI21"	},
+	{ R_AARCH_JUMP26,		"R_AARCH64_JUMP26"		},
+	{ R_AARCH_CALL26,		"R_AARCH64_CALL26"		},
+	{ R_AARCH64_PREL32,		"R_AARCH64_PREL32"		},
+	{ R_AARCH64_PREL16,		"R_AARCH64_PREL16"		}
+};
+
+static const char *
+strrtype(Elf_Word rtype)
+{
+	int i;
+	static char buf[64];
+
+	for (i = 0; i < __arraycount(rtypetbl); i++) {
+		if (rtypetbl[i].rtype == rtype)
+			return rtypetbl[i].name;
+	}
+	snprintf(buf, sizeof(buf), "RELOCATION-TYPE-%d", rtype);
+	return buf;
+}
+#endif /* KOBJ_MACHDEP_DEBUG */
+
+static inline bool
+checkalign(Elf_Addr addr, int alignbyte, void *where, Elf64_Addr off)
+{
+	if ((addr & (alignbyte - 1)) != 0) {
+		printf("kobj_reloc: Relocation 0x%jx unaligned at %p"
+		    " (base+0x%jx). must be aligned %d\n",
+		    (uintptr_t)addr, where, off, alignbyte);
+		return true;
+	}
+	return false;
+}
+
+static inline bool
+checkoverflow(Elf_Addr addr, int bitwidth, Elf_Addr targetaddr,
+    const char *bitscale, void *where, Elf64_Addr off)
+{
+	const Elf_Addr mask = ~__BITS(bitwidth - 1, 0);
+
+	if (((addr & mask) != 0) && ((addr & mask) != mask)) {
+		printf("kobj_reloc: Relocation 0x%jx too far from %p"
+		    " (base+0x%jx) for %dbit%s\n",
+		    (uintptr_t)targetaddr, where, off, bitwidth, bitscale);
+		return true;
+	}
+	return false;
+}
+
+#define WIDTHMASK(w)	(0xffffffffffffffffUL >> (64 - (w)))
+
+int
+kobj_reloc(kobj_t ko, uintptr_t relocbase, const void *data,
+    bool isrela, bool local)
+{
+	Elf_Addr saddr, addend, raddr, val;
+	Elf64_Addr off, *where;
+	Elf32_Addr *where32;
+	uint16_t *where16;
+	Elf_Word rtype, symidx;
+	const Elf_Rela *rela;
+	int error;
+	uint32_t *insn, immhi, immlo, shift;
+	bool nc = false;
+#ifdef KOBJ_MACHDEP_DEBUG
+#ifdef DDB
+	char disasmbuf[256];
+#endif
+	Elf_Addr old;
+#endif /* KOBJ_MACHDEP_DEBUG */
+
+
+#ifdef KOBJ_MACHDEP_DEBUG
+	printf("%s:%d: ko=%p, relocbase=0x%jx, data=%p"
+	    ", isrela=%d, local=%d\n", __func__, __LINE__,
+	    ko, relocbase, data, isrela, local);
+#endif /* KOBJ_MACHDEP_DEBUG */
+
+	if (!isrela) {
+		printf("kobj_reloc: REL relocations not supported");
+		error = 1;
+		goto done;
+	}
+
+	rela = (const Elf_Rela *)data;
+	addend = rela->r_addend;
+	rtype = ELF_R_TYPE(rela->r_info);
+	symidx = ELF_R_SYM(rela->r_info);
+	off = rela->r_offset;
+	where = (Elf_Addr *)(relocbase + off);
+
+	/* pointer to 32bit, 16bit, and instruction */
+	where32 = (void *)where;
+	where16 = (void *)where;
+	insn = (uint32_t *)where;
+
+	/* no need to lookup any symbols */
+	switch (rtype) {
+	case R_AARCH64_NONE:
+	case R_AARCH64_NONE2:
+		return 0;
+	}
+
+	error = kobj_sym_lookup(ko, symidx, &saddr);
+	if (error != 0) {
+		printf("kobj_reloc: symidx %d lookup failure."
+		    " relocation type %d at %p (base+0x%jx)\n",
+		    symidx, rtype, where, off);
+		goto done;
+	}
+
+#ifdef KOBJ_MACHDEP_DEBUG
+	printf("%s:%d: symidx=%d, saddr=0x%jx, addend=0x%jx\n",
+	    __func__, __LINE__, symidx, (uintptr_t)saddr, (uintptr_t)addend);
+	printf("%s:%d: rtype=%s, where=%p (base+0x%jx)\n",
+	    __func__, __LINE__, strrtype(rtype), where, off);
+	old = *where;
+#ifdef DDB
+	snprintf(disasmbuf, sizeof(disasmbuf), "%08x %s",
+	    *insn, strdisasm((vaddr_t)insn));
+#endif
+#endif /* KOBJ_MACHDEP_DEBUG */
+
+	switch (rtype) {
+	case R_AARCH64_ABS64:
+		/*
+		 * S + A
+		 *  e.g.) .quad <sym>+addend
+		 */
+		*where = saddr + addend;
+		break;
+	case R_AARCH64_ABS32:
+		/*
+		 * S + A
+		 *  e.g.) .word <sym>+addend
+		 */
+		*where32 = saddr + addend;
+		break;
+	case R_AARCH64_ABS16:
+		/*
+		 * S + A
+		 *  e.g.) .short <sym>+addend
+		 */
+		*where16 = saddr + addend;
+		break;
+	case R_AARCH64_ADD_ABS_LO12_NC:
+	case R_AARCH64_LDST8_ABS_LO12_NC:
+	case R_AARCH_LDST16_ABS_LO12_NC:
+	case R_AARCH_LDST32_ABS_LO12_NC:
+	case R_AARCH_LDST64_ABS_LO12_NC:
+		switch (rtype) {
+		case R_AARCH64_ADD_ABS_LO12_NC:
+		case R_AARCH64_LDST8_ABS_LO12_NC:
+			shift = 0;
+			break;
+		case R_AARCH_LDST16_ABS_LO12_NC:
+			shift = 1;
+			break;
+		case R_AARCH_LDST32_ABS_LO12_NC:
+			shift = 2;
+			break;
+		case R_AARCH_LDST64_ABS_LO12_NC:
+			shift = 3;
+			break;
+		default:
+			panic("illegal rtype: %d\n", rtype);
+		}
+		/*
+		 * S + A
+		 *  e.g.) add  x0,x0,#:lo12:<sym>+<addend>
+		 *        ldrb w0,[x0,#:lo12:<sym>+<addend>]
+		 *        ldrh w0,[x0,#:lo12:<sym>+<addend>]
+		 *        ldr  w0,[x0,#:lo12:<sym>+<addend>]
+		 *        ldr  x0,[x0,#:lo12:<sym>+<addend>]
+		 */
+		val = saddr + addend;
+		if (checkalign(val, 1 << shift, where, off)) {
+			error = 1;
+			break;
+		}
+		val &= WIDTHMASK(12);
+		val >>= shift;
+		*insn = (*insn & ~__BITS(21,10)) | (val << 10);
+		break;
+
+	case R_AARCH64_ADR_PREL_PG_HI21_NC:
+		nc = true;
+		/* FALLTHRU */
+	case R_AARCH64_ADR_PREL_PG_HI21:
+		/*
+		 * Page(S + A) - Page(P)
+		 *  e.g.) adrp x0,<sym>+<addend>
+		 */
+		val = saddr + addend;
+		val = val >> 12;
+		raddr = val << 12;
+		val -= (uintptr_t)where >> 12;
+		if (!nc && checkoverflow(val, 21, val, " x 4k", where, off)) {
+			error = 1;
+			break;
+		}
+		immlo = val & WIDTHMASK(2);
+		immhi = (val >> 2) & WIDTHMASK(19);
+		*insn = (*insn & ~(__BITS(30,29) | __BITS(23,5))) |
+		    (immlo << 29) | (immhi << 5);
+		break;
+
+	case R_AARCH_JUMP26:
+	case R_AARCH_CALL26:
+		/*
+		 * S + A - P
+		 *  e.g.) b <sym>+<addend>
+		 *        bl <sym>+<addend>
+		 */
+		raddr = saddr + addend;
+		val = raddr - (uintptr_t)where;
+		if (checkalign(val, 4, where, off)) {
+			error = 1;
+			break;
+		}
+		val = (intptr_t)val >> 2;
+		if (checkoverflow(val, 26, val, " word", where, off)) {
+			error = 1;
+			break;
+		}
+		val &= WIDTHMASK(26);
+		*insn = (*insn & ~__BITS(25,0)) | val;
+		break;
+
+	case R_AARCH64_PREL64:
+		/*
+		 * S + A - P
+		 *  e.g.) 1: .quad <sym>+<addend>-1b
+		 */
+		raddr = saddr + addend;
+		val = raddr - (uintptr_t)where;
+		if (checkoverflow(val, 64, val, "", where, off)) {
+			error = 1;
+			break;
+		}
+		*where = val;
+		break;
+	case R_AARCH64_PREL32:
+		/*
+		 * S + A - P
+		 *  e.g.) 1: .word <sym>+<addend>-1b
+		 */
+		raddr = saddr + addend;
+		val = raddr - (uintptr_t)where;
+		if (checkoverflow(val, 32, val, "", where, off)) {
+			error = 1;
+			break;
+		}
+		*where32 = val;
+		break;
+	case R_AARCH64_PREL16:
+		/*
+		 * S + A - P
+		 *  e.g.) 1: .short <sym>+<addend>-1b
+		 */
+		raddr = saddr + addend;
+		val = raddr - (uintptr_t)where;
+		if (checkoverflow(val, 16, val, "", where, off)) {
+			error = 1;
+			break;
+		}
+		*where16 = val;
+		break;
+	default:
+		printf("kobj_reloc: unsupported relocation type %d"
+		    " at %p (base+0x%jx) symidx %u\n",
+		    rtype, where, off, symidx);
+		error = 1;
+		break;
+	}
+
+#ifdef KOBJ_MACHDEP_DEBUG
+	printf("%s: reloc\n", __func__);
+	printf("%s:  *where %016jx\n", __func__, (uintptr_t)old);
+	printf("%s:      -> %016jx\n", __func__, (uintptr_t)*where);
+#ifdef DDB
+	printf("%s:    insn %s\n", __func__, disasmbuf);
+	printf("%s:      -> %08x %s\n", __func__,
+	    *insn, strdisasm((vaddr_t)insn));
+#endif
+	printf("\n");
+#endif /* KOBJ_MACHDEP_DEBUG */
+
+ done:
+	if (error != 0)
+		return -1;
+	return 0;
+}
+
+int
+kobj_machdep(kobj_t ko, void *base, size_t size, bool load)
+{
+	return 0;
+}

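[Editor's note] A minimal, standalone sketch (not part of the commit) of the
arithmetic behind the R_AARCH_CALL26/R_AARCH_JUMP26 case in kobj_reloc() above:
the word-aligned, PC-relative offset to the target is encoded as a signed
26-bit word count in bits [25:0] of the B/BL instruction. The helper name
patch_call26, the instruction word, and the addresses are all hypothetical:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int
patch_call26(uint32_t *insn, uint64_t where, uint64_t target)
{
	int64_t off = (int64_t)(target - where);	/* S + A - P */

	if (off & 3)					/* must be word aligned */
		return -1;
	off >>= 2;					/* word offset */
	if (off >= (1 << 25) || off < -(1 << 25))	/* signed 26-bit range */
		return -1;
	*insn = (*insn & ~0x03ffffffU) | ((uint32_t)off & 0x03ffffffU);
	return 0;
}

int
main(void)
{
	uint32_t bl = 0x94000000;		/* "bl ." with zero offset */
	uint64_t where = 0xffffc00000200000ULL;	/* hypothetical call site */
	uint64_t target = 0xffffc00000201010ULL;/* hypothetical callee */

	if (patch_call26(&bl, where, target) == 0)
		printf("patched insn: %08" PRIx32 "\n", bl);
	return 0;
}

The +-2^25 word (+-128MB) range check here is the same limit that motivates
reserving the module VA window right next to the kernel in aarch64_machdep.c.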