Module Name:    src
Committed By:   maxv
Date:           Tue Oct 10 09:29:14 UTC 2017

Added Files:
        src/sys/arch/amd64/stand: Makefile
        src/sys/arch/amd64/stand/prekern: Makefile console.c elf.c locore.S
            mm.c pdir.h prekern.c prekern.h prekern.ldscript redef.h trap.S

Log Message:
Add the amd64 prekern. It is a kernel relocator used for Kernel ASLR (see
tech-kern@). It works, but is not yet linked to the build system, because
I can't build a distribution right now.
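
In miniature, "kernel relocator" means: load the kernel image, pick a random
base inside the KASLR window, map the image there, patch its relocations
against that base, and jump to the relocated entry point. As a toy
illustration (not the prekern code; the real logic in elf.c below also
handles the symbol-based relocation types), an R_X86_64_RELATIVE pass looks
like:

	#include <stdio.h>
	#include <stdint.h>

	/* Minimal relocation record, mirroring Elf64_Rela. */
	struct rela {
		uint64_t r_offset;	/* patch site, relative to the image */
		uint64_t r_addend;	/* constant recorded by the linker */
	};

	/* R_X86_64_RELATIVE: store B + A at each patch site. */
	static void
	apply_relative(uint8_t *image, uint64_t base, const struct rela *r,
	    size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			*(uint64_t *)(image + r[i].r_offset) =
			    base + r[i].r_addend;
	}

	int
	main(void)
	{
		uint64_t image[4] = { 0 };
		struct rela r[1] = {{ .r_offset = 8, .r_addend = 0x2000 }};

		apply_relative((uint8_t *)image, 0xffffffff80e00000ULL, r, 1);
		printf("%#llx\n", (unsigned long long)image[1]);
		return 0;
	}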


To generate a diff of this commit:
cvs rdiff -u -r0 -r1.1 src/sys/arch/amd64/stand/Makefile
cvs rdiff -u -r0 -r1.1 src/sys/arch/amd64/stand/prekern/Makefile \
    src/sys/arch/amd64/stand/prekern/console.c \
    src/sys/arch/amd64/stand/prekern/elf.c \
    src/sys/arch/amd64/stand/prekern/locore.S \
    src/sys/arch/amd64/stand/prekern/mm.c \
    src/sys/arch/amd64/stand/prekern/pdir.h \
    src/sys/arch/amd64/stand/prekern/prekern.c \
    src/sys/arch/amd64/stand/prekern/prekern.h \
    src/sys/arch/amd64/stand/prekern/prekern.ldscript \
    src/sys/arch/amd64/stand/prekern/redef.h \
    src/sys/arch/amd64/stand/prekern/trap.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Added files:

Index: src/sys/arch/amd64/stand/Makefile
diff -u /dev/null src/sys/arch/amd64/stand/Makefile:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/Makefile	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,5 @@
+#	$NetBSD: Makefile,v 1.1 2017/10/10 09:29:14 maxv Exp $
+
+SUBDIR= prekern
+
+.include <bsd.subdir.mk>

Index: src/sys/arch/amd64/stand/prekern/Makefile
diff -u /dev/null src/sys/arch/amd64/stand/prekern/Makefile:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/Makefile	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,40 @@
+#	$NetBSD: Makefile,v 1.1 2017/10/10 09:29:14 maxv Exp $
+
+PROG=		prekern
+SRCS=	locore.S trap.S prekern.c mm.c console.c elf.c
+
+NOSSP=		# defined
+NOPIE=		# defined
+NOMAN=		1
+
+S=	${.CURDIR}/../../../..
+
+.PATH: ${.CURDIR}
+
+BINDIR=		/usr/mdec
+BINMODE=	444
+
+.include <bsd.own.mk>
+
+CPPFLAGS+=	-I. -I${S}
+
+.include <bsd.klinks.mk>
+
+CPPFLAGS+=	-DKERNEL -D__x86_64__
+CFLAGS+=	-Wall -Werror -mno-red-zone -mno-mmx -mno-sse -mno-avx -ffreestanding
+STRIPFLAG=
+LINKFLAGS=	-X -z max-page-size=0x100000 -Ttext 0x100000 -T prekern.ldscript
+
+LIBCRT0=	# nothing
+LIBCRTI=	# nothing
+LIBC=		# nothing
+LIBCRTBEGIN=	# nothing
+LIBCRTEND=	# nothing
+
+${PROG}: ${OBJS}
+	${LD} ${LINKFLAGS} -o ${.TARGET} ${OBJS}
+
+all:	${PROG}
+
+.include <bsd.prog.mk>
+
Index: src/sys/arch/amd64/stand/prekern/console.c
diff -u /dev/null src/sys/arch/amd64/stand/prekern/console.c:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/console.c	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,120 @@
+/*	$NetBSD: console.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "prekern.h"
+
+extern vaddr_t atdevbase;
+#define CONS_WID 80
+#define CONS_HEI 25
+
+static char *cons_start;
+static size_t cons_x, cons_y;
+static char cons_buffer[CONS_WID * 2 * CONS_HEI];
+
+void init_cons()
+{
+	cons_start = (char *)atdevbase + (0xB8000 - IOM_BEGIN);
+	cons_x = 0;
+	cons_y = 0;
+}
+
+static void check_scroll()
+{
+	char *src, *dst;
+	size_t i;
+
+	if (cons_y != CONS_HEI)
+		return;
+
+	for (i = 0; i < CONS_HEI-1; i++) {
+		dst = &cons_buffer[0] + i * (CONS_WID * 2);
+		src = &cons_buffer[0] + (i + 1) * (CONS_WID * 2);
+		memcpy(dst, src, (CONS_WID * 2));
+	}
+	memset(&cons_buffer[0] + (CONS_WID * 2) * (CONS_HEI-1), 0,
+	    (CONS_WID * 2));
+	cons_y--;
+	memcpy(cons_start, &cons_buffer[0], (CONS_WID * 2) * CONS_HEI);
+}
+
+void print_ext(int color, char *buf)
+{
+	char *ptr, *scr;
+	size_t i;
+
+	for (i = 0; buf[i] != '\0'; i++) {
+		if (buf[i] == '\n') {
+			cons_x = 0;
+			cons_y++;
+			check_scroll();
+		} else {
+			if (cons_x + 1 == CONS_WID) {
+				cons_x = 0;
+				cons_y++;
+				check_scroll();
+			}
+			ptr = (cons_start + 2 * cons_x + 160 * cons_y);
+			scr = (cons_buffer + 2 * cons_x + 160 * cons_y);
+			ptr[0] = scr[0] = buf[i];
+			ptr[1] = scr[1] = color;
+			cons_x++;
+		}
+	}
+}
+
+void print(char *buf)
+{
+	print_ext(WHITE_ON_BLACK, buf);
+}
+
+void print_state(bool ok, char *buf)
+{
+	print("[");
+	if (ok)
+		print_ext(GREEN_ON_BLACK, "+");
+	else
+		print_ext(RED_ON_BLACK, "!");
+	print("] ");
+	print(buf);
+	print("\n");
+}
+
+void print_banner()
+{
+	char *banner = 
+		"           __________                 __                        \n"
+		"           \\______   \\_______   ____ |  | __ ___________  ____  \n"
+		"            |     ___/\\_  __ \\_/ __ \\|  |/ // __ \\_  __ \\/    \\ \n"
+		"            |    |     |  | \\/\\  ___/|    <\\  ___/|  | \\/   |  \\\n"
+		"            |____|     |__|    \\___  >__|_ \\\\___  >__|  |___|  /\n"
+		"                                   \\/     \\/    \\/           \\/    Version 1.0\n"
+	;
+	print(banner);
+}
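
The console above writes straight into VGA text memory: the 80x25 screen is
an array of two-byte cells, character then attribute, so the cell for column
x, row y lives at byte offset 2 * x + 160 * y, which is exactly the
arithmetic in print_ext. A standalone sketch of the addressing, with an
ordinary buffer standing in for the mapping of 0xB8000:

	#include <stdio.h>

	#define WID	80
	#define HEI	25

	static unsigned char vga[WID * 2 * HEI];	/* stand-in for 0xB8000 */

	static void
	putcell(int x, int y, char ch, unsigned char attr)
	{
		unsigned char *cell = &vga[2 * x + 2 * WID * y];

		cell[0] = (unsigned char)ch;	/* character byte */
		cell[1] = attr;		/* attribute: 0x07 = white on black */
	}

	int
	main(void)
	{
		putcell(0, 0, '+', 0x02);	/* green on black, as print_state */
		printf("cell (0,0): '%c', attr %#x\n", vga[0], vga[1]);
		return 0;
	}
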
Index: src/sys/arch/amd64/stand/prekern/elf.c
diff -u /dev/null src/sys/arch/amd64/stand/prekern/elf.c:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/elf.c	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,520 @@
+/*	$NetBSD: elf.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define	ELFSIZE	64
+
+#include "prekern.h"
+#include <sys/exec_elf.h>
+
+struct elfinfo {
+	Elf_Ehdr *ehdr;
+	Elf_Shdr *shdr;
+	char *shstrtab;
+	size_t shstrsz;
+	Elf_Sym *symtab;
+	size_t symcnt;
+	char *strtab;
+	size_t strsz;
+	struct {
+		vaddr_t va;
+		size_t sz;
+	} text;
+	struct {
+		vaddr_t va;
+		size_t sz;
+	} rodata;
+	struct {
+		vaddr_t va;
+		size_t sz;
+	} data;
+};
+
+static struct elfinfo eif;
+static const char entrypoint[] = "start_prekern";
+
+/* XXX */
+static int
+memcmp(const char *a, const char *b, size_t c)
+{
+	size_t i;
+	for (i = 0; i < c; i++) {
+		if (a[i] != b[i])
+			return 1;
+	}
+	return 0;
+}
+static int
+strcmp(char *a, char *b)
+{
+	size_t i;
+	for (i = 0; a[i] != '\0'; i++) {
+		if (a[i] != b[i])
+			return 1;
+	}
+	return (b[i] != '\0');
+}
+
+
+static int
+elf_check_header()
+{
+	if (memcmp((char *)eif.ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
+	    eif.ehdr->e_ident[EI_CLASS] != ELFCLASS) {
+		return -1;
+	}
+	return 0;
+}
+
+static vaddr_t
+elf_get_entrypoint()
+{
+	Elf_Sym *sym;
+	size_t i;
+	char *buf;
+
+	for (i = 0; i < eif.symcnt; i++) {
+		sym = &eif.symtab[i];
+
+		if (ELF_ST_TYPE(sym->st_info) != STT_FUNC)
+			continue;
+		if (sym->st_name == 0)
+			continue;
+		if (sym->st_shndx == SHN_UNDEF)
+			continue; /* Skip external references */
+		buf = eif.strtab + sym->st_name;
+
+		if (!memcmp(buf, entrypoint, sizeof(entrypoint))) {
+			return (vaddr_t)sym->st_value;
+		}
+	}
+
+	return 0;
+}
+
+static Elf_Shdr *
+elf_find_section(char *name)
+{
+	char *buf;
+	size_t i;
+
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_name == 0) {
+			continue;
+		}
+		buf = eif.shstrtab + eif.shdr[i].sh_name;
+		if (!strcmp(name, buf)) {
+			return &eif.shdr[i];
+		}
+	}
+
+	return NULL;
+}
+
+static uintptr_t
+elf_sym_lookup(size_t symidx)
+{
+	const Elf_Sym *sym;
+	char *buf, *secname;
+	Elf_Shdr *sec;
+
+	if (symidx >= eif.symcnt) {
+		fatal("elf_sym_lookup: symbol beyond table");
+	}
+	sym = &eif.symtab[symidx];
+	buf = eif.strtab + sym->st_name;
+
+	if (sym->st_shndx == SHN_UNDEF) {
+		if (!memcmp(buf, "__start_link_set", 16)) {
+			secname = buf + 8;
+			sec = elf_find_section(secname);
+			if (sec == NULL) {
+				fatal("elf_sym_lookup: unknown start link set");
+			}
+			return (uintptr_t)((uint8_t *)eif.ehdr +
+			    sec->sh_offset);
+		}
+		if (!memcmp(buf, "__stop_link_set", 15)) {
+			secname = buf + 7;
+			sec = elf_find_section(secname);
+			if (sec == NULL) {
+				fatal("elf_sym_lookup: unknown stop link set");
+			}
+			return (uintptr_t)((uint8_t *)eif.ehdr +
+			    sec->sh_offset + sec->sh_size);
+		}
+
+		fatal("elf_sym_lookup: external symbol");
+	}
+	if (sym->st_value == 0) {
+		fatal("elf_sym_lookup: zero value");
+	}
+	return (uintptr_t)sym->st_value;
+}
+
+static void
+elf_apply_reloc(uintptr_t relocbase, const void *data, bool isrela)
+{
+	Elf64_Addr *where, val;
+	Elf32_Addr *where32, val32;
+	Elf64_Addr addr;
+	Elf64_Addr addend;
+	uintptr_t rtype, symidx;
+	const Elf_Rel *rel;
+	const Elf_Rela *rela;
+
+	if (isrela) {
+		rela = (const Elf_Rela *)data;
+		where = (Elf64_Addr *)(relocbase + rela->r_offset);
+		addend = rela->r_addend;
+		rtype = ELF_R_TYPE(rela->r_info);
+		symidx = ELF_R_SYM(rela->r_info);
+	} else {
+		rel = (const Elf_Rel *)data;
+		where = (Elf64_Addr *)(relocbase + rel->r_offset);
+		rtype = ELF_R_TYPE(rel->r_info);
+		symidx = ELF_R_SYM(rel->r_info);
+		/* Addend is 32 bit on 32 bit relocs */
+		switch (rtype) {
+		case R_X86_64_PC32:
+		case R_X86_64_32:
+		case R_X86_64_32S:
+			addend = *(Elf32_Addr *)where;
+			break;
+		default:
+			addend = *where;
+			break;
+		}
+	}
+
+	switch (rtype) {
+	case R_X86_64_NONE:	/* none */
+		break;
+
+	case R_X86_64_64:		/* S + A */
+		addr = elf_sym_lookup(symidx);
+		val = addr + addend;
+		*where = val;
+		break;
+
+	case R_X86_64_PC32:	/* S + A - P */
+		addr = elf_sym_lookup(symidx);
+		where32 = (Elf32_Addr *)where;
+		val32 = (Elf32_Addr)(addr + addend - (Elf64_Addr)where);
+		*where32 = val32;
+		break;
+
+	case R_X86_64_32:	/* S + A */
+	case R_X86_64_32S:	/* S + A sign extend */
+		addr = elf_sym_lookup(symidx);
+		val32 = (Elf32_Addr)(addr + addend);
+		where32 = (Elf32_Addr *)where;
+		*where32 = val32;
+		break;
+
+	case R_X86_64_GLOB_DAT:	/* S */
+	case R_X86_64_JUMP_SLOT:/* XXX need addend + offset */
+		addr = elf_sym_lookup(symidx);
+		*where = addr;
+		break;
+
+	case R_X86_64_RELATIVE:	/* B + A */
+		addr = relocbase + addend;
+		val = addr;
+		*where = val;
+		break;
+
+	default:
+		fatal("elf_apply_reloc: unexpected relocation type");
+	}
+}
+
+static void
+elf_build_info(vaddr_t baseva)
+{
+	vaddr_t secva, minva, maxva;
+	size_t secsz;
+	size_t i, j;
+
+	memset(&eif, 0, sizeof(struct elfinfo));
+
+	eif.ehdr = (Elf_Ehdr *)baseva;
+	eif.shdr = (Elf_Shdr *)((uint8_t *)eif.ehdr + eif.ehdr->e_shoff);
+
+	if (elf_check_header() == -1) {
+		fatal("elf_build_info: wrong kernel ELF header");
+	}
+
+	/* Locate the section names */
+	j = eif.ehdr->e_shstrndx;
+	if (j == SHN_UNDEF) {
+		fatal("elf_build_info: shstrtab not found");
+	}
+	if (j >= eif.ehdr->e_shnum) {
+		fatal("elf_build_info: wrong shstrtab index");
+	}
+	eif.shstrtab = (char *)((uint8_t *)eif.ehdr + eif.shdr[j].sh_offset);
+	eif.shstrsz = eif.shdr[j].sh_size;
+
+	/* Locate the symbol table */
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_type == SHT_SYMTAB)
+			break;
+	}
+	if (i == eif.ehdr->e_shnum) {
+		fatal("elf_build_info: symtab not found");
+	}
+	eif.symtab = (Elf_Sym *)((uint8_t *)eif.ehdr + eif.shdr[i].sh_offset);
+	eif.symcnt = eif.shdr[i].sh_size / sizeof(Elf_Sym);
+
+	/* Also locate the string table */
+	j = eif.shdr[i].sh_link;
+	if (j == SHN_UNDEF || j >= eif.ehdr->e_shnum) {
+		fatal("elf_build_info: wrong strtab index");
+	}
+	if (eif.shdr[j].sh_type != SHT_STRTAB) {
+		fatal("elf_build_info: wrong strtab type");
+	}
+	eif.strtab = (char *)((uint8_t *)eif.ehdr + eif.shdr[j].sh_offset);
+	eif.strsz = eif.shdr[j].sh_size;
+
+	/*
+	 * Save the locations of the kernel segments. Attention: there is a
+	 * difference between "segment" and "section". A segment can contain
+	 * several sections.
+	 */
+
+	/* text */
+	minva = 0xFFFFFFFFFFFFFFFF, maxva = 0;
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_type != SHT_NOBITS &&
+		    eif.shdr[i].sh_type != SHT_PROGBITS) {
+			continue;
+		}
+		if (!(eif.shdr[i].sh_flags & SHF_EXECINSTR)) {
+			continue;
+		}
+		secva = baseva + eif.shdr[i].sh_offset;
+		secsz = eif.shdr[i].sh_size;
+		if (secva < minva) {
+			minva = secva;
+		}
+		if (secva + secsz > maxva) {
+			maxva = secva + secsz;
+		}
+	}
+	eif.text.va = minva;
+	eif.text.sz = roundup(maxva - minva, PAGE_SIZE);
+	ASSERT(eif.text.va % PAGE_SIZE == 0);
+
+	/* rodata */
+	minva = 0xFFFFFFFFFFFFFFFF, maxva = 0;
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_type != SHT_NOBITS &&
+		    eif.shdr[i].sh_type != SHT_PROGBITS) {
+			continue;
+		}
+		if ((eif.shdr[i].sh_flags & (SHF_EXECINSTR|SHF_WRITE))) {
+			continue;
+		}
+		secva = baseva + eif.shdr[i].sh_offset;
+		secsz = eif.shdr[i].sh_size;
+		if (secva < minva) {
+			minva = secva;
+		}
+		if (secva + secsz > maxva) {
+			maxva = secva + secsz;
+		}
+	}
+	eif.rodata.va = minva;
+	eif.rodata.sz = roundup(maxva - minva, PAGE_SIZE);
+	ASSERT(eif.rodata.va % PAGE_SIZE == 0);
+
+	/* data */
+	minva = 0xFFFFFFFFFFFFFFFF, maxva = 0;
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_type != SHT_NOBITS &&
+		    eif.shdr[i].sh_type != SHT_PROGBITS) {
+			continue;
+		}
+		if (!(eif.shdr[i].sh_flags & SHF_WRITE) ||
+		    (eif.shdr[i].sh_flags & SHF_EXECINSTR)) {
+			continue;
+		}
+		secva = baseva + eif.shdr[i].sh_offset;
+		secsz = eif.shdr[i].sh_size;
+		if (secva < minva) {
+			minva = secva;
+		}
+		if (secva + secsz > maxva) {
+			maxva = secva + secsz;
+		}
+	}
+	eif.data.va = minva;
+	eif.data.sz = roundup(maxva - minva, PAGE_SIZE);
+	ASSERT(eif.data.va % PAGE_SIZE == 0);
+}
+
+vaddr_t
+elf_kernel_reloc(vaddr_t baseva)
+{
+	vaddr_t secva, ent;
+	Elf_Sym *sym;
+	size_t i, j;
+
+	elf_build_info(baseva);
+
+	print_state(true, "ELF info created");
+
+	/*
+	 * The loaded sections are: SHT_PROGBITS, SHT_NOBITS, SHT_STRTAB,
+	 * SHT_SYMTAB.
+	 */
+
+	/*
+	 * Update all symbol values with the appropriate offset.
+	 */
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		if (eif.shdr[i].sh_type != SHT_NOBITS &&
+		    eif.shdr[i].sh_type != SHT_PROGBITS) {
+			continue;
+		}
+		secva = baseva + eif.shdr[i].sh_offset;
+		for (j = 0; j < eif.symcnt; j++) {
+			sym = &eif.symtab[j];
+			if (sym->st_shndx != i) {
+				continue;
+			}
+			sym->st_value += (Elf_Addr)secva;
+		}
+	}
+
+	print_state(true, "Symbol values updated");
+
+	/*
+	 * Perform relocations without addend if there are any.
+	 */
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		Elf_Rel *reltab, *rel;
+		size_t secidx, nrel;
+		uintptr_t base;
+
+		if (eif.shdr[i].sh_type != SHT_REL)
+			continue;
+
+		reltab = (Elf_Rel *)((uint8_t *)eif.ehdr + eif.shdr[i].sh_offset);
+		nrel = eif.shdr[i].sh_size / sizeof(Elf_Rel);
+
+		secidx = eif.shdr[i].sh_info;
+		if (secidx >= eif.ehdr->e_shnum) {
+			fatal("elf_kernel_reloc: wrong REL relocation");
+		}
+		base = (uintptr_t)eif.ehdr + eif.shdr[secidx].sh_offset;
+
+		for (j = 0; j < nrel; j++) {
+			rel = &reltab[j];
+			elf_apply_reloc(base, rel, false);
+		}
+	}
+
+	print_state(true, "REL relocations applied");
+
+	/*
+	 * Perform relocations with addend if there are any.
+	 */
+	for (i = 0; i < eif.ehdr->e_shnum; i++) {
+		Elf_Rela *relatab, *rela;
+		size_t secidx, nrela;
+		uintptr_t base;
+
+		if (eif.shdr[i].sh_type != SHT_RELA)
+			continue;
+
+		relatab = (Elf_Rela *)((uint8_t *)eif.ehdr + eif.shdr[i].sh_offset);
+		nrela = eif.shdr[i].sh_size / sizeof(Elf_Rela);
+
+		secidx = eif.shdr[i].sh_info;
+		if (secidx >= eif.ehdr->e_shnum) {
+			fatal("elf_kernel_reloc: wrong RELA relocation");
+		}
+		base = (uintptr_t)eif.ehdr + eif.shdr[secidx].sh_offset;
+
+		for (j = 0; j < nrela; j++) {
+			rela = &relatab[j];
+			elf_apply_reloc(base, rela, true);
+		}
+	}
+
+	print_state(true, "RELA relocations applied");
+
+	/*
+	 * Get the entry point.
+	 */
+	ent = elf_get_entrypoint();
+	if (ent == 0) {
+		fatal("elf_kernel_reloc: entry point not found");
+	}
+
+	print_state(true, "Entry point found");
+
+	/*
+	 * Remap the code segments with proper permissions.
+	 */
+	mm_mprotect(eif.text.va, eif.text.sz, MM_PROT_READ|MM_PROT_EXECUTE);
+	mm_mprotect(eif.rodata.va, eif.rodata.sz, MM_PROT_READ);
+	mm_mprotect(eif.data.va, eif.data.sz, MM_PROT_READ|MM_PROT_WRITE);
+
+	print_state(true, "Segments protection updated");
+
+	return ent;
+}
+
+void
+elf_get_text(vaddr_t *va, paddr_t *pa, size_t *sz)
+{
+	*va = eif.text.va;
+	*pa = mm_vatopa(eif.text.va);
+	*sz = eif.text.sz;
+}
+
+void
+elf_get_rodata(vaddr_t *va, paddr_t *pa, size_t *sz)
+{
+	*va = eif.rodata.va;
+	*pa = mm_vatopa(eif.rodata.va);
+	*sz = eif.rodata.sz;
+}
+
+void
+elf_get_data(vaddr_t *va, paddr_t *pa, size_t *sz)
+{
+	*va = eif.data.va;
+	*pa = mm_vatopa(eif.data.va);
+	*sz = eif.data.sz;
+}
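
One subtlety in elf_sym_lookup above: the kernel's link sets rely on the
__start_<section>/__stop_<section> symbols that a normal final link would
synthesize, and since the prekern is the one doing the final link, it
resolves them by hand. It strips the 8-character "__start_" (or 7-character
"__stop_") prefix to recover the section name, then uses the section's
bounds. The string surgery, standalone:

	#include <stdio.h>
	#include <string.h>

	int
	main(void)
	{
		const char *sym = "__start_link_set_evcnts"; /* example name */

		if (strncmp(sym, "__start_link_set", 16) == 0) {
			/* skip "__start_" (8 chars); the rest is the section */
			printf("section: %s\n", sym + 8);
		}
		return 0;
	}
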
Index: src/sys/arch/amd64/stand/prekern/locore.S
diff -u /dev/null src/sys/arch/amd64/stand/prekern/locore.S:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/locore.S	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,602 @@
+/*	$NetBSD: locore.S,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 1998, 2000, 2007, 2008, 2016, 2017 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum and by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _LOCORE
+
+/* Override user-land alignment before including asm.h */
+#define	ALIGN_DATA	.align	8
+#define ALIGN_TEXT	.align 16,0x90
+#define _ALIGN_TEXT	ALIGN_TEXT
+
+#include <machine/asm.h>
+#include <machine/param.h>
+#include <machine/pte.h>
+#include <machine/psl.h>
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+#include <machine/trap.h>
+
+#define _KERNEL
+#include <machine/bootinfo.h>
+#undef _KERNEL
+
+#include "pdir.h"
+#include "redef.h"
+
+/* 32bit version of PG_NX */
+#define PG_NX32	0x80000000
+
+#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
+#define TABLE_L3_ENTRIES NKL3_KIMG_ENTRIES
+
+#define PROC0_PML4_OFF	0
+#define PROC0_STK_OFF	(PROC0_PML4_OFF + 1 * PAGE_SIZE)
+#define PROC0_PTP3_OFF	(PROC0_STK_OFF + UPAGES * PAGE_SIZE)
+#define PROC0_PTP2_OFF	(PROC0_PTP3_OFF + NKL4_KIMG_ENTRIES * PAGE_SIZE)
+#define PROC0_PTP1_OFF	(PROC0_PTP2_OFF + TABLE_L3_ENTRIES * PAGE_SIZE)
+#define TABLESIZE \
+  ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES) \
+    * PAGE_SIZE)
+
+/*
+ * fillkpt - Fill in a kernel page table
+ *	eax = pte (page frame | control | status)
+ *	ebx = page table address
+ *	ecx = number of pages to map
+ *
+ * Each entry is 8 (PDE_SIZE) bytes long: we must set the 4 upper bytes to 0.
+ */
+#define fillkpt	\
+	cmpl	$0,%ecx			;	/* zero-sized? */	\
+	je 	2f			; \
+1:	movl	$0,(PDE_SIZE-4)(%ebx)	;	/* upper 32 bits: 0 */	\
+	movl	%eax,(%ebx)		;	/* store phys addr */	\
+	addl	$PDE_SIZE,%ebx		;	/* next PTE/PDE */	\
+	addl	$PAGE_SIZE,%eax		;	/* next phys page */	\
+	loop	1b			; \
+2:					;
+
+/*
+ * fillkpt_nox - Same as fillkpt, but sets the NX/XD bit.
+ */
+#define fillkpt_nox \
+	cmpl	$0,%ecx			;	/* zero-sized? */	\
+	je 	2f			; \
+	pushl	%ebp			; \
+	movl	_C_LABEL(nox_flag),%ebp	; \
+1:	movl	%ebp,(PDE_SIZE-4)(%ebx)	;	/* upper 32 bits: NX */ \
+	movl	%eax,(%ebx)		;	/* store phys addr */	\
+	addl	$PDE_SIZE,%ebx		;	/* next PTE/PDE */	\
+	addl	$PAGE_SIZE,%eax		;	/* next phys page */	\
+	loop	1b			; \
+	popl	%ebp			; \
+2:					;
+
+/*
+ * fillkpt_blank - Fill in a kernel page table with blank entries
+ *	ebx = page table address
+ *	ecx = number of pages to map
+ */
+#define fillkpt_blank	\
+	cmpl	$0,%ecx			;	/* zero-sized? */	\
+	je 	2f			; \
+1:	movl	$0,(PDE_SIZE-4)(%ebx)	;	/* upper 32 bits: 0 */	\
+	movl	$0,(%ebx)		;	/* lower 32 bits: 0 */	\
+	addl	$PDE_SIZE,%ebx		;	/* next PTE/PDE */	\
+	loop	1b			; \
+2:					;
+
+/*
+ * Initialization
+ */
+	.data
+
+	.globl	_C_LABEL(tablesize)
+	.globl	_C_LABEL(nox_flag)
+	.globl	_C_LABEL(cpuid_level)
+	.globl	_C_LABEL(esym)
+	.globl	_C_LABEL(eblob)
+	.globl	_C_LABEL(atdevbase)
+	.globl	_C_LABEL(PDPpaddr)
+	.globl	_C_LABEL(boothowto)
+	.globl	_C_LABEL(bootinfo)
+	.globl	_C_LABEL(biosbasemem)
+	.globl	_C_LABEL(biosextmem)
+	.globl	_C_LABEL(stkpa)
+	.globl	_C_LABEL(stkva)
+	.globl	_C_LABEL(kernpa_start)
+	.globl	_C_LABEL(kernpa_end)
+
+	.type	_C_LABEL(tablesize), @object
+_C_LABEL(tablesize):	.long	TABLESIZE
+END(tablesize)
+	.type	_C_LABEL(nox_flag), @object
+LABEL(nox_flag)		.long	0	/* 32bit NOX flag, set if supported */
+END(nox_flag)
+	.type	_C_LABEL(cpuid_level), @object
+LABEL(cpuid_level)	.long	-1	/* max. level accepted by cpuid instr */
+END(cpuid_level)
+	.type	_C_LABEL(esym), @object
+LABEL(esym)		.quad	0	/* ptr to end of syms */
+END(esym)
+	.type	_C_LABEL(eblob), @object
+LABEL(eblob)		.quad	0	/* ptr to end of modules */
+END(eblob)
+	.type	_C_LABEL(atdevbase), @object
+LABEL(atdevbase)	.quad	0	/* location of start of iomem in virt */
+END(atdevbase)
+	.type	_C_LABEL(PDPpaddr), @object
+LABEL(PDPpaddr)		.quad	0	/* paddr of PTD, for libkvm */
+END(PDPpaddr)
+	.type	_C_LABEL(biosbasemem), @object
+LABEL(biosbasemem)	.long	0	/* base memory reported by BIOS */
+END(biosbasemem)
+	.type	_C_LABEL(biosextmem), @object
+LABEL(biosextmem)	.long	0	/* extended memory reported by BIOS */
+END(biosextmem)
+	.type	_C_LABEL(stkpa), @object
+LABEL(stkpa)		.quad	0
+END(stkpa)
+	.type	_C_LABEL(stkva), @object
+LABEL(stkva)		.quad	0
+END(stkva)
+	.type	_C_LABEL(kernpa_start), @object
+LABEL(kernpa_start)	.quad	0
+END(kernpa_start)
+	.type	_C_LABEL(kernpa_end), @object
+LABEL(kernpa_end)	.quad	0
+END(kernpa_end)
+
+	.globl	gdt64_lo
+	.globl	gdt64_start
+
+#define GDT64_LIMIT gdt64_end-gdt64_start-1
+/* Temporary gdt64, with base address in low memory */
+	.type	_C_LABEL(gdt64_lo), @object
+LABEL(gdt64_lo)
+	.word	GDT64_LIMIT
+	.quad	gdt64_start
+END(gdt64_lo)
+.align 64
+#undef GDT64_LIMIT
+
+	.type	_C_LABEL(gdt64_start), @object
+LABEL(gdt64_start)
+	.quad 0x0000000000000000	/* always empty */
+	.quad 0x00af9a000000ffff	/* kernel CS */
+	.quad 0x00cf92000000ffff	/* kernel DS */
+	.quad 0x0000000000000000	/* kernel TSS [1/2] */
+	.quad 0x0000000000000000	/* kernel TSS [2/2] */
+END(gdt64_start)
+gdt64_end:
+
+	.type	_C_LABEL(farjmp64), @object
+_C_LABEL(farjmp64):
+	.long	longmode
+	.word	GSEL(GCODE_SEL, SEL_KPL)
+END(farjmp64)
+
+	/* Space for the temporary stack */
+	.size	tmpstk, tmpstk - .
+	.space	512
+tmpstk:
+
+	.text
+
+ENTRY(start)
+	.code32
+
+	/* Warm boot */
+	movw	$0x1234,0x472
+
+	/*
+	 * Load parameters from the stack (32 bits):
+	 *     boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem
+	 * We are not interested in 'bootdev'.
+	 */
+
+	/* Load 'boothowto' */
+	movl	4(%esp),%eax
+	movl	%eax,_C_LABEL(boothowto)
+
+	/* Load 'bootinfo' */
+	movl	12(%esp),%eax
+	testl	%eax,%eax		/* bootinfo = NULL? */
+	jz	bootinfo_finished
+
+	movl	(%eax),%ebx		/* number of entries */
+	movl	$_C_LABEL(bootinfo),%ebp
+	movl	%ebp,%edx
+	addl	$BOOTINFO_MAXSIZE,%ebp
+	movl	%ebx,(%edx)
+	addl	$4,%edx
+
+bootinfo_entryloop:
+	testl	%ebx,%ebx		/* no remaining entries? */
+	jz	bootinfo_finished
+
+	addl	$4,%eax
+	movl	(%eax),%ecx		/* address of entry */
+	pushl	%edi
+	pushl	%esi
+	pushl	%eax
+
+	movl	(%ecx),%eax		/* btinfo_common::len (size of entry) */
+	movl	%edx,%edi
+	addl	(%ecx),%edx		/* update dest pointer */
+	cmpl	%ebp,%edx		/* beyond bootinfo+BOOTINFO_MAXSIZE? */
+	jg	bootinfo_overflow
+
+	movl	%ecx,%esi
+	movl	%eax,%ecx
+
+	/* If any modules were loaded, record where they end. */
+	cmpl	$BTINFO_MODULELIST,4(%esi) /* btinfo_common::type */
+	jne	0f
+	pushl	12(%esi)		/* btinfo_modulelist::endpa */
+	popl	_C_LABEL(eblob)
+0:
+
+	/* Record the information about the kernel. */
+	cmpl	$BTINFO_PREKERN,4(%esi) /* btinfo_common::type */
+	jne	0f
+	pushl	8(%esi)		/* btinfo_prekern::kernpa_start */
+	popl	_C_LABEL(kernpa_start)
+	pushl	12(%esi)	/* btinfo_prekern::kernpa_end */
+	popl	_C_LABEL(kernpa_end)
+0:
+
+	rep
+	movsb				/* copy esi -> edi */
+	popl	%eax
+	popl	%esi
+	popl	%edi
+	subl	$1,%ebx			/* decrement the # of entries */
+	jmp	bootinfo_entryloop
+
+bootinfo_overflow:
+	/*
+	 * Cleanup for overflow case. Pop the registers, and correct the number
+	 * of entries.
+	 */
+	popl	%eax
+	popl	%esi
+	popl	%edi
+	movl	$_C_LABEL(bootinfo),%ebp
+	movl	%ebp,%edx
+	subl	%ebx,(%edx)		/* correct the number of entries */
+
+bootinfo_finished:
+	/* Load 'esym' */
+	movl	16(%esp),%eax
+	movl	$_C_LABEL(esym),%ebp
+	movl	%eax,(%ebp)
+
+	/* Load 'biosextmem' */
+	movl	20(%esp),%eax
+	movl	$_C_LABEL(biosextmem),%ebp
+	movl	%eax,(%ebp)
+
+	/* Load 'biosbasemem' */
+	movl	24(%esp),%eax
+	movl	$_C_LABEL(biosbasemem),%ebp
+	movl	%eax,(%ebp)
+
+	/*
+	 * Done with the parameters!
+	 */
+
+	/* First, reset the PSL. */
+	pushl	$PSL_MBO
+	popfl
+
+	/* Switch to new stack now. */
+	movl	$_C_LABEL(tmpstk),%esp
+
+	xorl	%eax,%eax
+	cpuid
+	movl	%eax,_C_LABEL(cpuid_level)
+
+	/*
+	 * Retrieve the NX/XD flag. We use the 32bit version of PG_NX.
+	 */
+	movl	$0x80000001,%eax
+	cpuid
+	andl	$CPUID_NOX,%edx
+	jz	no_NOX
+	movl	$PG_NX32,_C_LABEL(nox_flag)
+no_NOX:
+
+/*
+ * There are four levels of pages in amd64: PML4 -> PDP -> PD -> PT. They will
+ * be referred to as: L4 -> L3 -> L2 -> L1.
+ *
+ * Physical address space:
+ * +---------------+----------+--------------+--------+---------------------+-
+ * | PREKERN IMAGE |**UNUSED**| KERNEL IMAGE | [SYMS] | [PRELOADED MODULES] |
+ * +---------------+----------+--------------+--------+---------------------+-
+ *                (1)                       (2)      (3)                   (4)
+ * ------------------+
+ *  BOOTSTRAP TABLES |
+ * ------------------+
+ *                  (5)
+ *
+ * Virtual address space of the prekern:
+ * +---------------+----------+------------------+-------------+
+ * | PREKERN IMAGE |**UNUSED**| BOOTSTRAP TABLES | ISA I/O MEM |
+ * +---------------+----------+------------------+-------------+
+ *
+ * PROC0 STK is obviously not linked as a page level. It just happens to be
+ * caught between L4 and L3.
+ *
+ * (PROC0 STK + L4 + L3 + L2 + L1) is later referred to as BOOTSTRAP TABLES.
+ *
+ * Important note: the kernel segments are properly 4k-aligned
+ * (see kern.ldscript), so there's no need to enforce alignment.
+ */
+
+	/* Find end of the prekern image; brings us to (1). */
+	movl	$_C_LABEL(__prekern_end),%edi
+
+	/* Find end of the kernel image; brings us to (2). */
+	movl	_C_LABEL(kernpa_end),%eax
+	testl	%eax,%eax
+	jz	1f
+	movl	%eax,%edi
+1:
+
+	/* Find end of the kernel symbols; brings us to (3). */
+#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(makeoptions_COPY_SYMTAB) /* XXX */
+	movl	_C_LABEL(esym),%eax
+	testl	%eax,%eax
+	jz	1f
+	movl	%eax,%edi
+1:
+#endif
+
+	/* Find end of the kernel preloaded modules; brings us to (4). */
+	movl	_C_LABEL(eblob),%eax
+	testl	%eax,%eax
+	jz	1f
+	movl	%eax,%edi
+1:
+
+	/* We are at (4). Align up for BOOTSTRAP TABLES. */
+	movl	%edi,%esi
+	addl	$PGOFSET,%esi
+	andl	$~PGOFSET,%esi
+
+	/* We are on the BOOTSTRAP TABLES. Save L4's physical address. */
+	movl	$_C_LABEL(PDPpaddr),%ebp
+	movl	%esi,(%ebp)
+	movl	$0,4(%ebp)
+
+	/* Now, zero out the BOOTSTRAP TABLES (before filling them in). */
+	movl	%esi,%edi
+	xorl	%eax,%eax
+	cld
+	movl	$TABLESIZE,%ecx
+	shrl	$2,%ecx
+	rep
+	stosl				/* copy eax -> edi */
+
+/*
+ * Build the page tables and levels. We go from L1 to L4, and link the levels
+ * together.
+ */
+	/*
+	 * Build L1.
+	 */
+	leal	(PROC0_PTP1_OFF)(%esi),%ebx
+
+	/* Skip the area below the prekern text. */
+	movl	$(PREKERNTEXTOFF - PREKERNBASE),%ecx
+	shrl	$PGSHIFT,%ecx
+	fillkpt_blank
+
+	/* Map the prekern text RX. */
+	movl	$(PREKERNTEXTOFF - PREKERNBASE),%eax	/* start of TEXT */
+	movl	$_C_LABEL(__rodata_start),%ecx
+	subl	%eax,%ecx
+	shrl	$PGSHIFT,%ecx
+	orl	$(PG_V|PG_KR),%eax
+	fillkpt
+
+	/* Map the prekern rodata R. */
+	movl	$_C_LABEL(__rodata_start),%eax
+	movl	$_C_LABEL(__data_start),%ecx
+	subl	%eax,%ecx
+	shrl	$PGSHIFT,%ecx
+	orl	$(PG_V|PG_KR),%eax
+	fillkpt_nox
+
+	/* Map the prekern data+bss RW. */
+	movl	$_C_LABEL(__data_start),%eax
+	movl	$_C_LABEL(__prekern_end),%ecx
+	subl	%eax,%ecx
+	shrl	$PGSHIFT,%ecx
+	orl	$(PG_V|PG_KW),%eax
+	fillkpt_nox
+
+	/* Map some blank space, to keep pa = va. */
+	movl	$_C_LABEL(__prekern_end),%eax
+	movl	%esi,%ecx		/* start of BOOTSTRAP TABLES */
+	subl	%eax,%ecx
+	shrl	$PGSHIFT,%ecx
+	fillkpt_blank
+
+	/* Map the BOOTSTRAP TABLES RW. */
+	movl	%esi,%eax		/* start of BOOTSTRAP TABLES */
+	movl	$TABLESIZE,%ecx		/* length of BOOTSTRAP TABLES */
+	shrl	$PGSHIFT,%ecx
+	orl	$(PG_V|PG_KW),%eax
+	fillkpt_nox
+
+	/* Map the ISA I/O MEM RW. */
+	movl	$IOM_BEGIN,%eax
+	movl	$IOM_SIZE,%ecx	/* size of ISA I/O MEM */
+	shrl	$PGSHIFT,%ecx
+	orl	$(PG_V|PG_KW/*|PG_N*/),%eax
+	fillkpt_nox
+
+	/*
+	 * Build L2. Linked to L1.
+	 */
+	leal	(PROC0_PTP2_OFF)(%esi),%ebx
+	leal	(PROC0_PTP1_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW),%eax
+	movl	$(NKL2_KIMG_ENTRIES+1),%ecx
+	fillkpt
+
+	/*
+	 * Build L3. Linked to L2.
+	 */
+	leal	(PROC0_PTP3_OFF)(%esi),%ebx
+	leal	(PROC0_PTP2_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW),%eax
+	movl	$NKL3_KIMG_ENTRIES,%ecx
+	fillkpt
+
+	/*
+	 * Build L4. Linked to L3.
+	 */
+	leal	(PROC0_PML4_OFF)(%esi),%ebx
+	leal	(PROC0_PTP3_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW),%eax
+	movl	$NKL4_KIMG_ENTRIES,%ecx
+	fillkpt
+
+	/* Install recursive top level PDE (one entry) */
+	leal	(PROC0_PML4_OFF + PDIR_SLOT_PTE * PDE_SIZE)(%esi),%ebx
+	leal	(PROC0_PML4_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW),%eax
+	movl	$1,%ecx
+	fillkpt_nox
+
+	/*
+	 * Startup checklist:
+	 * 1. Enable PAE (and SSE while here).
+	 */
+	movl	%cr4,%eax
+	orl	$(CR4_PAE|CR4_OSFXSR|CR4_OSXMMEXCPT),%eax
+	movl	%eax,%cr4
+
+	/*
+	 * 2. Set Long Mode Enable in EFER. Also enable the syscall extensions,
+	 *    and NOX if available.
+	 */
+	movl	$MSR_EFER,%ecx
+	rdmsr
+	xorl	%eax,%eax
+	orl	$(EFER_LME|EFER_SCE),%eax
+	movl	_C_LABEL(nox_flag),%ebx
+	cmpl	$0,%ebx
+	je 	skip_NOX
+	orl	$(EFER_NXE),%eax
+skip_NOX:
+	wrmsr
+
+	/*
+	 * 3. Load %cr3 with pointer to PML4.
+	 */
+	movl	%esi,%eax
+	movl	%eax,%cr3
+
+	/*
+	 * 4. Enable paging and the rest of it.
+	 */
+	movl	%cr0,%eax
+	orl	$(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM),%eax
+	movl	%eax,%cr0
+	jmp	compat
+compat:
+
+	/*
+	 * 5. Not quite done yet, we're now in a compatibility segment, in
+	 *    legacy mode. We must jump to a long mode segment. Need to set up
+	 *    a GDT with a long mode segment in it to do that.
+	 */
+	movl	$_C_LABEL(gdt64_lo),%eax
+	lgdt	(%eax)
+	movl	$_C_LABEL(farjmp64),%eax
+	ljmp	*(%eax)
+
+	.code64
+longmode:
+
+	/*
+	 * We have arrived. Everything is identity-mapped.
+	 */
+
+	/* Store atdevbase. */
+	movq	$TABLESIZE,%rdx
+	addq	%rsi,%rdx
+	movq	%rdx,_C_LABEL(atdevbase)(%rip)
+
+	/* Set up bootstrap stack. */
+	leaq	(PROC0_STK_OFF)(%rsi),%rax
+	movq	%rax,_C_LABEL(stkpa)(%rip)
+	leaq	(USPACE-FRAMESIZE)(%rax),%rsp
+	xorq	%rbp,%rbp			/* mark end of frames */
+
+	xorw	%ax,%ax
+	movw	%ax,%gs
+	movw	%ax,%fs
+
+	/* The first physical page available. */
+	leaq	(TABLESIZE)(%rsi),%rdi
+
+	/*
+	 * Continue execution in C.
+	 */
+	call	_C_LABEL(init_prekern)
+
+	ret
+END(start)
+
+/* -------------------------------------------------------------------------- */
+
+ENTRY(lidt)
+	lidt	(%rdi)
+	ret
+
+ENTRY(rdtsc)
+	xorq	%rax,%rax
+	rdtsc
+	shlq	$32,%rdx
+	orq	%rdx,%rax
+	ret
+
+ENTRY(jump_kernel)
+	movq	_C_LABEL(stkva),%rsp
+	movq	$exec_kernel,%rax
+	jmpq	*%rax
+
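
For readers who do not want to decode the assembly macros: fillkpt is a loop
that stores 8-byte PTEs, physical frame plus flag bits, bumping the frame by
one page per entry; fillkpt_nox additionally sets the NX bit in the upper
half of each entry. A rough C rendering (illustrative: the real macro runs in
32-bit mode, so each 64-bit entry is written as two 32-bit stores):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096

	/*
	 * Fill 'count' consecutive page-table entries at 'pte', mapping
	 * consecutive physical pages starting at 'pa', with 'flags'
	 * (PG_V, PG_KW, ... in the real code) OR'ed into each entry.
	 */
	static void
	fillkpt(uint64_t *pte, uint64_t pa, uint64_t flags, uint64_t count)
	{
		while (count-- > 0) {
			*pte++ = pa | flags;
			pa += PAGE_SIZE;
		}
	}

	int
	main(void)
	{
		uint64_t pt[4];

		/* 0x3 stands in for PG_V|PG_KW, for illustration */
		fillkpt(pt, 0x200000, 0x3, 4);
		printf("%#llx .. %#llx\n", (unsigned long long)pt[0],
		    (unsigned long long)pt[3]);
		return 0;
	}
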
Index: src/sys/arch/amd64/stand/prekern/mm.c
diff -u /dev/null src/sys/arch/amd64/stand/prekern/mm.c:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/mm.c	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,227 @@
+/*	$NetBSD: mm.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "prekern.h"
+
+static const pt_entry_t protection_codes[3] = {
+	[MM_PROT_READ] = PG_RO | PG_NX,
+	[MM_PROT_WRITE] = PG_RW | PG_NX,
+	[MM_PROT_EXECUTE] = PG_RO,
+	/* RWX does not exist */
+};
+
+extern paddr_t kernpa_start, kernpa_end;
+vaddr_t iom_base;
+
+paddr_t pa_avail = 0;
+static vaddr_t va_avail = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
+static vaddr_t va_end = (PREKERNBASE + (NKL2_KIMG_ENTRIES + 1) * NBPD_L2);
+
+void
+mm_init(paddr_t first_pa)
+{
+	pa_avail = first_pa;
+}
+
+static paddr_t
+mm_palloc(size_t npages)
+{
+	paddr_t pa = pa_avail;
+	pa_avail += npages * PAGE_SIZE;
+	return pa;
+}
+
+static vaddr_t
+mm_valloc(size_t npages)
+{
+	vaddr_t va = va_avail;
+	va_avail += npages * PAGE_SIZE;
+	if (va_avail > va_end) {
+		fatal("mm_valloc: no VA left");
+	}
+	return va;
+}
+
+static void
+mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
+{
+	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
+}
+
+static void
+mm_flush_va(vaddr_t va)
+{
+	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
+}
+
+paddr_t
+mm_vatopa(vaddr_t va)
+{
+	return (PTE_BASE[pl1_i(va)] & PG_FRAME);
+}
+
+void
+mm_mprotect(vaddr_t startva, size_t size, int prot)
+{
+	size_t i, npages;
+	vaddr_t va;
+	paddr_t pa;
+
+	ASSERT(size % PAGE_SIZE == 0);
+	npages = size / PAGE_SIZE;
+
+	for (i = 0; i < npages; i++) {
+		va = startva + i * PAGE_SIZE;
+		pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
+		mm_enter_pa(pa, va, prot);
+		mm_flush_va(va);
+	}
+}
+
+static void
+mm_map_va(vaddr_t startva, vaddr_t endva)
+{
+	size_t i, idx, size, nL4e, nL3e, nL2e;
+	size_t L4e_idx, L3e_idx, L2e_idx;
+	vaddr_t L3page_va, L2page_va;
+	paddr_t L3page_pa, L2page_pa, L1page_pa;
+	pd_entry_t *pdir;
+
+	/*
+	 * Initialize constants.
+	 */
+	size = endva - startva;
+	nL4e = roundup(size, NBPD_L4) / NBPD_L4;
+	nL3e = roundup(size, NBPD_L3) / NBPD_L3;
+	nL2e = roundup(size, NBPD_L2) / NBPD_L2;
+	L4e_idx = pl4_i(startva);
+	L3e_idx = pl3_i(startva % NBPD_L4);
+	L2e_idx = pl2_i(startva % NBPD_L3);
+
+	/*
+	 * Map the sub-tree itself.
+	 */
+	L3page_va = mm_valloc(nL4e);
+	L3page_pa = mm_palloc(nL4e);
+	L2page_va = mm_valloc(nL3e);
+	L2page_pa = mm_palloc(nL3e);
+
+	L1page_pa = mm_palloc(nL2e);
+
+	for (i = 0; i < nL4e; i++) {
+		mm_enter_pa(L3page_pa + i * PAGE_SIZE,
+		    L3page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
+		memset((void *)(L3page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
+	}
+
+	for (i = 0; i < nL3e; i++) {
+		mm_enter_pa(L2page_pa + i * PAGE_SIZE,
+		    L2page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
+		memset((void *)(L2page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
+	}
+
+	/*
+	 * Now link the levels together.
+	 */
+	pdir = (pd_entry_t *)L3page_va;
+	for (i = 0, idx = L3e_idx; i < nL3e; i++, idx++) {
+		pdir[idx] = (L2page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
+	}
+
+	pdir = (pd_entry_t *)L2page_va;
+	for (i = 0, idx = L2e_idx; i < nL2e; i++, idx++) {
+		pdir[idx] = (L1page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
+	}
+
+	/*
+	 * Finally, link the sub-tree into the tree.
+	 */
+	L4_BASE[L4e_idx] = L3page_pa | PG_V | PG_RW;
+}
+
+/*
+ * Select a random VA, and create a page tree. The size of this tree is
+ * actually hard-coded, and matches the one created by the generic NetBSD
+ * locore.
+ */
+static vaddr_t
+mm_rand_base()
+{
+	vaddr_t randva;
+	uint64_t rnd;
+	size_t size;
+
+	size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
+
+	/* yes, this is ridiculous */
+	rnd = rdtsc();
+	randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
+	    PAGE_SIZE);
+
+	mm_map_va(randva, randva + size);
+
+	return randva;
+}
+
+/*
+ * Virtual address space of the kernel:
+ * +---------------+---------------------+------------------+-------------+
+ * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
+ * +---------------+---------------------+------------------+-------------+
+ * We basically choose a random VA, and map everything contiguously starting
+ * from there. Note that the physical pages allocated by mm_palloc are part
+ * of the BOOTSTRAP TABLES.
+ */
+vaddr_t
+mm_map_kernel()
+{
+	size_t i, npages, size;
+	vaddr_t baseva;
+
+	size = (pa_avail - kernpa_start);
+	baseva = mm_rand_base();
+	npages = size / PAGE_SIZE;
+
+	/* Enter the whole area linearly */
+	for (i = 0; i < npages; i++) {
+		mm_enter_pa(kernpa_start + i * PAGE_SIZE,
+		    baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
+	}
+
+	/* Enter the ISA I/O MEM */
+	iom_base = baseva + npages * PAGE_SIZE;
+	npages = IOM_SIZE / PAGE_SIZE;
+	for (i = 0; i < npages; i++) {
+		mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
+		    iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
+	}
+
+	return baseva;
+}
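
About mm_rand_base: the self-deprecating comment is fair, since a raw TSC
read is a placeholder entropy source rather than something to keep long-term.
The arithmetic around it is sound, though: the modulus keeps base + size
inside the KASLR window, and the rounddown keeps the base page-aligned. A
standalone check of that invariant (window constants for illustration,
matching prekern.h):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096ULL
	#define WINDOW_BASE	0xffffffff80000000ULL	/* KERNBASE */
	#define WINDOW_SIZE	(2ULL << 30)		/* 2GB */

	int
	main(void)
	{
		uint64_t size = 33ULL << 21; /* (NKL2_KIMG_ENTRIES+1)*NBPD_L2 */
		uint64_t rnd = 0x123456789abcdefULL; /* stand-in for rdtsc() */
		uint64_t va = WINDOW_BASE + rnd % (WINDOW_SIZE - size);

		va &= ~(PAGE_SIZE - 1);	/* rounddown to a page boundary */
		printf("base %#llx in-window %d aligned %d\n",
		    (unsigned long long)va,
		    (va - WINDOW_BASE) + size <= WINDOW_SIZE,
		    va % PAGE_SIZE == 0);
		return 0;
	}
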
Index: src/sys/arch/amd64/stand/prekern/pdir.h
diff -u /dev/null src/sys/arch/amd64/stand/prekern/pdir.h:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/pdir.h	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,97 @@
+/*	$NetBSD: pdir.h,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define PREKERNBASE		0x0
+#define PREKERNTEXTOFF	(PREKERNBASE + 0x100000)
+
+#define L4_SLOT_PREKERN	0 /* pl4_i(PREKERNBASE) */
+#define L4_SLOT_PTE		255
+
+#define PDIR_SLOT_KERN	L4_SLOT_PREKERN
+#define PDIR_SLOT_PTE	L4_SLOT_PTE
+
+#define PTE_BASE	((pt_entry_t *)(L4_SLOT_PTE * NBPD_L4))
+
+#define L1_BASE	PTE_BASE
+#define L2_BASE	((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
+#define L3_BASE	((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
+#define L4_BASE	((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))
+
+#define PDP_BASE	L4_BASE
+
+#define NKL4_MAX_ENTRIES	(unsigned long)1
+#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
+#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
+#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)
+
+#define NKL4_KIMG_ENTRIES	1
+#define NKL3_KIMG_ENTRIES	1
+#define NKL2_KIMG_ENTRIES	32
+
+/*
+ * Now we define various constants for playing with virtual addresses.
+ */
+#define L1_SHIFT	12
+#define L2_SHIFT	21
+#define L3_SHIFT	30
+#define L4_SHIFT	39
+#define NBPD_L1		(1UL << L1_SHIFT) /* # bytes mapped by L1 ent (4K) */
+#define NBPD_L2		(1UL << L2_SHIFT) /* # bytes mapped by L2 ent (2MB) */
+#define NBPD_L3		(1UL << L3_SHIFT) /* # bytes mapped by L3 ent (1G) */
+#define NBPD_L4		(1UL << L4_SHIFT) /* # bytes mapped by L4 ent (512G) */
+
+#define L4_MASK		0x0000ff8000000000
+#define L3_MASK		0x0000007fc0000000
+#define L2_MASK		0x000000003fe00000
+#define L1_MASK		0x00000000001ff000
+
+#define L4_FRAME	L4_MASK
+#define L3_FRAME	(L4_FRAME|L3_MASK)
+#define L2_FRAME	(L3_FRAME|L2_MASK)
+#define L1_FRAME	(L2_FRAME|L1_MASK)
+
+/*
+ * Mask to get rid of the sign-extended part of addresses.
+ */
+#define VA_SIGN_MASK		0xffff000000000000
+#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
+/* XXXfvdl this one's not right */
+#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
+
+/*
+ * pl*_i: generate index into pde/pte arrays in virtual space
+ *
+ * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
+ */
+#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
+#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
+#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
+#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
+
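
The pl*_i macros at the end slice the four 9-bit index fields out of a
canonical virtual address once the sign-extension bits are dropped; it is
masks and shifts only. Worked standalone, with the same shifts:

	#include <stdio.h>
	#include <stdint.h>

	#define VA_SIGN_MASK	0xffff000000000000ULL
	#define VA_SIGN_POS(va)	((va) & ~VA_SIGN_MASK)

	int
	main(void)
	{
		uint64_t va = 0xffffffff80200000ULL; /* example kernel VA */
		uint64_t p = VA_SIGN_POS(va);

		printf("L4 %llu L3 %llu L2 %llu L1 %llu\n",
		    (unsigned long long)((p >> 39) & 511),
		    (unsigned long long)((p >> 30) & 511),
		    (unsigned long long)((p >> 21) & 511),
		    (unsigned long long)((p >> 12) & 511));
		return 0;
	}

For this address it prints "L4 511 L3 510 L2 1 L1 0".
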
Index: src/sys/arch/amd64/stand/prekern/prekern.c
diff -u /dev/null src/sys/arch/amd64/stand/prekern/prekern.c:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/prekern.c	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,382 @@
+/*	$NetBSD: prekern.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "prekern.h"
+
+#include <machine/reg.h>
+#include <machine/specialreg.h>
+#include <machine/frame.h>
+
+#define _KERNEL
+#include <machine/bootinfo.h>
+#undef _KERNEL
+
+#include <machine/tss.h>
+#include <machine/segments.h>
+
+int boothowto;
+struct bootinfo bootinfo;
+
+extern paddr_t kernpa_start, kernpa_end;
+
+extern uint64_t *gdt64_start;
+uint8_t idtstore[PAGE_SIZE];
+uint8_t faultstack[PAGE_SIZE];
+struct x86_64_tss prekern_tss;
+
+/* GDT offsets */
+#define PREKERN_GDT_NUL_OFF	(0 * 8)
+#define PREKERN_GDT_CS_OFF	(1 * 8)
+#define PREKERN_GDT_DS_OFF	(2 * 8)
+#define PREKERN_GDT_TSS_OFF	(3 * 8)
+
+#define IDTVEC(name) __CONCAT(X, name)
+typedef void (vector)(void);
+extern vector *IDTVEC(exceptions)[];
+
+void fatal(char *msg)
+{
+	print("\n");
+	print_ext(RED_ON_BLACK, "********** FATAL ***********\n");
+	print_ext(RED_ON_BLACK, msg);
+	print("\n");
+	print_ext(RED_ON_BLACK, "****************************\n");
+
+	while (1);
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct smallframe {
+	uint64_t sf_trapno;
+	uint64_t sf_err;
+	uint64_t sf_rip;
+	uint64_t sf_cs;
+	uint64_t sf_rflags;
+	uint64_t sf_rsp;
+	uint64_t sf_ss;
+};
+
+static void setregion(struct region_descriptor *, void *, uint16_t);
+static void setgate(struct gate_descriptor *, void *, int, int, int, int);
+static void set_sys_segment(struct sys_segment_descriptor *, void *,
+    size_t, int, int, int);
+static void set_sys_gdt(int, void *, size_t, int, int, int);
+static void init_tss();
+static void init_idt();
+
+void trap(struct smallframe *);
+
+static char *trap_type[] = {
+	"privileged instruction fault",		/*  0 T_PRIVINFLT */
+	"breakpoint trap",			/*  1 T_BPTFLT */
+	"arithmetic trap",			/*  2 T_ARITHTRAP */
+	"asynchronous system trap",		/*  3 T_ASTFLT */
+	"protection fault",			/*  4 T_PROTFLT */
+	"trace trap",				/*  5 T_TRCTRAP */
+	"page fault",				/*  6 T_PAGEFLT */
+	"alignment fault",			/*  7 T_ALIGNFLT */
+	"integer divide fault",			/*  8 T_DIVIDE */
+	"non-maskable interrupt",		/*  9 T_NMI */
+	"overflow trap",			/* 10 T_OFLOW */
+	"bounds check fault",			/* 11 T_BOUND */
+	"FPU not available fault",		/* 12 T_DNA */
+	"double fault",				/* 13 T_DOUBLEFLT */
+	"FPU operand fetch fault",		/* 14 T_FPOPFLT */
+	"invalid TSS fault",			/* 15 T_TSSFLT */
+	"segment not present fault",		/* 16 T_SEGNPFLT */
+	"stack fault",				/* 17 T_STKFLT */
+	"machine check fault",			/* 18 T_MCA */
+	"SSE FP exception",			/* 19 T_XMM */
+	"reserved trap",			/* 20 T_RESERVED */
+};
+int	trap_types = __arraycount(trap_type);
+
+/*
+ * Trap handler.
+ */
+void
+trap(struct smallframe *sf)
+{
+	uint64_t trapno = sf->sf_trapno;
+	char *buf;
+
+	if (trapno < trap_types) {
+		buf = trap_type[trapno];
+	} else {
+		buf = "unknown trap";
+	}
+
+	print("\n");
+	print_ext(RED_ON_BLACK, "****** FAULT OCCURRED ******\n");
+	print_ext(RED_ON_BLACK, buf);
+	print("\n");
+	print_ext(RED_ON_BLACK, "****************************\n");
+
+	while (1);
+}
+
+static void
+setregion(struct region_descriptor *rd, void *base, uint16_t limit)
+{
+	rd->rd_limit = limit;
+	rd->rd_base = (uint64_t)base;
+}
+
+static void
+setgate(struct gate_descriptor *gd, void *func, int ist, int type, int dpl,
+	int sel)
+{
+	gd->gd_looffset = (uint64_t)func & 0xffff;
+	gd->gd_selector = sel;
+	gd->gd_ist = ist;
+	gd->gd_type = type;
+	gd->gd_dpl = dpl;
+	gd->gd_p = 1;
+	gd->gd_hioffset = (uint64_t)func >> 16;
+	gd->gd_zero = 0;
+	gd->gd_xx1 = 0;
+	gd->gd_xx2 = 0;
+	gd->gd_xx3 = 0;
+}
+
+static void
+set_sys_segment(struct sys_segment_descriptor *sd, void *base, size_t limit,
+	int type, int dpl, int gran)
+{
+	memset(sd, 0, sizeof(*sd));
+	sd->sd_lolimit = (unsigned)limit;
+	sd->sd_lobase = (uint64_t)base;
+	sd->sd_type = type;
+	sd->sd_dpl = dpl;
+	sd->sd_p = 1;
+	sd->sd_hilimit = (unsigned)limit >> 16;
+	sd->sd_gran = gran;
+	sd->sd_hibase = (uint64_t)base >> 24;
+}
+
+static void
+set_sys_gdt(int slotoff, void *base, size_t limit, int type, int dpl, int gran)
+{
+	struct sys_segment_descriptor sd;
+
+	set_sys_segment(&sd, base, limit, type, dpl, gran);
+
+	memcpy((char *)&gdt64_start + slotoff, &sd, sizeof(sd));
+}
+
+static void init_tss()
+{
+	memset(&prekern_tss, 0, sizeof(prekern_tss));
+	prekern_tss.tss_ist[0] = (uintptr_t)(&faultstack[PAGE_SIZE-1]) & ~0xf;
+
+	set_sys_gdt(PREKERN_GDT_TSS_OFF, &prekern_tss,
+	    sizeof(struct x86_64_tss) - 1, SDT_SYS386TSS, SEL_KPL, 0);
+}
+
+static void init_idt()
+{
+	struct region_descriptor region;
+	struct gate_descriptor *idt;
+	size_t i;
+
+	idt = (struct gate_descriptor *)&idtstore;
+	for (i = 0; i < NCPUIDT; i++) {
+		setgate(&idt[i], IDTVEC(exceptions)[i], 0, SDT_SYS386IGT,
+		    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+	}
+
+	setregion(&region, &idtstore, PAGE_SIZE - 1);
+	lidt(&region);
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct bootspace bootspace;
+
+struct prekern_args {
+	int boothowto;
+	void *bootinfo;
+	void *bootspace;
+	int esym;
+	int biosextmem;
+	int biosbasemem;
+	int cpuid_level;
+	uint32_t nox_flag;
+	uint64_t PDPpaddr;
+	vaddr_t atdevbase;
+	vaddr_t lwp0uarea;
+	paddr_t first_avail;
+};
+
+struct prekern_args pkargs;
+
+static void
+init_bootspace(vaddr_t baseva)
+{
+	extern vaddr_t iom_base;
+	extern uint64_t PDPpaddr;
+
+	elf_get_text(&bootspace.text.va, &bootspace.text.pa,
+	    &bootspace.text.sz);
+	elf_get_rodata(&bootspace.rodata.va, &bootspace.rodata.pa,
+	    &bootspace.rodata.sz);
+	elf_get_data(&bootspace.data.va, &bootspace.data.pa,
+	    &bootspace.data.sz);
+	bootspace.boot.va = bootspace.data.va + bootspace.data.sz;
+	bootspace.boot.pa = mm_vatopa(bootspace.boot.va);
+	bootspace.boot.sz = (size_t)(iom_base + IOM_SIZE) -
+	    (size_t)bootspace.boot.va;
+	bootspace.spareva = baseva + NKL2_KIMG_ENTRIES * NBPD_L2;
+	bootspace.pdir = baseva + (PDPpaddr - kernpa_start);
+	bootspace.emodule = baseva + NKL2_KIMG_ENTRIES * NBPD_L2;
+}
+
+static void
+init_prekern_args(vaddr_t baseva)
+{
+	extern int esym;
+	extern int biosextmem;
+	extern int biosbasemem;
+	extern int cpuid_level;
+	extern uint32_t nox_flag;
+	extern uint64_t PDPpaddr;
+	extern vaddr_t iom_base;
+	extern paddr_t stkpa;
+	extern paddr_t pa_avail;
+
+	memset(&pkargs, 0, sizeof(pkargs));
+	pkargs.boothowto = boothowto;
+	pkargs.bootinfo = (void *)&bootinfo;
+	pkargs.bootspace = &bootspace;
+	pkargs.esym = esym;
+	pkargs.biosextmem = biosextmem;
+	pkargs.biosbasemem = biosbasemem;
+	pkargs.cpuid_level = cpuid_level;
+	pkargs.nox_flag = nox_flag;
+	pkargs.PDPpaddr = PDPpaddr;
+	pkargs.atdevbase = iom_base;
+	pkargs.lwp0uarea = baseva + (stkpa - kernpa_start);
+	pkargs.first_avail = pa_avail;
+
+	extern vaddr_t stkva;
+	stkva = pkargs.lwp0uarea + (USPACE - FRAMESIZE);
+}
+
+void
+exec_kernel(vaddr_t ent)
+{
+	int (*jumpfunc)(struct prekern_args *);
+	int ret;
+
+	/*
+	 * Normally, the function does not return. If it does, it means the
+	 * kernel had trouble processing the arguments, and we panic here. The
+	 * return value is only there for debugging.
+	 */
+	jumpfunc = (void *)ent;
+	ret = (*jumpfunc)(&pkargs);
+
+	if (ret == -1) {
+		fatal("kernel returned -1");
+	} else {
+		fatal("kernel returned unknown value");
+	}
+}
+
+/*
+ * Main entry point of the Prekern.
+ */
+void
+init_prekern(paddr_t pa_start)
+{
+	vaddr_t baseva, ent;
+
+	init_cons();
+	print_banner();
+
+	if (kernpa_start == 0 || kernpa_end == 0) {
+		fatal("init_prekern: unable to locate the kernel");
+	}
+	if (kernpa_start != (1UL << 21)) {
+		fatal("init_prekern: invalid kernpa_start");
+	}
+	if (kernpa_start % PAGE_SIZE != 0) {
+		fatal("init_prekern: kernpa_start not aligned");
+	}
+	if (kernpa_end % PAGE_SIZE != 0) {
+		fatal("init_prekern: kernpa_end not aligned");
+	}
+	if (kernpa_end <= kernpa_start) {
+		fatal("init_prekern: kernpa_end <= kernpa_start");
+	}
+
+	/*
+	 * Our physical space starts after the end of the kernel.
+	 */
+	if (pa_start < kernpa_end) {
+		fatal("init_prekern: physical space inside kernel");
+	}
+	mm_init(pa_start);
+
+	/*
+	 * Init the TSS and IDT. We mostly don't care about them; they are just
+	 * here to properly handle traps.
+	 */
+	init_tss();
+	init_idt();
+
+	print_state(true, "Prekern loaded");
+
+	/*
+	 * Relocate the kernel.
+	 */
+	baseva = mm_map_kernel();
+	ent = elf_kernel_reloc(baseva);
+
+	/*
+	 * Build the bootspace.
+	 */
+	init_bootspace(baseva);
+
+	/*
+	 * Build the arguments.
+	 */
+	init_prekern_args(baseva);
+
+	/*
+	 * Finally, jump into the kernel.
+	 */
+	print_state(true, "Jumping into the kernel");
+	jump_kernel(ent);
+
+	fatal("init_prekern: unreachable!");
+}
+
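
The handoff contract implied by exec_kernel: the relocated kernel's
start_prekern entry takes a single struct prekern_args pointer and returns
only on failure, in which case the prekern prints the fatal banner. The
kernel-side counterpart (not part of this commit; sketched here as an
assumption) has to look roughly like:

	/* Hypothetical kernel-side entry; struct layout shared with the
	 * prekern's struct prekern_args. */
	struct prekern_args;

	int
	start_prekern(struct prekern_args *args)
	{
		if (args == NULL /* || the arguments fail validation */)
			return -1;	/* prekern: "kernel returned -1" */

		/* consume bootspace/bootinfo and continue the boot;
		 * on success this never returns */
		for (;;)
			continue;
	}
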
Index: src/sys/arch/amd64/stand/prekern/prekern.h
diff -u /dev/null src/sys/arch/amd64/stand/prekern/prekern.h:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/prekern.h	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,112 @@
+/*	$NetBSD: prekern.h,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/stdbool.h>
+#include <machine/pte.h>
+
+#include "pdir.h"
+#include "redef.h"
+
+#define MM_PROT_READ	0x00
+#define MM_PROT_WRITE	0x01
+#define MM_PROT_EXECUTE	0x02
+
+#define ASSERT(a) do { if (!(a)) fatal("ASSERT"); } while (0)
+#define memset(d, v, l) __builtin_memset(d, v, l)
+#define memcpy(d, v, l) __builtin_memcpy(d, v, l)
+typedef uint64_t paddr_t;
+typedef uint64_t vaddr_t;
+typedef uint64_t pt_entry_t;
+typedef uint64_t pd_entry_t;
+typedef uint64_t pte_prot_t;
+#define WHITE_ON_BLACK 0x07
+#define RED_ON_BLACK 0x04
+#define GREEN_ON_BLACK 0x02
+
+#define KASLR_WINDOW_BASE	KERNBASE		/* max - 2GB */
+#define KASLR_WINDOW_SIZE	(2LLU * (1 << 30))	/* 2GB */
+
+/* -------------------------------------------------------------------------- */
+
+struct bootspace {
+	struct {
+		vaddr_t va;
+		paddr_t pa;
+		size_t sz;
+	} text;
+	struct {
+		vaddr_t va;
+		paddr_t pa;
+		size_t sz;
+	} rodata;
+	struct {
+		vaddr_t va;
+		paddr_t pa;
+		size_t sz;
+	} data;
+	struct {
+		vaddr_t va;
+		paddr_t pa;
+		size_t sz;
+	} boot;
+	vaddr_t spareva;
+	vaddr_t pdir;
+	vaddr_t emodule;
+};
+
+/* console.c */
+void init_cons(void);
+void print_ext(int, char *);
+void print(char *);
+void print_state(bool, char *);
+void print_banner(void);
+
+/* elf.c */
+vaddr_t elf_kernel_reloc(vaddr_t);
+void elf_get_text(vaddr_t *, paddr_t *, size_t *);
+void elf_get_rodata(vaddr_t *, paddr_t *, size_t *);
+void elf_get_data(vaddr_t *, paddr_t *, size_t *);
+
+/* locore.S */
+void lidt(void *);
+uint64_t rdtsc(void);
+void jump_kernel(vaddr_t);
+
+/* mm.c */
+void mm_init(paddr_t);
+paddr_t mm_vatopa(vaddr_t);
+void mm_mprotect(vaddr_t, size_t, int);
+vaddr_t mm_map_kernel(void);
+
+/* prekern.c */
+void fatal(char *);
+
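The bootspace layout and the MM_PROT_* flags above are the two pieces
a caller combines when re-protecting the relocated kernel. A sketch of
how they fit together (illustrative only; the prekern's elf.c and mm.c
do this with more precision):

/*
 * Illustrative: apply the expected W^X protections to the three
 * kernel segments described by a struct bootspace.
 */
static void
protect_bootspace(const struct bootspace *bs)
{
	/* text: read + execute, never writable */
	mm_mprotect(bs->text.va, bs->text.sz, MM_PROT_READ|MM_PROT_EXECUTE);
	/* rodata: read-only */
	mm_mprotect(bs->rodata.va, bs->rodata.sz, MM_PROT_READ);
	/* data: read + write, never executable */
	mm_mprotect(bs->data.va, bs->data.sz, MM_PROT_READ|MM_PROT_WRITE);
}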
Index: src/sys/arch/amd64/stand/prekern/prekern.ldscript
diff -u /dev/null src/sys/arch/amd64/stand/prekern/prekern.ldscript:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/prekern.ldscript	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,54 @@
+/*	$NetBSD: prekern.ldscript,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+__PAGE_SIZE = 0x1000 ;
+
+ENTRY(start)
+SECTIONS
+{
+	.text : AT (ADDR(.text) & 0x0fffffff)
+	{
+		*(.text)
+		*(.text.*)
+		*(.stub)
+	} =0xCC
+	_etext = . ;
+	PROVIDE (etext = .) ;
+
+	. = ALIGN(__PAGE_SIZE);
+
+	__rodata_start = . ;
+	.rodata :
+	{
+		*(.rodata)
+		*(.rodata.*)
+	}
+
+	. = ALIGN(__PAGE_SIZE);
+
+	__data_start = . ;
+	.data :
+	{
+		*(.data)
+	}
+
+	__bss_start = . ;
+	.bss :
+	{
+		*(.bss)
+		*(.bss.*)
+		*(COMMON)
+	}
+
+	. = ALIGN(__PAGE_SIZE);
+
+	/* End of the prekern image */
+	__prekern_end = . ;
+
+	_end = . ;
+	PROVIDE (end = .) ;
+	.note.netbsd.ident :
+	{
+		KEEP(*(.note.netbsd.ident));
+	}
+}
+
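The script defines __rodata_start, __data_start, __bss_start and
__prekern_end, so C code can recover the prekern's own segment layout
from them. A sketch (hypothetical, not in this commit) of referencing
those linker-provided symbols:

/* Symbols defined by prekern.ldscript; only their addresses matter. */
extern char __rodata_start[], __data_start[], __bss_start[];
extern char __prekern_end[];

/* Illustrative: size of the prekern's rodata, e.g. to self-apply W^X. */
static size_t
prekern_rodata_size(void)
{
	return (size_t)(__data_start - __rodata_start);
}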
Index: src/sys/arch/amd64/stand/prekern/redef.h
diff -u /dev/null src/sys/arch/amd64/stand/prekern/redef.h:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/redef.h	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,47 @@
+/*	$NetBSD: redef.h,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define PAGE_SIZE 4096
+
+/* --------------------------------------------------------------------------
+ * *** dev/isa/isareg.h *****************************************************
+ * -------------------------------------------------------------------------- */
+
+#define	IOM_BEGIN	0x0a0000		/* Start of I/O Memory "hole" */
+#define	IOM_END		0x100000		/* End of I/O Memory "hole" */
+#define	IOM_SIZE	(IOM_END - IOM_BEGIN)
+
+/* --------------------------------------------------------------------------
+ * *** amd64/genassym.cf ****************************************************
+ * -------------------------------------------------------------------------- */
+
+#define PDE_SIZE 8
+#define FRAMESIZE 8 /* XXX */
+
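The redefined I/O hole constants mirror dev/isa/isareg.h, with IOM_SIZE
working out to 0x60000 bytes (384KB, covering 0xa0000 through 0xfffff).
A compile-time check of that arithmetic (illustrative, not part of this
commit) could read:

#include <sys/cdefs.h>

/* Illustrative: catch any drift from the isareg.h values. */
__CTASSERT(IOM_SIZE == IOM_END - IOM_BEGIN);
__CTASSERT(IOM_SIZE == 0x60000);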
Index: src/sys/arch/amd64/stand/prekern/trap.S
diff -u /dev/null src/sys/arch/amd64/stand/prekern/trap.S:1.1
--- /dev/null	Tue Oct 10 09:29:14 2017
+++ src/sys/arch/amd64/stand/prekern/trap.S	Tue Oct 10 09:29:14 2017
@@ -0,0 +1,194 @@
+/*	$NetBSD: trap.S,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _KERNEL
+
+/* Override user-land alignment before including asm.h */
+#define	ALIGN_DATA	.align	8
+#define ALIGN_TEXT	.align 16,0x90
+#define _ALIGN_TEXT	ALIGN_TEXT
+
+#include <machine/asm.h>
+
+#include <machine/trap.h>
+
+#define	TRAPENTRY(a) \
+	pushq $(a); \
+	jmp _C_LABEL(alltraps);
+
+#define	ZTRAPENTRY(a) \
+	pushq $0; \
+	pushq $(a); \
+	jmp _C_LABEL(alltraps);
+
+	.text
+
+IDTVEC(trap00)
+	ZTRAPENTRY(T_DIVIDE)
+IDTVEC_END(trap00)
+
+IDTVEC(trap01)
+	ZTRAPENTRY(T_TRCTRAP)
+IDTVEC_END(trap01)
+
+IDTVEC(trap02)
+	ZTRAPENTRY(T_NMI)
+IDTVEC_END(trap02)
+
+IDTVEC(trap03)
+	ZTRAPENTRY(T_BPTFLT)
+IDTVEC_END(trap03)
+
+IDTVEC(trap04)
+	ZTRAPENTRY(T_OFLOW)
+IDTVEC_END(trap04)
+
+IDTVEC(trap05)
+	ZTRAPENTRY(T_BOUND)
+IDTVEC_END(trap05)
+
+IDTVEC(trap06)
+	ZTRAPENTRY(T_PRIVINFLT)
+IDTVEC_END(trap06)
+
+IDTVEC(trap07)
+	ZTRAPENTRY(T_DNA)
+IDTVEC_END(trap07)
+
+IDTVEC(trap08)
+	TRAPENTRY(T_DOUBLEFLT)
+IDTVEC_END(trap08)
+
+IDTVEC(trap09)
+	ZTRAPENTRY(T_FPOPFLT)
+IDTVEC_END(trap09)
+
+IDTVEC(trap0a)
+	TRAPENTRY(T_TSSFLT)
+IDTVEC_END(trap0a)
+
+IDTVEC(trap0b)		/* #NP() Segment not present */
+	ZTRAPENTRY(T_SEGNPFLT)
+IDTVEC_END(trap0b)
+
+IDTVEC(trap0c)		/* #SS() Stack exception */
+	ZTRAPENTRY(T_STKFLT)
+IDTVEC_END(trap0c)
+
+IDTVEC(trap0d)		/* #GP() General protection */
+	ZTRAPENTRY(T_PROTFLT)
+IDTVEC_END(trap0d)
+
+IDTVEC(trap0e)
+	TRAPENTRY(T_PAGEFLT)
+IDTVEC_END(trap0e)
+
+IDTVEC(trap0f)
+	ZTRAPENTRY(T_ASTFLT)
+IDTVEC_END(trap0f)
+
+IDTVEC(trap10)
+	ZTRAPENTRY(T_ARITHTRAP)
+IDTVEC_END(trap10)
+
+IDTVEC(trap11)
+	TRAPENTRY(T_ALIGNFLT)
+IDTVEC_END(trap11)
+
+IDTVEC(trap12)
+	ZTRAPENTRY(T_MCA)
+IDTVEC_END(trap12)
+
+IDTVEC(trap13)
+	ZTRAPENTRY(T_XMM)
+IDTVEC_END(trap13)
+
+IDTVEC(trap14)
+IDTVEC(trap15)
+IDTVEC(trap16)
+IDTVEC(trap17)
+IDTVEC(trap18)
+IDTVEC(trap19)
+IDTVEC(trap1a)
+IDTVEC(trap1b)
+IDTVEC(trap1c)
+IDTVEC(trap1d)
+IDTVEC(trap1e)
+IDTVEC(trap1f)
+	/* vectors 20 to 31: reserved for future expansion */
+	ZTRAPENTRY(T_RESERVED)
+IDTVEC_END(trap1f)
+IDTVEC_END(trap1e)
+IDTVEC_END(trap1d)
+IDTVEC_END(trap1c)
+IDTVEC_END(trap1b)
+IDTVEC_END(trap1a)
+IDTVEC_END(trap19)
+IDTVEC_END(trap18)
+IDTVEC_END(trap17)
+IDTVEC_END(trap16)
+IDTVEC_END(trap15)
+IDTVEC_END(trap14)
+
+IDTVEC(exceptions)
+	.quad	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
+	.quad	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
+	.quad	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
+	.quad	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
+	.quad	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
+	.quad	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
+	.quad	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
+	.quad	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
+	.quad	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
+	.quad	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
+	.quad	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
+	.quad	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
+	.quad	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
+	.quad	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
+	.quad	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
+	.quad	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)
+IDTVEC_END(exceptions)
+
+/*
+ * Stack layout on entry to trap(), top of stack first:
+ *  tf_trapno
+ *  tf_err: a dummy 0 is pushed when the CPU supplies no error code
+ *  tf_rip
+ *  tf_cs
+ *  tf_rflags
+ *  tf_rsp
+ *  tf_ss
+ */
+
+NENTRY(alltraps)
+	movq	%rsp,%rdi
+	call	_C_LABEL(trap)
+	/* NOTREACHED */
+END(alltraps)
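Since alltraps hands %rsp to trap() as its single argument, the C
handler sees the words listed above as a structure whose layout follows
the push order. A sketch of the matching C side (field and function
names are illustrative; the prekern's real trap() lives in prekern.c):

#include <sys/stdint.h>

void fatal(char *);		/* from prekern.c */

struct trapframe {
	uint64_t tf_trapno;	/* pushed by TRAPENTRY/ZTRAPENTRY */
	uint64_t tf_err;	/* CPU error code, or the dummy 0 */
	uint64_t tf_rip;	/* pushed by the CPU, from here down */
	uint64_t tf_cs;
	uint64_t tf_rflags;
	uint64_t tf_rsp;
	uint64_t tf_ss;
};

/*
 * Illustrative handler: a trap in the prekern is always fatal, so
 * reporting the vector is all there is to do.
 */
void
trap(struct trapframe *tf)
{
	(void)tf->tf_trapno;	/* the vector number, for diagnostics */
	fatal("trap");
}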
