Implement asm_mmu_enable and flush_tlb_all, and then make a final change to mmu.c in order to link it into arm64: map the code read-only. This is necessary because armv8 forces all writable code shared between EL1 and EL0 to be PXN.
Signed-off-by: Andrew Jones <drjo...@redhat.com> --- arm/cstart64.S | 64 ++++++++++++++++++++++++++++++++++++++++++++ arm/flat.lds | 1 + config/config-arm-common.mak | 1 + config/config-arm.mak | 1 - lib/arm/asm/mmu.h | 1 + lib/arm/mmu.c | 10 ++++++- lib/arm64/asm/mmu-api.h | 1 + lib/arm64/asm/mmu.h | 16 +++++++---- lib/arm64/asm/processor.h | 14 ++++++++++ lib/arm64/processor.c | 26 +++++++++++++++++- 10 files changed, 127 insertions(+), 8 deletions(-) create mode 100644 lib/arm64/asm/mmu-api.h diff --git a/arm/cstart64.S b/arm/cstart64.S index d1860a94fb2d3..5151f4c77d745 100644 --- a/arm/cstart64.S +++ b/arm/cstart64.S @@ -8,6 +8,9 @@ #define __ASSEMBLY__ #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/pgtable-hwdef.h> .section .init @@ -55,6 +58,67 @@ halt: b 1b /* + * asm_mmu_enable + * Inputs: + * x0 is the base address of the translation table + * Outputs: none + * + * Adapted from + * arch/arm64/kernel/head.S + * arch/arm64/mm/proc.S + */ + +/* + * Memory region attributes for LPAE: + * + * n = AttrIndx[2:0] + * n MAIR + * DEVICE_nGnRnE 000 00000000 + * DEVICE_nGnRE 001 00000100 + * DEVICE_GRE 010 00001100 + * NORMAL_NC 011 01000100 + * NORMAL 100 11111111 + */ +#define MAIR(attr, mt) ((attr) << ((mt) * 8)) + +.globl asm_mmu_enable +asm_mmu_enable: + ic iallu // I+BTB cache invalidate + tlbi vmalle1is // invalidate I + D TLBs + dsb ish + + /* TCR */ + ldr x1, =TCR_TxSZ(VA_BITS) | \ + TCR_TG0_64K | TCR_TG1_64K | \ + TCR_IRGN_WBWA | TCR_ORGN_WBWA | \ + TCR_SHARED + mov x2, #3 // 011 is 42 bits + bfi x1, x2, #32, #3 + msr tcr_el1, x1 + + /* MAIR */ + ldr x1, =MAIR(0x00, MT_DEVICE_nGnRnE) | \ + MAIR(0x04, MT_DEVICE_nGnRE) | \ + MAIR(0x0c, MT_DEVICE_GRE) | \ + MAIR(0x44, MT_NORMAL_NC) | \ + MAIR(0xff, MT_NORMAL) + msr mair_el1, x1 + + /* TTBR0 */ + msr ttbr0_el1, x0 + isb + + /* SCTLR */ + mrs x1, sctlr_el1 + orr x1, x1, SCTLR_EL1_C + orr x1, x1, SCTLR_EL1_I + orr x1, x1, SCTLR_EL1_M + msr 
sctlr_el1, x1 + isb + + ret + +/* * Vectors * Adapted from arch/arm64/kernel/entry.S */ diff --git a/arm/flat.lds b/arm/flat.lds index 89a55720d728f..a8849ee0939a8 100644 --- a/arm/flat.lds +++ b/arm/flat.lds @@ -3,6 +3,7 @@ SECTIONS { .text : { *(.init) *(.text) *(.text.*) } . = ALIGN(64K); + etext = .; .data : { exception_stacks = .; . += 64K; diff --git a/config/config-arm-common.mak b/config/config-arm-common.mak index b61a2a6044ab2..b01e9ab836b2d 100644 --- a/config/config-arm-common.mak +++ b/config/config-arm-common.mak @@ -33,6 +33,7 @@ cflatobjs += lib/virtio-mmio.o cflatobjs += lib/chr-testdev.o cflatobjs += lib/arm/io.o cflatobjs += lib/arm/setup.o +cflatobjs += lib/arm/mmu.o libeabi = lib/arm/libeabi.a eabiobjs = lib/arm/eabi_compat.o diff --git a/config/config-arm.mak b/config/config-arm.mak index 96686fb639d2d..16e2cb5c103a3 100644 --- a/config/config-arm.mak +++ b/config/config-arm.mak @@ -15,7 +15,6 @@ CFLAGS += -mcpu=$(PROCESSOR) cstart.o = $(TEST_DIR)/cstart.o cflatobjs += lib/arm/spinlock.o cflatobjs += lib/arm/processor.o -cflatobjs += lib/arm/mmu.o # arm specific tests tests = diff --git a/lib/arm/asm/mmu.h b/lib/arm/asm/mmu.h index 5ec7a6ce5886b..c1bd01c9ee1b9 100644 --- a/lib/arm/asm/mmu.h +++ b/lib/arm/asm/mmu.h @@ -9,6 +9,7 @@ #include <asm/barrier.h> #define PTE_USER L_PTE_USER +#define PTE_RDONLY PTE_AP2 #define PTE_SHARED L_PTE_SHARED #define PTE_AF PTE_EXT_AF #define PTE_WBWA L_PTE_MT_WRITEALLOC diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c index 55d18a10e1ebd..1c024538663ce 100644 --- a/lib/arm/mmu.c +++ b/lib/arm/mmu.c @@ -8,6 +8,8 @@ #include <asm/setup.h> #include <asm/mmu.h> +extern unsigned long etext; + pgd_t *mmu_idmap; static bool mmu_on; @@ -72,13 +74,19 @@ void mmu_enable_idmap(void) { unsigned long phys_end = sizeof(long) == 8 || !(PHYS_END >> 32) ? 
PHYS_END : 0xfffff000; + unsigned long code_end = (unsigned long)&etext; mmu_idmap = pgd_alloc(); mmu_init_io_sect(mmu_idmap, PHYS_IO_OFFSET); + /* armv8 requires code shared between EL1 and EL0 to be read-only */ mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET, - PHYS_OFFSET, phys_end, + PHYS_OFFSET, code_end, + __pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER)); + + mmu_set_range_ptes(mmu_idmap, code_end, + code_end, phys_end, __pgprot(PTE_WBWA | PTE_USER)); mmu_enable(mmu_idmap); diff --git a/lib/arm64/asm/mmu-api.h b/lib/arm64/asm/mmu-api.h new file mode 100644 index 0000000000000..7cd7096a93553 --- /dev/null +++ b/lib/arm64/asm/mmu-api.h @@ -0,0 +1 @@ +#include "../../arm/asm/mmu-api.h" diff --git a/lib/arm64/asm/mmu.h b/lib/arm64/asm/mmu.h index cbafbca6701e7..18b4d6be18fae 100644 --- a/lib/arm64/asm/mmu.h +++ b/lib/arm64/asm/mmu.h @@ -5,14 +5,20 @@ * * This work is licensed under the terms of the GNU LGPL, version 2. */ +#include <asm/pgtable.h> +#include <asm/barrier.h> -static inline bool mmu_enabled(void) -{ - return false; -} +#define PMD_SECT_UNCACHED PMD_ATTRINDX(MT_DEVICE_nGnRE) +#define PTE_WBWA PTE_ATTRINDX(MT_NORMAL) -static inline void mmu_enable_idmap(void) +static inline void flush_tlb_all(void) { + dsb(ishst); + asm("tlbi vmalle1is"); + dsb(ish); + isb(); } +#include <asm/mmu-api.h> + #endif /* __ASMARM64_MMU_H_ */ diff --git a/lib/arm64/asm/processor.h b/lib/arm64/asm/processor.h index 66296f549f87e..f73ffb5e4bc95 100644 --- a/lib/arm64/asm/processor.h +++ b/lib/arm64/asm/processor.h @@ -5,6 +5,18 @@ * * This work is licensed under the terms of the GNU LGPL, version 2. 
*/ + +/* System Control Register (SCTLR_EL1) bits */ +#define SCTLR_EL1_EE (1 << 25) +#define SCTLR_EL1_WXN (1 << 19) +#define SCTLR_EL1_I (1 << 12) +#define SCTLR_EL1_SA0 (1 << 4) +#define SCTLR_EL1_SA (1 << 3) +#define SCTLR_EL1_C (1 << 2) +#define SCTLR_EL1_A (1 << 1) +#define SCTLR_EL1_M (1 << 0) + +#ifndef __ASSEMBLY__ #include <asm/ptrace.h> enum vector { @@ -38,6 +50,7 @@ extern void install_exception_handler(enum vector v, unsigned int ec, extern void show_regs(struct pt_regs *regs); extern void *get_sp(void); +extern bool get_far(unsigned int esr, unsigned long *far); static inline unsigned long current_level(void) { @@ -49,4 +62,5 @@ static inline unsigned long current_level(void) extern bool user_mode; extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr); +#endif /* !__ASSEMBLY__ */ #endif /* _ASMARM64_PROCESSOR_H_ */ diff --git a/lib/arm64/processor.c b/lib/arm64/processor.c index 7230a8ab3f702..7dc0b2e026134 100644 --- a/lib/arm64/processor.c +++ b/lib/arm64/processor.c @@ -84,9 +84,32 @@ void *get_sp(void) return (void *)sp; } +bool get_far(unsigned int esr, unsigned long *far) +{ + unsigned int ec = esr >> ESR_EL1_EC_SHIFT; + + asm volatile("mrs %0, far_el1": "=r" (*far)); + + switch (ec) { + case ESR_EL1_EC_IABT_EL0: + case ESR_EL1_EC_IABT_EL1: + case ESR_EL1_EC_PC_ALIGN: + case ESR_EL1_EC_DABT_EL0: + case ESR_EL1_EC_DABT_EL1: + case ESR_EL1_EC_WATCHPT_EL0: + case ESR_EL1_EC_WATCHPT_EL1: + if ((esr & 0x3f /* DFSC */) != 0x10 + || !(esr & 0x400 /* FnV */)) + return true; + } + return false; +} + static void bad_exception(enum vector v, struct pt_regs *regs, unsigned int esr, bool bad_vector) { + unsigned long far; + bool far_valid = get_far(esr, &far); unsigned int ec = esr >> ESR_EL1_EC_SHIFT; if (bad_vector) { @@ -104,7 +127,8 @@ static void bad_exception(enum vector v, struct pt_regs *regs, } printf("Vector: %d (%s)\n", v, vector_names[v]); - printf("ESR_EL1: %08lx, ec=0x%x (%s)\n", esr, ec, ec_names[ec]); + 
printf("ESR_EL1: %8s%08lx, ec=0x%x (%s)\n", "", esr, ec, ec_names[ec]); + printf("FAR_EL1: %016lx (%svalid)\n", far, far_valid ? "" : "not "); printf("Exception frame registers:\n"); show_regs(regs); abort(); -- 1.9.3 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html