Implement `p2m_alloc_table`, `p2m_init` and `p2m_final_teardown` for MPU systems. Additionally implement a helper function `generate_vsctlr` for p2m initialization.
Signed-off-by: Harry Ramsey <[email protected]>
---
 xen/arch/arm/include/asm/arm32/mpu.h |  2 +
 xen/arch/arm/include/asm/arm64/mpu.h |  2 +
 xen/arch/arm/include/asm/mpu.h       |  5 ++
 xen/arch/arm/mpu/p2m.c               | 71 ++++++++++++++++++++++++++--
 4 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/include/asm/arm32/mpu.h b/xen/arch/arm/include/asm/arm32/mpu.h
index 2cf0f8cbac..d565230f84 100644
--- a/xen/arch/arm/include/asm/arm32/mpu.h
+++ b/xen/arch/arm/include/asm/arm32/mpu.h
@@ -11,6 +11,8 @@
  */
 #define MPU_REGION_RES0 0x0
 
+#define VSCTLR_VMID_SHIFT 16
+
 /* Hypervisor Protection Region Base Address Register */
 typedef union {
     struct {
diff --git a/xen/arch/arm/include/asm/arm64/mpu.h b/xen/arch/arm/include/asm/arm64/mpu.h
index 4f694190a8..8b86a03fee 100644
--- a/xen/arch/arm/include/asm/arm64/mpu.h
+++ b/xen/arch/arm/include/asm/arm64/mpu.h
@@ -7,6 +7,8 @@
 
 #define MPU_REGION_RES0 (0xFFFFULL << 48)
 
+#define VSCTLR_VMID_SHIFT 48
+
 /* Protection Region Base Address Register */
 typedef union {
     struct __packed {
diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
index 72fa5b00b8..4ae583a7e9 100644
--- a/xen/arch/arm/include/asm/mpu.h
+++ b/xen/arch/arm/include/asm/mpu.h
@@ -87,6 +87,11 @@ static inline bool region_is_valid(const pr_t *pr)
     return pr->prlar.reg.en;
 }
 
+static inline register_t generate_vsctlr(uint16_t vmid)
+{
+    return ((register_t)vmid << VSCTLR_VMID_SHIFT);
+}
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* __ARM_MPU_H__ */
diff --git a/xen/arch/arm/mpu/p2m.c b/xen/arch/arm/mpu/p2m.c
index f7fb58ab6a..1598f9ab64 100644
--- a/xen/arch/arm/mpu/p2m.c
+++ b/xen/arch/arm/mpu/p2m.c
@@ -28,10 +28,62 @@ void p2m_dump_info(struct domain *d)
     BUG_ON("unimplemented");
 }
 
+static int __init p2m_alloc_table(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    pr_t* p2m_map;
+
+    p2m_map = alloc_xenheap_pages(P2M_ROOT_ORDER, 0);
+    if ( !p2m_map )
+    {
+        printk(XENLOG_G_ERR "DOM%pd: p2m: unable to allocate P2M MPU mapping table\n", d);
+        return -ENOMEM;
+    }
+    clear_page(p2m_map);
+
+    p2m->root = virt_to_page((const void *)p2m_map);
+
+    return 0;
+}
+
 int p2m_init(struct domain *d)
 {
-    BUG_ON("unimplemented");
-    return -EINVAL;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int rc = 0;
+    unsigned int cpu;
+
+    rwlock_init(&p2m->lock);
+
+    p2m->vmid = INVALID_VMID;
+    p2m->max_mapped_gfn = _gfn(0);
+    p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
+
+    p2m->default_access = p2m_access_rwx;
+    /* mem_access is NOT supported in MPU system. */
+    p2m->mem_access_enabled = false;
+
+    /* Ensure that the type chosen is large enough for MAX_VIRT_CPUS. */
+    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS);
+
+    for_each_possible_cpu(cpu)
+        p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
+
+    /*
+     * "Trivial" initialization is now complete. Set the backpointer so that
+     * p2m_teardown() and related functions know to do something.
+     */
+    p2m->domain = d;
+
+    rc = p2m_alloc_vmid(d);
+    if ( rc )
+        return rc;
+    p2m->vsctlr = generate_vsctlr(p2m->vmid);
+
+    rc = p2m_alloc_table(d);
+    if ( rc )
+        return rc;
+
+    return rc;
 }
 
 void p2m_save_state(struct vcpu *p)
@@ -46,7 +98,20 @@ void p2m_restore_state(struct vcpu *n)
 
 void p2m_final_teardown(struct domain *d)
 {
-    BUG_ON("unimplemented");
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    /* p2m not actually initialized */
+    if ( !p2m->domain )
+        return;
+
+    if ( p2m->root )
+        free_xenheap_pages(p2m->root, P2M_ROOT_ORDER);
+
+    p2m->root = NULL;
+
+    p2m_free_vmid(d);
+
+    p2m->domain = NULL;
 }
 
 bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
-- 
2.43.0
