From: Antonios Motakis <[email protected]>

We currently support 3 levels of page tables for a 39-bit PA range
on ARM. This patch implements support for 4-level page tables,
and 3-level page tables with a concatenated level-1 root page
table.

On AArch32 we stick with the current restriction of building for
a 39-bit physical address space; however, this change will allow
us to support a 40- to 48-bit PARange on AArch64.

Signed-off-by: Antonios Motakis <[email protected]>
---
 hypervisor/arch/arm/include/asm/paging.h       | 19 +++++-
 hypervisor/arch/arm/include/asm/paging_modes.h |  5 +-
 hypervisor/arch/arm/mmu_cell.c                 |  8 ++-
 hypervisor/arch/arm/paging.c                   | 82 +++++++++++++++++++++++++-
 4 files changed, 104 insertions(+), 10 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/paging.h 
b/hypervisor/arch/arm/include/asm/paging.h
index 28ba3e0..98fc343 100644
--- a/hypervisor/arch/arm/include/asm/paging.h
+++ b/hypervisor/arch/arm/include/asm/paging.h
@@ -31,11 +31,13 @@
  * by IPA[20:12].
  * This would allows to cover a 4GB memory map by using 4 concatenated level-2
  * page tables and thus provide better table walk performances.
- * For the moment, the core doesn't allow to use concatenated pages, so we will
- * use three levels instead, starting at level 1.
+ * For the moment, on AArch32 we use only a single page for the root
+ * page table (no concatenation).
  *
- * TODO: add a "u32 concatenated" field to the paging struct
+ * TODO: implement larger PARange support for AArch32
  */
+#define ARM_CELL_ROOT_PT_SZ    1
+
 #if MAX_PAGE_TABLE_LEVELS < 3
 #define T0SZ                   0
 #define SL0                    0
@@ -164,6 +166,17 @@
 
 typedef u64 *pt_entry_t;
 
+extern unsigned int cpu_parange;
+
+/* Return the number of bits supported for the physical address range on
+ * this machine; arch_paging_init() caches this value in cpu_parange for
+ * later reference. */
+static inline unsigned int get_cpu_parange(void)
+{
+       /* TODO: implement proper PARange support on AArch32 */
+       return 39;
+}
+
 /* Only executed on hypervisor paging struct changes */
 static inline void arch_paging_flush_page_tlbs(unsigned long page_addr)
 {
diff --git a/hypervisor/arch/arm/include/asm/paging_modes.h 
b/hypervisor/arch/arm/include/asm/paging_modes.h
index 72950eb..6634f9f 100644
--- a/hypervisor/arch/arm/include/asm/paging_modes.h
+++ b/hypervisor/arch/arm/include/asm/paging_modes.h
@@ -15,8 +15,7 @@
 #include <jailhouse/paging.h>
 
 /* Long-descriptor paging */
-extern const struct paging arm_paging[];
-
-#define hv_paging      arm_paging
+extern const struct paging *hv_paging;
+extern const struct paging *cell_paging;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/hypervisor/arch/arm/mmu_cell.c b/hypervisor/arch/arm/mmu_cell.c
index 4885f8c..fb5ad83 100644
--- a/hypervisor/arch/arm/mmu_cell.c
+++ b/hypervisor/arch/arm/mmu_cell.c
@@ -57,8 +57,10 @@ unsigned long arch_paging_gphys2phys(struct per_cpu 
*cpu_data,
 
 int arch_mmu_cell_init(struct cell *cell)
 {
-       cell->arch.mm.root_paging = hv_paging;
-       cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+       cell->arch.mm.root_paging = cell_paging;
+       cell->arch.mm.root_table =
+               page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
        if (!cell->arch.mm.root_table)
                return -ENOMEM;
 
@@ -67,7 +69,7 @@ int arch_mmu_cell_init(struct cell *cell)
 
 void arch_mmu_cell_destroy(struct cell *cell)
 {
-       page_free(&mem_pool, cell->arch.mm.root_table, 1);
+       page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
 }
 
 int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
diff --git a/hypervisor/arch/arm/paging.c b/hypervisor/arch/arm/paging.c
index 8fdd034..2ba7da6 100644
--- a/hypervisor/arch/arm/paging.c
+++ b/hypervisor/arch/arm/paging.c
@@ -12,6 +12,8 @@
 
 #include <jailhouse/paging.h>
 
+unsigned int cpu_parange = 0;
+
 static bool arm_entry_valid(pt_entry_t entry, unsigned long flags)
 {
        // FIXME: validate flags!
@@ -40,6 +42,20 @@ static bool arm_page_table_empty(page_table_t page_table)
        return true;
 }
 
+#if MAX_PAGE_TABLE_LEVELS > 3
+static pt_entry_t arm_get_l0_entry(page_table_t page_table, unsigned long virt)
+{
+       return &page_table[(virt & L0_VADDR_MASK) >> 39];
+}
+
+static unsigned long arm_get_l0_phys(pt_entry_t pte, unsigned long virt)
+{
+       if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+               return INVALID_PHYS_ADDR;
+       return (*pte & PTE_L0_BLOCK_ADDR_MASK) | (virt & BLOCK_512G_VADDR_MASK);
+}
+#endif
+
 #if MAX_PAGE_TABLE_LEVELS > 2
 static pt_entry_t arm_get_l1_entry(page_table_t page_table, unsigned long virt)
 {
@@ -59,6 +75,18 @@ static unsigned long arm_get_l1_phys(pt_entry_t pte, 
unsigned long virt)
 }
 #endif
 
+static pt_entry_t arm_get_l1_alt_entry(page_table_t page_table, unsigned long 
virt)
+{
+       return &page_table[(virt & BIT_MASK(48,30)) >> 30];
+}
+
+static unsigned long arm_get_l1_alt_phys(pt_entry_t pte, unsigned long virt)
+{
+       if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+               return INVALID_PHYS_ADDR;
+       return (*pte & BIT_MASK(48,30)) | (virt & BIT_MASK(29,0));
+}
+
 static pt_entry_t arm_get_l2_entry(page_table_t page_table, unsigned long virt)
 {
        return &page_table[(virt & L2_VADDR_MASK) >> 21];
@@ -109,7 +137,18 @@ static unsigned long arm_get_l3_phys(pt_entry_t pte, 
unsigned long virt)
                .clear_entry = arm_clear_entry,         \
                .page_table_empty = arm_page_table_empty,
 
-const struct paging arm_paging[] = {
+const static struct paging arm_paging[] = {
+#if MAX_PAGE_TABLE_LEVELS > 3
+       {
+               ARM_PAGING_COMMON
+               /* No block entries for level 0, so no need to set page_size */
+               .get_entry = arm_get_l0_entry,
+               .get_phys = arm_get_l0_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+#endif
 #if MAX_PAGE_TABLE_LEVELS > 2
        {
                ARM_PAGING_COMMON
@@ -144,6 +183,47 @@ const struct paging arm_paging[] = {
        }
 };
 
+const static struct paging arm_s2_paging_alt[] = {
+       {
+               ARM_PAGING_COMMON
+               .get_entry = arm_get_l1_alt_entry,
+               .get_phys = arm_get_l1_alt_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+       {
+               ARM_PAGING_COMMON
+               /* Block entry: 2MB */
+               .page_size = 2 * 1024 * 1024,
+               .get_entry = arm_get_l2_entry,
+               .set_terminal = arm_set_l2_block,
+               .get_phys = arm_get_l2_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+       {
+               ARM_PAGING_COMMON
+               /* Page entry: 4kB */
+               .page_size = 4 * 1024,
+               .get_entry = arm_get_l3_entry,
+               .set_terminal = arm_set_l3_page,
+               .get_phys = arm_get_l3_phys,
+       }
+};
+
+const struct paging *hv_paging = arm_paging;
+const struct paging *cell_paging;
+
 void arch_paging_init(void)
 {
+       cpu_parange = get_cpu_parange();
+
+       if (cpu_parange < 44)
+               /* 4-level page tables are not supported for stage 2;
+                * we need to use multiple consecutive pages for L1 instead. */
+               cell_paging = arm_s2_paging_alt;
+       else
+               cell_paging = arm_paging;
 }
-- 
2.8.0.rc3


-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to