From: Penny Zheng <[email protected]>

For MMU systems, setup_virt_paging() is used to configure the stage 2
address translation regime: the number of IPA bits, the VMID allocator
set up, etc. Some of this can be inherited on MPU systems, such as the
VMID allocator set up.

For MPU systems, the following memory translation regimes are possible:
- PMSAv8-64 at both EL1/EL0 and EL2 (default)
- VMSAv8-64 at EL1/EL0 and PMSAv8-64 at EL2 (enabled with the device tree
  property v8r_el1_msa, see the illustrative fragment below)
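
As an illustration only (the node hosting the property is an assumption of
this example, not something defined by this patch), the second regime could
be selected with a device tree fragment along these lines:

    chosen {
        /* Illustrative placement: request VMSAv8-64 at EL1/EL0 */
        v8r_el1_msa;
    };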

Signed-off-by: Penny Zheng <[email protected]>
Signed-off-by: Wei Chen <[email protected]>
Signed-off-by: Luca Fancellu <[email protected]>
Signed-off-by: Hari Limaye <[email protected]>
Signed-off-by: Harry Ramsey <[email protected]>
---
 xen/arch/arm/arm64/mpu/p2m.c             | 67 +++++++++++++++++++++++-
 xen/arch/arm/include/asm/arm64/sysregs.h |  4 ++
 xen/arch/arm/include/asm/cpufeature.h    | 13 +++--
 xen/arch/arm/include/asm/mpu/p2m.h       | 12 +++++
 xen/arch/arm/include/asm/p2m.h           |  5 ++
 xen/arch/arm/include/asm/processor.h     | 11 ++++
 6 files changed, 108 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/arm64/mpu/p2m.c b/xen/arch/arm/arm64/mpu/p2m.c
index b6d8b2777b..da8f0553c1 100644
--- a/xen/arch/arm/arm64/mpu/p2m.c
+++ b/xen/arch/arm/arm64/mpu/p2m.c
@@ -2,11 +2,76 @@
 
 #include <xen/bug.h>
 #include <xen/init.h>
+#include <xen/warning.h>
 #include <asm/p2m.h>
 
 void __init setup_virt_paging(void)
 {
-    BUG_ON("unimplemented");
+    uint64_t vtcr_el2 = READ_SYSREG(VTCR_EL2),
+             vstcr_el2 = READ_SYSREG(VSTCR_EL2);
+
+    /* Map ID_AA64MMFR0_EL1.PARange to the number of physical address bits. */
+    const unsigned int pa_range_info[] = {32, 36, 40, 42, 44, 48, 52,
+                                          0 /* Invalid */};
+
+    /*
+     * Restrict "p2m_ipa_bits" if needed. As the P2M is always configured with
+     * IPA bits == PA bits, compare against the PA bits reported by the
+     * hardware.
+     */
+    if ( pa_range_info[system_cpuinfo.mm64.pa_range] < p2m_ipa_bits )
+        p2m_ipa_bits = pa_range_info[system_cpuinfo.mm64.pa_range];
+
+    /*
+     * Clear the VTCR_EL2.NSA bit to configure the non-secure stage 2
+     * translation output address space to access the Secure PA space, as
+     * Armv8-R only implements the Secure state.
+     */
+    vtcr_el2 &= ~VTCR_NSA;
+
+    /*
+     * The MSA and MSA_frac fields in the ID_AA64MMFR0_EL1 register identify
+     * the memory system configurations supported. In Armv8-R AArch64, the
+     * only permitted value for ID_AA64MMFR0_EL1.MSA is 0b1111.
+     */
+    if ( system_cpuinfo.mm64.msa != MM64_MSA_PMSA_SUPPORT )
+        goto fault;
+
+    /* Permitted values for ID_AA64MMFR0_EL1.MSA_frac are 0b0001 and 0b0010. */
+    if ( system_cpuinfo.mm64.msa_frac == MM64_MSA_FRAC_NONE_SUPPORT )
+        goto fault;
+
+    /*
+     * cpuinfo sanitization makes sure we support 16-bit VMIDs only if all
+     * cores support it.
+     */
+    if ( system_cpuinfo.mm64.vmid_bits == MM64_VMID_16_BITS_SUPPORT )
+        max_vmid = MAX_VMID_16_BIT;
+
+    /* Set the VS bit only if 16-bit VMIDs are supported. */
+    if ( max_vmid == MAX_VMID_16_BIT )
+        vtcr_el2 |= VTCR_VS;
+
+    p2m_vmid_allocator_init();
+
+    WRITE_SYSREG(vtcr_el2, VTCR_EL2);
+
+    /*
+     * VSTCR_EL2.SA defines the secure stage 2 translation output address
+     * space. To make sure that all stage 2 translations for the Secure PA
+     * space access the Secure PA space, we keep the SA bit as 0.
+     *
+     * VSTCR_EL2.SC is the NS check enable bit. To make sure that the stage 2
+     * NS configuration is checked against the stage 1 NS configuration in the
+     * EL1&0 translation regime for the given address, and a fault is
+     * generated if they differ, we set the SC bit to 1.
+     */
+    vstcr_el2 &= ~VSTCR_EL2_SA;
+    vstcr_el2 |= VSTCR_EL2_SC;
+    WRITE_SYSREG(vstcr_el2, VSTCR_EL2);
+
+    return;
+
+ fault:
+    panic("Hardware with no PMSAv8-64 support in any translation regime\n");
 }
 
 /*
diff --git a/xen/arch/arm/include/asm/arm64/sysregs.h b/xen/arch/arm/include/asm/arm64/sysregs.h
index 19d409d3eb..a4b6eef181 100644
--- a/xen/arch/arm/include/asm/arm64/sysregs.h
+++ b/xen/arch/arm/include/asm/arm64/sysregs.h
@@ -462,6 +462,10 @@
 #define ZCR_ELx_LEN_SIZE             9
 #define ZCR_ELx_LEN_MASK             0x1ff
 
+/* Virtualization Secure Translation Control Register */
+#define VSTCR_EL2_SA                 (_AC(0x1,UL)<<30)
+#define VSTCR_EL2_SC                 (_AC(0x1,UL)<<20)
+
 #ifdef CONFIG_MPU
 /*
  * The Armv8-R AArch64 architecture always executes code in Secure
diff --git a/xen/arch/arm/include/asm/cpufeature.h b/xen/arch/arm/include/asm/cpufeature.h
index 13353c8e1a..bf902a3970 100644
--- a/xen/arch/arm/include/asm/cpufeature.h
+++ b/xen/arch/arm/include/asm/cpufeature.h
@@ -248,6 +248,12 @@ struct cpuinfo_arm {
             unsigned long tgranule_16K:4;
             unsigned long tgranule_64K:4;
             unsigned long tgranule_4K:4;
+#ifdef CONFIG_MPU
+            unsigned long __res0:16;
+            unsigned long msa:4;
+            unsigned long msa_frac:4;
+            unsigned long __res1:8;
+#else
             unsigned long tgranule_16k_2:4;
             unsigned long tgranule_64k_2:4;
             unsigned long tgranule_4k_2:4;
@@ -255,6 +261,7 @@ struct cpuinfo_arm {
             unsigned long __res0:8;
             unsigned long fgt:4;
             unsigned long ecv:4;
+#endif
 
             /* MMFR1 */
             unsigned long hafdbs:4;
@@ -267,13 +274,13 @@ struct cpuinfo_arm {
             unsigned long xnx:4;
             unsigned long twed:4;
             unsigned long ets:4;
-            unsigned long __res1:4;
+            unsigned long __res2:4;
             unsigned long afp:4;
-            unsigned long __res2:12;
+            unsigned long __res3:12;
             unsigned long ecbhb:4;
 
             /* MMFR2 */
-            unsigned long __res3:64;
+            unsigned long __res4:64;
         };
     } mm64;
 
diff --git a/xen/arch/arm/include/asm/mpu/p2m.h b/xen/arch/arm/include/asm/mpu/p2m.h
index e46d9e757a..d165585d4e 100644
--- a/xen/arch/arm/include/asm/mpu/p2m.h
+++ b/xen/arch/arm/include/asm/mpu/p2m.h
@@ -5,6 +5,18 @@
 
 struct p2m_domain;
 
+/*
+ * The architecture allows at most 255 EL2 MPU memory regions. Each MPU
+ * region entry (pr_t) is 32 bytes on AArch64, so the table requires two 4KB
+ * pages, and 16 bytes on AArch32, where a single 4KB page is enough.
+ */
+#ifdef CONFIG_ARM_64
+#define P2M_ROOT_ORDER 1
+#else
+#define P2M_ROOT_ORDER 0
+#endif
+
+/* Not used on MPU systems */
 static inline void p2m_clear_root_pages(struct p2m_domain *p2m) {}
 
 static inline void p2m_tlb_flush_sync(struct p2m_domain *p2m) {}
diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h
index 010ce8c9eb..ed1b6dd40f 100644
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -48,8 +48,13 @@ struct p2m_domain {
     /* Current VMID in use */
     uint16_t vmid;
 
+#ifdef CONFIG_MMU
     /* Current Translation Table Base Register for the p2m */
     uint64_t vttbr;
+#else
+    /* Current Virtualization System Control Register for the p2m */
+    register_t vsctlr;
+#endif
 
     /* Highest guest frame that's ever been mapped in the p2m */
     gfn_t max_mapped_gfn;
diff --git a/xen/arch/arm/include/asm/processor.h b/xen/arch/arm/include/asm/processor.h
index 1a48c9ff3b..7344aa094b 100644
--- a/xen/arch/arm/include/asm/processor.h
+++ b/xen/arch/arm/include/asm/processor.h
@@ -403,6 +403,10 @@
 
 #define VTCR_RES1       (_AC(1,UL)<<31)
 
+#if defined(CONFIG_MPU) && defined(CONFIG_ARM_64)
+#define VTCR_NSA        (_AC(0x1,UL)<<30)
+#endif
+
 /* HCPTR Hyp. Coprocessor Trap Register */
 #define HCPTR_TAM       ((_AC(1,U)<<30))
 #define HCPTR_TTA       ((_AC(1,U)<<20))        /* Trap trace registers */
@@ -464,6 +468,13 @@
 #define MM64_VMID_16_BITS_SUPPORT   0x2
 #endif
 
+#if defined(CONFIG_MPU) && defined(CONFIG_ARM_64)
+#define MM64_MSA_PMSA_SUPPORT       0xf
+#define MM64_MSA_FRAC_NONE_SUPPORT  0x0
+#define MM64_MSA_FRAC_PMSA_SUPPORT  0x1
+#define MM64_MSA_FRAC_VMSA_SUPPORT  0x2
+#endif
+
 #ifndef __ASSEMBLER__
 
 extern register_t __cpu_logical_map[];
-- 
2.43.0

