At the moment, on MMU systems, enable_mm() returns to an address in the
1:1 mapping, each boot path is then responsible for switching to the
runtime virtual mapping, and remove_identity_mapping() is called
afterwards to remove the whole 1:1 mapping.

Since remove_identity_mapping() is not necessary on MPU systems, and we
want to avoid an empty stub for MPU while keeping a single code flow in
arm64/head.S, move the switch to the runtime mapping and the call to
remove_identity_mapping() into the MMU version of enable_mm(). The
caller now passes the runtime address to return to in LR.

Signed-off-by: Penny Zheng <penny.zh...@arm.com>
Signed-off-by: Wei Chen <wei.c...@arm.com>
---
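For reviewers: a minimal sketch of the new enable_mm() contract as seen
from a caller (the resume_vaddr label below is illustrative only, not
part of the patch):

        ldr   lr, =resume_vaddr   /* runtime address to continue at */
        b     enable_mm           /* plain b: LR already holds the target */
resume_vaddr:
        /* Now running on the runtime mapping; the 1:1 map is gone. */

enable_mm() turns the MMU on, switches to the runtime mapping, removes
the 1:1 mapping (on MMU systems) and "returns" through LR, so the MPU
variant can simply return through LR without needing a
remove_identity_mapping() stub.
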
 xen/arch/arm/arm64/head.S     | 28 +++++++++++++---------------
 xen/arch/arm/arm64/head_mmu.S | 33 ++++++++++++++++++++++++++++++---
 2 files changed, 43 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index a92883319d..6358305f03 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -258,20 +258,15 @@ real_start_efi:
          * and memory regions for MPU systems.
          */
         bl    prepare_early_mappings
+        /*
+         * Address in the runtime mapping to jump to after the
+         * MMU/MPU is enabled
+         */
+        ldr   lr, =primary_switched
         /* Turn on MMU or MPU */
-        bl    enable_mm
+        b     enable_mm
 
-        /* We are still in the 1:1 mapping. Jump to the runtime Virtual Address. */
-        ldr   x0, =primary_switched
-        br    x0
 primary_switched:
-        /*
-         * The 1:1 map may clash with other parts of the Xen virtual memory
-         * layout. As it is not used anymore, remove it completely to
-         * avoid having to worry about replacing existing mapping
-         * afterwards.
-         */
-        bl    remove_identity_mapping
         bl    setup_early_uart
 #ifdef CONFIG_EARLY_PRINTK
         /* Use a virtual address to access the UART. */
@@ -317,11 +312,14 @@ GLOBAL(init_secondary)
         bl    check_cpu_mode
         bl    cpu_init
         bl    prepare_early_mappings
-        bl    enable_mm
 
-        /* We are still in the 1:1 mapping. Jump to the runtime Virtual Address. */
-        ldr   x0, =secondary_switched
-        br    x0
+        /*
+         * Address in the runtime mapping to jump to after the
+         * MMU/MPU is enabled
+         */
+        ldr   lr, =secondary_switched
+        b     enable_mm
+
 secondary_switched:
         /*
          * Non-boot CPUs need to move on to the proper pagetables, which were
diff --git a/xen/arch/arm/arm64/head_mmu.S b/xen/arch/arm/arm64/head_mmu.S
index a19b7c873d..c9e83bbe2d 100644
--- a/xen/arch/arm/arm64/head_mmu.S
+++ b/xen/arch/arm/arm64/head_mmu.S
@@ -211,9 +211,11 @@ virtphys_clash:
 ENDPROC(prepare_early_mappings)
 
 /*
- * Turn on the Data Cache and the MMU. The function will return on the 1:1
- * mapping. In other word, the caller is responsible to switch to the runtime
- * mapping.
+ * Turn on the Data Cache and the MMU. The function will return
+ * to the virtual address provided in LR (e.g. the runtime mapping).
+ *
+ * Inputs:
+ * lr(x30): Virtual address to return to
  *
  * Clobbers x0 - x3
  */
@@ -238,6 +240,31 @@ ENTRY(enable_mm)
         dsb   sy                     /* Flush PTE writes and finish reads */
         msr   SCTLR_EL2, x0          /* now paging is enabled */
         isb                          /* Now, flush the icache */
+
+        /*
+         * The MMU is turned on and we are in the 1:1 mapping. Switch
+         * to the runtime mapping.
+         */
+        ldr   x0, =1f
+        br    x0
+1:
+        /*
+         * The 1:1 map may clash with other parts of the Xen virtual memory
+         * layout. As it is not used anymore, remove it completely to
+         * avoid having to worry about replacing existing mapping
+         * afterwards.
+         *
+         * On return, remove_identity_mapping will jump to the virtual
+         * address requested by the caller in LR.
+         */
+        b     remove_identity_mapping
+
+        /*
+         * This point should not be reached: the "ret" at the end of
+         * remove_identity_mapping already returns to the address held
+         * in LR. Keep a "ret" here as a safety net in case that "ret"
+         * is ever removed in the future.
+         */
         ret
 ENDPROC(enable_mm)
 
-- 
2.25.1

