From: Ahmad Fatoum <a.fat...@pengutronix.de>

The code is identical between ARM32 and ARM64 and is going to get more
complex with the addition of finer-grained MMU permissions.

Let's move it to a common code file in anticipation.

Signed-off-by: Ahmad Fatoum <a.fat...@pengutronix.de>
Signed-off-by: Ahmad Fatoum <a.fat...@barebox.org>
---
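Note for reviewers (not part of the commit message): the moved loop splits
each memory bank into MAP_CACHED ranges around its reserved regions. The
small host-side C program below only illustrates that splitting logic with
made-up bank and reserved-region values; the struct and helper names are
simplified stand-ins for illustration, not barebox's actual types.

/* Host-side sketch only: mimics how mmu_remap_memory_banks() walks a bank
 * and "remaps" everything except reserved regions as cached. Types and
 * addresses are invented for illustration. */
#include <stdio.h>

struct region {
	unsigned long start, end;	/* inclusive end, like struct resource */
};

static void remap_cached(unsigned long start, unsigned long size)
{
	if (size)
		printf("remap [0x%08lx - 0x%08lx] MAP_CACHED\n",
		       start, start + size - 1);
}

int main(void)
{
	/* one example bank with two reserved carve-outs inside it */
	struct region bank = { 0x40000000, 0x7fffffff };
	struct region rsv[] = {
		{ 0x40000000, 0x400fffff },	/* e.g. early firmware */
		{ 0x60000000, 0x600fffff },	/* e.g. OP-TEE */
	};
	unsigned long pos = bank.start;

	for (unsigned int i = 0; i < sizeof(rsv) / sizeof(rsv[0]); i++) {
		/* gap between current position and the reserved region */
		remap_cached(pos, rsv[i].start - pos);
		/* continue right after the reserved region */
		pos = rsv[i].end + 1;
	}
	/* remainder of the bank after the last reserved region */
	remap_cached(pos, bank.end + 1 - pos);

	return 0;
}

Built with a plain cc, this prints the cached ranges that the real code
would pass to remap_range(), i.e. the bank minus its reserved regions,
which stay at their early-init mapping.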
 arch/arm/cpu/mmu-common.c | 31 +++++++++++++++++++++++++++++--
 arch/arm/cpu/mmu_32.c     | 22 ----------------------
 arch/arm/cpu/mmu_64.c     | 16 ----------------
 3 files changed, 29 insertions(+), 40 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index a55dce72a22d..85cb7cb007b9 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -69,6 +69,34 @@ void zero_page_faulting(void)
        remap_range(0x0, PAGE_SIZE, MAP_FAULT);
 }
 
+static void mmu_remap_memory_banks(void)
+{
+       struct memory_bank *bank;
+
+       /*
+        * Early mmu init will have mapped everything but the initial memory area
+        * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
+        * all memory banks, so let's map all pages, excluding reserved memory areas,
+        * cacheable and executable.
+        */
+       for_each_memory_bank(bank) {
+               struct resource *rsv;
+               resource_size_t pos;
+
+               pos = bank->start;
+
+               /* Skip reserved regions */
+               for_each_reserved_region(bank, rsv) {
+                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+                       pos = rsv->end + 1;
+               }
+
+               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+       }
+
+       setup_trap_pages();
+}
+
 static int mmu_init(void)
 {
        if (efi_is_payload())
@@ -94,8 +122,7 @@ static int mmu_init(void)
        }
 
        __mmu_init(get_cr() & CR_M);
-
-       setup_trap_pages();
+       mmu_remap_memory_banks();
 
        return 0;
 }
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 3572fa70d13a..080e55a7ced6 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -555,7 +555,6 @@ void setup_trap_pages(void)
  */
 void __mmu_init(bool mmu_on)
 {
-       struct memory_bank *bank;
        uint32_t *ttb = get_ttb();
 
        // TODO: remap writable only while remapping?
@@ -574,27 +573,6 @@ void __mmu_init(bool mmu_on)
                                        ttb);
 
        pr_debug("ttb: 0x%p\n", ttb);
-
-       /*
-        * Early mmu init will have mapped everything but the initial memory area
-        * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
-        * all memory banks, so let's map all pages, excluding reserved memory areas,
-        * cacheable and executable.
-        */
-       for_each_memory_bank(bank) {
-               struct resource *rsv;
-               resource_size_t pos;
-
-               pos = bank->start;
-
-               /* Skip reserved regions */
-               for_each_reserved_region(bank, rsv) {
-                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
-                       pos = rsv->end + 1;
-               }
-
-               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
-       }
 }
 
 /*
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index ba82528990fe..54d4a4e9c638 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -359,7 +359,6 @@ void setup_trap_pages(void)
 void __mmu_init(bool mmu_on)
 {
        uint64_t *ttb = get_ttb();
-       struct memory_bank *bank;
 
        // TODO: remap writable only while remapping?
        // TODO: What memtype for ttb when barebox is EFI loader?
@@ -373,21 +372,6 @@ void __mmu_init(bool mmu_on)
                 *   the ttb will get corrupted.
                 */
                pr_crit("Can't request SDRAM region for ttb at %p\n", ttb);
-
-       for_each_memory_bank(bank) {
-               struct resource *rsv;
-               resource_size_t pos;
-
-               pos = bank->start;
-
-               /* Skip reserved regions */
-               for_each_reserved_region(bank, rsv) {
-                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
-                       pos = rsv->end + 1;
-               }
-
-               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
-       }
 }
 
 void mmu_disable(void)
-- 
2.39.5

