The code is identical between ARM32 and ARM64 and is going to get more
complex with the addition of finer-grained MMU permissions.

Let's move it to a common code file in anticipation.

Signed-off-by: Ahmad Fatoum <a.fat...@barebox.org>
---
 arch/arm/cpu/mmu-common.c | 46 +++++++++++++++++++++++++++++++++++++++
 arch/arm/cpu/mmu_32.c     | 40 ----------------------------------
 arch/arm/cpu/mmu_64.c     | 35 -----------------------------
 3 files changed, 46 insertions(+), 75 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index f3416ae7f7ca..575fb32282d1 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -12,6 +12,7 @@
 #include <asm/barebox-arm.h>
 #include <memory.h>
 #include <zero_page.h>
+#include <range.h>
 #include "mmu-common.h"
 #include <efi/efi-mode.h>
 
@@ -69,6 +70,50 @@ void zero_page_faulting(void)
        remap_range(0x0, PAGE_SIZE, MAP_FAULT);
 }
 
+static void mmu_remap_memory_banks(void)
+{
+       struct memory_bank *bank;
+       unsigned long text_start = (unsigned long)&_stext;
+       unsigned long code_start = text_start;
+       unsigned long code_size = (unsigned long)&__start_rodata - (unsigned long)&_stext;
+       unsigned long text_size = (unsigned long)&_etext - text_start;
+       unsigned long rodata_start = (unsigned long)&__start_rodata;
+       unsigned long rodata_size = (unsigned long)&__end_rodata - rodata_start;
+
+       /*
+        * Early mmu init will have mapped everything but the initial memory area
+        * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
+        * all memory banks, so let's map all pages, excluding reserved memory areas,
+        * cacheable and executable.
+        */
+       for_each_memory_bank(bank) {
+               struct resource *rsv;
+               resource_size_t pos;
+
+               pos = bank->start;
+
+               /* Skip reserved regions */
+               for_each_reserved_region(bank, rsv) {
+                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+                       pos = rsv->end + 1;
+               }
+
+               if (region_overlap_size(pos, bank->start + bank->size - pos,
+                   text_start, text_size)) {
+                       remap_range((void *)pos, text_start - pos, MAP_CACHED);
+                       /* skip barebox segments here, will be mapped below */
+                       pos = text_start + text_size;
+               }
+
+               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+       }
+
+       setup_trap_pages();
+
+       remap_range((void *)code_start, code_size, MAP_CODE);
+       remap_range((void *)rodata_start, rodata_size, ARCH_MAP_CACHED_RO);
+}
+
 static int mmu_init(void)
 {
        if (efi_is_payload())
@@ -94,6 +139,7 @@ static int mmu_init(void)
        }
 
        __mmu_init(get_cr() & CR_M);
+       mmu_remap_memory_banks();
 
        return 0;
 }
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 151e786c9b2d..985a063bbdda 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -19,7 +19,6 @@
 #include <asm/system_info.h>
 #include <asm/sections.h>
 #include <linux/pagemap.h>
-#include <range.h>
 
 #include "mmu_32.h"
 
@@ -579,14 +578,7 @@ void setup_trap_pages(void)
  */
 void __mmu_init(bool mmu_on)
 {
-       struct memory_bank *bank;
        uint32_t *ttb = get_ttb();
-       unsigned long text_start = (unsigned long)&_stext;
-       unsigned long code_start = text_start;
-       unsigned long code_size = (unsigned long)&__start_rodata - (unsigned long)&_stext;
-       unsigned long text_size = (unsigned long)&_etext - text_start;
-       unsigned long rodata_start = (unsigned long)&__start_rodata;
-       unsigned long rodata_size = (unsigned long)&__end_rodata - rodata_start;
 
        // TODO: remap writable only while remapping?
        // TODO: What memtype for ttb when barebox is EFI loader?
@@ -604,38 +596,6 @@ void __mmu_init(bool mmu_on)
                                        ttb);
 
        pr_debug("ttb: 0x%p\n", ttb);
-
-       /*
-        * Early mmu init will have mapped everything but the initial memory area
-        * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
-        * all memory banks, so let's map all pages, excluding reserved memory areas,
-        * cacheable and executable.
-        */
-       for_each_memory_bank(bank) {
-               struct resource *rsv;
-               resource_size_t pos;
-
-               pos = bank->start;
-
-               /* Skip reserved regions */
-               for_each_reserved_region(bank, rsv) {
-                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
-                       pos = rsv->end + 1;
-               }
-
-               if (region_overlap_size(pos, bank->start + bank->size - pos, text_start, text_size)) {
-                       remap_range((void *)pos, code_start - pos, MAP_CACHED);
-                       /* skip barebox segments here, will be mapped below */
-                       pos = text_start + text_size;
-               }
-
-               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
-       }
-
-       setup_trap_pages();
-
-       remap_range((void *)code_start, code_size, MAP_CODE);
-       remap_range((void *)rodata_start, rodata_size, ARCH_MAP_CACHED_RO);
 }
 
 /*
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index a770be7ed611..e7d2e9697a7e 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -10,7 +10,6 @@
 #include <init.h>
 #include <mmu.h>
 #include <errno.h>
-#include <range.h>
 #include <zero_page.h>
 #include <linux/sizes.h>
 #include <asm/memory.h>
@@ -374,13 +373,6 @@ void setup_trap_pages(void)
 void __mmu_init(bool mmu_on)
 {
        uint64_t *ttb = get_ttb();
-       struct memory_bank *bank;
-       unsigned long text_start = (unsigned long)&_stext;
-       unsigned long code_start = text_start;
-       unsigned long code_size = (unsigned long)&__start_rodata - (unsigned long)&_stext;
-       unsigned long text_size = (unsigned long)&_etext - text_start;
-       unsigned long rodata_start = (unsigned long)&__start_rodata;
-       unsigned long rodata_size = (unsigned long)&__end_rodata - rodata_start;
 
        // TODO: remap writable only while remapping?
        // TODO: What memtype for ttb when barebox is EFI loader?
@@ -394,33 +386,6 @@ void __mmu_init(bool mmu_on)
                 *   the ttb will get corrupted.
                 */
                pr_crit("Can't request SDRAM region for ttb at %p\n", ttb);
-
-       for_each_memory_bank(bank) {
-               struct resource *rsv;
-               resource_size_t pos;
-
-               pos = bank->start;
-
-               /* Skip reserved regions */
-               for_each_reserved_region(bank, rsv) {
-                       remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
-                       pos = rsv->end + 1;
-               }
-
-               if (region_overlap_size(pos, bank->start + bank->size - pos,
-                   text_start, text_size)) {
-                       remap_range((void *)pos, text_start - pos, MAP_CACHED);
-                       /* skip barebox segments here, will be mapped below */
-                       pos = text_start + text_size;
-               }
-
-               remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
-       }
-
-       setup_trap_pages();
-
-       remap_range((void *)code_start, code_size, MAP_CODE);
-       remap_range((void *)rodata_start, rodata_size, ARCH_MAP_CACHED_RO);
 }
 
 void mmu_disable(void)
-- 
2.39.5