We already have early_remap_range for remapping operations done before
enabling the MMU. Let's use the same name scheme for all early page
table operations.

Signed-off-by: Ahmad Fatoum <a.fat...@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c | 10 +++++-----
 arch/arm/cpu/mmu_64.c |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index ae86c27e7e27..848d2d2b8c0b 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -381,8 +381,8 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
        return 0;
 }
 
-static void create_sections(unsigned long first, unsigned long last,
-                           unsigned int flags)
+static void early_create_sections(unsigned long first, unsigned long last,
+                                 unsigned int flags)
 {
        uint32_t *ttb = get_ttb();
        unsigned long ttb_start = pgd_index(first);
@@ -395,10 +395,10 @@ static void create_sections(unsigned long first, unsigned long last,
        }
 }
 
-static inline void create_flat_mapping(void)
+static inline void early_create_flat_mapping(void)
 {
        /* create a flat mapping using 1MiB sections */
-       create_sections(0, 0xffffffff, attrs_uncached_mem());
+       early_create_sections(0, 0xffffffff, attrs_uncached_mem());
 }
 
 void *map_io_sections(unsigned long phys, void *_start, size_t size)
@@ -634,7 +634,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
         * This marks the whole address space as uncachable as well as
         * unexecutable if possible
         */
-       create_flat_mapping();
+       early_create_flat_mapping();
 
        /* maps main memory as cachable */
        optee_start = membase + memsize - OPTEE_SIZE;
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 9e8d36d94944..83738ed6ad0d 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -424,7 +424,7 @@ void *dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *dma_ha
        return dma_alloc_map(dev, size, dma_handle, MAP_WRITECOMBINE);
 }
 
-static void init_range(size_t total_level0_tables)
+static void early_init_range(size_t total_level0_tables)
 {
        uint64_t *ttb = get_ttb();
        uint64_t addr = 0;
@@ -460,7 +460,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
         * Assume maximum BITS_PER_PA set to 40 bits.
         * Set 1:1 mapping of VA->PA. So to cover the full 1TB range we need 2 tables.
         */
-       init_range(2);
+       early_init_range(2);
 
        early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX, false);
 
-- 
2.39.5


Reply via email to