The loop that remaps memory banks can end up calling remap_range with
a size of zero when a reserved region sits at the very start of a
memory bank.

The code handles this correctly, but still performs an unnecessary
invalidation of the whole TLB. Let's exit early instead to skip that.

Signed-off-by: Ahmad Fatoum <a.fat...@barebox.org>
---
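For illustration only (not part of the patch): a minimal userspace sketch of
how the bank-remap loop can end up handing a zero size to remap_range when a
reserved region starts exactly at the beginning of a bank. All identifiers
and addresses below are made up for the example and are not the actual
barebox ones.

    /* Standalone sketch; compile with any C compiler. */
    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the real remap_range(): just report what it was asked to do. */
    static void remap_range(uint64_t start, uint64_t size)
    {
            if (!size) {
                    printf("remap_range(0x%llx, 0): nothing to map\n",
                           (unsigned long long)start);
                    return;
            }
            printf("remap_range(0x%llx, 0x%llx)\n",
                   (unsigned long long)start, (unsigned long long)size);
    }

    int main(void)
    {
            /* hypothetical bank and a reserved region at its very start */
            uint64_t bank_start = 0x40000000, bank_size = 0x20000000;
            uint64_t resv_start = bank_start,  resv_size = 0x100000;

            /* region before the reservation: zero bytes in this case */
            remap_range(bank_start, resv_start - bank_start);
            /* region after the reservation */
            remap_range(resv_start + resv_size,
                        bank_start + bank_size - (resv_start + resv_size));
            return 0;
    }

As the commit message notes, without the early exit the zero-size call would
still reach the whole-TLB invalidation at the end of the remap path, which is
what this patch avoids.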
 arch/arm/cpu/mmu_32.c | 2 ++
 arch/arm/cpu/mmu_64.c | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 89a18d342b80..5f303ae1dc87 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -283,6 +283,8 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
        pr_debug("%s: 0x%08x 0x%08x type %d\n", __func__, virt_addr, size, map_type);
 
        size = PAGE_ALIGN(size);
+       if (!size)
+               return;
 
        while (size) {
                const bool pgdir_size_aligned = IS_ALIGNED(virt_addr, PGDIR_SIZE);
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index a229e4cb5526..91b3cd76c24f 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -146,6 +146,8 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
        attr &= ~PTE_TYPE_MASK;
 
        size = PAGE_ALIGN(size);
+       if (!size)
+               return;
 
        while (size) {
                table = ttb;
-- 
2.39.5
