This loop is duplicated 3 times; move it into its own function and call
that instead. This simplifies the logic in a few functions.

Signed-off-by: Casey Connolly <[email protected]>
---
 arch/arm/cpu/armv8/cache_v8.c | 97 ++++++++++++++-----------------------------
 1 file changed, 31 insertions(+), 66 deletions(-)

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 36d337378fcd..bd839b2addb4 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -1022,8 +1022,33 @@ static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
        /* Roll on to the next page table level */
        return 0;
 }
 
+static void set_regions(u64 start, u64 size, u64 attrs, bool flag)
+{
+       int level, levelstart;
+       u64 r, va_bits;
+       get_tcr(NULL, &va_bits);
+
+       levelstart = va_bits < 39 ? 1 : 0;
+
+       /*
+        * Loop through the address range until we find a page granule that fits
+        * our alignment constraints, then set it to the new cache attributes
+        */
+       while (size > 0) {
+               for (level = levelstart; level < 4; level++) {
+                       r = set_one_region(start, size, attrs, flag, level);
+                       if (r) {
+                               /* PTE successfully replaced */
+                               size -= r;
+                               start += r;
+                               break;
+                       }
+               }
+       }
+}
+
 void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
 {
        u64 attrs = PMD_ATTRINDX(option >> 2);
@@ -1041,28 +1066,9 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
         * we can safely modify our primary page tables and then switch back
         */
        __asm_switch_ttbr(gd->arch.tlb_emerg);
 
-       /*
-        * Loop through the address range until we find a page granule that fits
-        * our alignment constraints, then set it to the new cache attributes
-        */
-       while (size > 0) {
-               int level;
-               u64 r;
-
-               for (level = 1; level < 4; level++) {
-                       /* Set d-cache attributes only */
-                       r = set_one_region(start, size, attrs, false, level);
-                       if (r) {
-                               /* PTE successfully replaced */
-                               size -= r;
-                               start += r;
-                               break;
-                       }
-               }
-
-       }
+       set_regions(start, size, attrs, false);
 
        /* We're done modifying page tables, switch back to our primary ones */
        __asm_switch_ttbr(gd->arch.tlb_addr);
 
@@ -1072,31 +1078,11 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
         */
        flush_dcache_range(real_start, real_start + real_size);
 }
 
-void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t siz, u64 attrs)
+void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t size, u64 attrs)
 {
-       int level;
-       u64 r, size, start;
-
-       /*
-        * Loop through the address range until we find a page granule that fits
-        * our alignment constraints and set the new permissions
-        */
-       start = addr;
-       size = siz;
-       while (size > 0) {
-               for (level = 1; level < 4; level++) {
-                       /* Set PTE to new attributes */
-                       r = set_one_region(start, size, attrs, true, level);
-                       if (r) {
-                               /* PTE successfully updated */
-                               size -= r;
-                               start += r;
-                               break;
-                       }
-               }
-       }
+       set_regions(addr, size, attrs, true);
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
 }
@@ -1105,38 +1091,17 @@ void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t siz, u64 attrs)
  * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
  * The procecess is break-before-make. The target region will be marked as
  * invalid during the process of changing.
  */
-void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
+void mmu_change_region_attr(phys_addr_t addr, size_t size, u64 attrs)
 {
-       int level;
-       u64 r, size, start;
-
-       start = addr;
-       size = siz;
-       /*
-        * Loop through the address range until we find a page granule that fits
-        * our alignment constraints, then set it to "invalid".
-        */
-       while (size > 0) {
-               for (level = 1; level < 4; level++) {
-                       /* Set PTE to fault */
-                       r = set_one_region(start, size, PTE_TYPE_FAULT, true,
-                                          level);
-                       if (r) {
-                               /* PTE successfully invalidated */
-                               size -= r;
-                               start += r;
-                               break;
-                       }
-               }
-       }
+       set_regions(addr, size, PTE_TYPE_FAULT, true);
 
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
 
-       mmu_change_region_attr_nobreak(addr, siz, attrs);
+       mmu_change_region_attr_nobreak(addr, size, attrs);
 }
 
 int pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
 {

-- 
2.53.0

Reply via email to