As a side effect, the order of the flush_cache_vmap() and
arch_sync_kernel_mappings() calls is switched, but that now matches
the other callers in this file.

Reviewed-by: Christoph Hellwig <h...@lst.de>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
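[Note, not part of the patch] For context, the point of the _noflush
split is that a caller within this file can install several mappings
and issue a single flush_cache_vmap() over the combined range. A
minimal sketch of that pattern follows; map_two_ranges() is a
hypothetical helper invented here for illustration, not something
this patch adds:

	/*
	 * Hypothetical caller inside mm/vmalloc.c: map two adjacent
	 * physical ranges with the noflush variant, then flush the
	 * cache once over the whole [a, b) range.
	 */
	static int map_two_ranges(unsigned long a, unsigned long mid,
				  unsigned long b, phys_addr_t pa0,
				  phys_addr_t pa1, pgprot_t prot,
				  unsigned int max_page_shift)
	{
		int err;

		err = vmap_range_noflush(a, mid, pa0, prot, max_page_shift);
		if (!err)
			err = vmap_range_noflush(mid, b, pa1, prot,
						 max_page_shift);

		flush_cache_vmap(a, b);
		return err;
	}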
 mm/vmalloc.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f043386bb51d..47ab4338cfff 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -240,7 +240,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
        return 0;
 }
 
-int vmap_range(unsigned long addr, unsigned long end,
+static int vmap_range_noflush(unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift)
 {
@@ -263,14 +263,24 @@ int vmap_range(unsigned long addr, unsigned long end,
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 
-       flush_cache_vmap(start, end);
-
        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);
 
        return err;
 }
 
+int vmap_range(unsigned long addr, unsigned long end,
+                       phys_addr_t phys_addr, pgprot_t prot,
+                       unsigned int max_page_shift)
+{
+       int err;
+
+       err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
+       flush_cache_vmap(addr, end);
+
+       return err;
+}
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             pgtbl_mod_mask *mask)
 {
-- 
2.23.0
