Let's make it clearer that we are operating within a single folio by
providing both the folio and the page.

This implies that for flush_dcache_folio() we'll now avoid one more
page->folio lookup, and that we can safely drop the "nth_page" usage.

Cc: Thomas Bogendoerfer <tsbog...@alpha.franken.de>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 arch/mips/include/asm/cacheflush.h | 11 +++++++----
 arch/mips/mm/cache.c               |  8 ++++----
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 1f14132b3fc98..8a2de28936e07 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -50,13 +50,14 @@ extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
        unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-extern void __flush_dcache_pages(struct page *page, unsigned int nr);
+extern void __flush_dcache_folio_pages(struct folio *folio, struct page *page, unsigned int nr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 static inline void flush_dcache_folio(struct folio *folio)
 {
        if (cpu_has_dc_aliases)
-               __flush_dcache_pages(&folio->page, folio_nr_pages(folio));
+               __flush_dcache_folio_pages(folio, folio_page(folio, 0),
+                                          folio_nr_pages(folio));
        else if (!cpu_has_ic_fills_f_dc)
                folio_set_dcache_dirty(folio);
 }
@@ -64,10 +65,12 @@ static inline void flush_dcache_folio(struct folio *folio)
 
 static inline void flush_dcache_page(struct page *page)
 {
+       struct folio *folio = page_folio(page);
+
        if (cpu_has_dc_aliases)
-               __flush_dcache_pages(page, 1);
+               __flush_dcache_folio_pages(folio, page, 1);
        else if (!cpu_has_ic_fills_f_dc)
-               folio_set_dcache_dirty(page_folio(page));
+               folio_set_dcache_dirty(folio);
 }
 
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index bf9a37c60e9f0..e3b4224c9a406 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -99,9 +99,9 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        return 0;
 }
 
-void __flush_dcache_pages(struct page *page, unsigned int nr)
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page,
+               unsigned int nr)
 {
-       struct folio *folio = page_folio(page);
        struct address_space *mapping = folio_flush_mapping(folio);
        unsigned long addr;
        unsigned int i;
@@ -117,12 +117,12 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
         * get faulted into the tlb (and thus flushed) anyways.
         */
        for (i = 0; i < nr; i++) {
-               addr = (unsigned long)kmap_local_page(nth_page(page, i));
+               addr = (unsigned long)kmap_local_page(page + i);
                flush_data_cache_page(addr);
                kunmap_local((void *)addr);
        }
 }
-EXPORT_SYMBOL(__flush_dcache_pages);
+EXPORT_SYMBOL(__flush_dcache_folio_pages);
 
 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
-- 
2.50.1


Reply via email to