Create a new layer for the in-table PTE manipulation APIs. For now, the
existing API is prefixed with a double underscore to become the
arch-private API, and the public API is just a simple wrapper that
calls the private API.
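
Concretely, on arm64 the layering after this patch looks like the
following (simplified from the diff below):

  /* Arch-private API: does the real work. */
  static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, unsigned int nr)
  {
          ...
  }

  /* Public API: for now, a trivial alias for the private one. */
  #define set_ptes __set_ptes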

The public API implementation will subsequently be used to transparently
manipulate the contiguous bit where appropriate. But since there are
already some contig-aware users (e.g. hugetlb, kernel mapper), we must
first ensure those users use the private API directly so that the future
contig-bit manipulations in the public API do not interfere with those
existing uses.
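
As a rough sketch of the direction (the helper below is hypothetical
and not part of this patch), the public wrapper could later become
something like:

  static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte, unsigned int nr)
  {
          /*
           * Hypothetical helper: fold in the contiguous bit when the
           * range's alignment and size permit.
           */
          if (contpte_suitable(addr, pte, nr))
                  pte = pte_mkcont(pte);
          __set_ptes(mm, addr, ptep, pte, nr);
  }

Contig-aware users manage the contiguous bit themselves and would be
broken by such transparent handling, hence the conversions below.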

set_pte_at() is a core macro that forwards to set_ptes() (with nr=1).
Instead of creating a __set_pte_at() internal macro, convert all arch
users to use set_ptes()/__set_ptes() directly, as appropriate. Callers
in hugetlb may benefit from calling __set_ptes() once for their whole
range rather than managing their own loop. This is left for future
improvement.
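
For example, the per-entry loops in set_huge_pte_at() below could in
principle collapse to a single call, e.g. (a sketch only, valid just
for the CONT_PTE case where dpfn == 1, since __set_ptes() advances
the pfn by one page per entry):

  __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), ncontig);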

Tested-by: John Hubbard <jhubb...@nvidia.com>
Signed-off-by: Ryan Roberts <ryan.robe...@arm.com>
---
 arch/arm64/include/asm/pgtable.h | 10 +++++-----
 arch/arm64/kernel/mte.c          |  2 +-
 arch/arm64/kvm/guest.c           |  2 +-
 arch/arm64/mm/fault.c            |  2 +-
 arch/arm64/mm/hugetlbpage.c      | 10 +++++-----
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3cb45e8dbb52..f1fd6c5e3eca 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -358,9 +358,9 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
        return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
 }
 
-static inline void set_ptes(struct mm_struct *mm,
-                           unsigned long __always_unused addr,
-                           pte_t *ptep, pte_t pte, unsigned int nr)
+static inline void __set_ptes(struct mm_struct *mm,
+                             unsigned long __always_unused addr,
+                             pte_t *ptep, pte_t pte, unsigned int nr)
 {
        page_table_check_ptes_set(mm, ptep, pte, nr);
        __sync_cache_and_tags(pte, nr);
@@ -374,7 +374,6 @@ static inline void set_ptes(struct mm_struct *mm,
                pte = pte_advance_pfn(pte, 1);
        }
 }
-#define set_ptes set_ptes
 
 /*
  * Huge pte definitions.
@@ -1079,7 +1078,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 #endif /* CONFIG_ARM64_MTE */
 
 /*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ * On AArch64, the cache coherency is handled via the __set_ptes() function.
  */
 static inline void update_mmu_cache_range(struct vm_fault *vmf,
                struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
@@ -1140,6 +1139,7 @@ void vmemmap_update_pte(unsigned long addr, pte_t *ptep, pte_t pte);
 #endif
 
 #define set_pte                                        __set_pte
+#define set_ptes                               __set_ptes
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index a41ef3213e1e..dcdcccd40891 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
-        * pages is tagged, set_pte_at() may zero or change the tags of the
+        * pages is tagged, __set_ptes() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (page_mte_tagged(page1) || page_mte_tagged(page2))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index aaf1d4939739..629145fd3161 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1072,7 +1072,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                } else {
                        /*
                         * Only locking to serialise with a concurrent
-                        * set_pte_at() in the VMM but still overriding the
+                        * __set_ptes() in the VMM but still overriding the
                         * tags, hence ignoring the return value.
                         */
                        try_page_mte_tagging(page);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 13189322a38f..23d0dfc16686 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -205,7 +205,7 @@ static void show_pte(unsigned long addr)
  *
  * It needs to cope with hardware update of the accessed/dirty state by other
  * agents in the system and can safely skip the __sync_icache_dcache() call as,
- * like set_pte_at(), the PTE is never changed from no-exec to exec here.
+ * like __set_ptes(), the PTE is never changed from no-exec to exec here.
  *
  * Returns whether or not the PTE actually changed.
  */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..9d7e7315eaa3 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -254,12 +254,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
        if (!pte_present(pte)) {
                for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
-                       set_pte_at(mm, addr, ptep, pte);
+                       __set_ptes(mm, addr, ptep, pte, 1);
                return;
        }
 
        if (!pte_cont(pte)) {
-               set_pte_at(mm, addr, ptep, pte);
+               __set_ptes(mm, addr, ptep, pte, 1);
                return;
        }
 
@@ -270,7 +270,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
        clear_flush(mm, addr, ptep, pgsize, ncontig);
 
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-               set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+               __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -478,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-               set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+               __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 
        return 1;
 }
@@ -507,7 +507,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
        pfn = pte_pfn(pte);
 
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-               set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+               __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
-- 
2.25.1
