Le 20/12/2020 à 00:48, Nicholas Piggin a écrit :
This allows the function to be entirely no-opped if hash support is
compiled out (not possible yet).

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
  arch/powerpc/include/asm/book3s/pgtable.h | 11 ++++++++++-
  arch/powerpc/mm/book3s32/mmu.c            |  4 ++--
  arch/powerpc/mm/book3s64/hash_utils.c     |  7 ++-----
  3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..914e9fc7b069 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -35,7 +35,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
   * corresponding HPTE into the hash table ahead of time, instead of
   * waiting for the inevitable extra hash-table miss exception.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+#ifdef CONFIG_PPC64

You shouldn't need that #ifdef: radix_enabled() is always defined.

+       if (radix_enabled())
+               return;
+#endif
+       hash__update_mmu_cache(vma, address, ptep);
+}
#endif /* __ASSEMBLY__ */
  #endif
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 859e5bd603ac..c5a570ca37ff 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -325,8 +325,8 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
   *
   * This must always be called with the pte lock held.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-                     pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+                           pte_t *ptep)

The limit is now 100 characters per line, so I think this should fit on a single line.

  {
        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 73b06adb6eeb..d52a3dee7cf2 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1667,8 +1667,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
   *
   * This must always be called with the pte lock held.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-                     pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+                           pte_t *ptep)

The limit is now 100 characters per line, so I think this should fit on a single line.

  {
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
@@ -1677,9 +1677,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
        unsigned long trap;
        bool is_exec;
-       if (radix_enabled())
-               return;
-
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

Reply via email to