Le 15/02/2023 à 03:01, Rohan McLure a écrit :
> Replace occurrences of p{u,m,4}d_is_leaf with p{u,m,4}d_leaf, as the
> latter is the name given to checking that a higher-level entry in
> multi-level paging contains a page translation entry (pte) throughout
> all other archs.
> 
> A future patch will implement p{u,m,4}d_leaf stubs on all platforms so
> that they may be referenced in generic code.
> 
> Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>

Reviewed-by: Christophe Leroy <christophe.le...@csgroup.eu>

> ---
> V4: New patch
> V5: Previously replaced stub definition for *_is_leaf with *_leaf. Do
> that in a later patch
> ---
>   arch/powerpc/kvm/book3s_64_mmu_radix.c   | 12 ++++++------
>   arch/powerpc/mm/book3s64/radix_pgtable.c | 14 +++++++-------
>   arch/powerpc/mm/pgtable.c                |  6 +++---
>   arch/powerpc/mm/pgtable_64.c             |  6 +++---
>   arch/powerpc/xmon/xmon.c                 |  6 +++---
>   5 files changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
> b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> index 9d3743ca16d5..0d24fd984d16 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> @@ -497,7 +497,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t 
> *pmd, bool full,
>       for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
>               if (!pmd_present(*p))
>                       continue;
> -             if (pmd_is_leaf(*p)) {
> +             if (pmd_leaf(*p)) {
>                       if (full) {
>                               pmd_clear(p);
>                       } else {
> @@ -526,7 +526,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t 
> *pud,
>       for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
>               if (!pud_present(*p))
>                       continue;
> -             if (pud_is_leaf(*p)) {
> +             if (pud_leaf(*p)) {
>                       pud_clear(p);
>               } else {
>                       pmd_t *pmd;
> @@ -629,12 +629,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>               new_pud = pud_alloc_one(kvm->mm, gpa);
>   
>       pmd = NULL;
> -     if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
> +     if (pud && pud_present(*pud) && !pud_leaf(*pud))
>               pmd = pmd_offset(pud, gpa);
>       else if (level <= 1)
>               new_pmd = kvmppc_pmd_alloc();
>   
> -     if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
> +     if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
>               new_ptep = kvmppc_pte_alloc();
>   
>       /* Check if we might have been invalidated; let the guest retry if so */
> @@ -652,7 +652,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>               new_pud = NULL;
>       }
>       pud = pud_offset(p4d, gpa);
> -     if (pud_is_leaf(*pud)) {
> +     if (pud_leaf(*pud)) {
>               unsigned long hgpa = gpa & PUD_MASK;
>   
>               /* Check if we raced and someone else has set the same thing */
> @@ -703,7 +703,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>               new_pmd = NULL;
>       }
>       pmd = pmd_offset(pud, gpa);
> -     if (pmd_is_leaf(*pmd)) {
> +     if (pmd_leaf(*pmd)) {
>               unsigned long lgpa = gpa & PMD_MASK;
>   
>               /* Check if we raced and someone else has set the same thing */
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c 
> b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index 26245aaf12b8..4e46e001c3c3 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -205,14 +205,14 @@ static void radix__change_memory_range(unsigned long 
> start, unsigned long end,
>               pudp = pud_alloc(&init_mm, p4dp, idx);
>               if (!pudp)
>                       continue;
> -             if (pud_is_leaf(*pudp)) {
> +             if (pud_leaf(*pudp)) {
>                       ptep = (pte_t *)pudp;
>                       goto update_the_pte;
>               }
>               pmdp = pmd_alloc(&init_mm, pudp, idx);
>               if (!pmdp)
>                       continue;
> -             if (pmd_is_leaf(*pmdp)) {
> +             if (pmd_leaf(*pmdp)) {
>                       ptep = pmdp_ptep(pmdp);
>                       goto update_the_pte;
>               }
> @@ -786,7 +786,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, 
> unsigned long addr,
>               if (!pmd_present(*pmd))
>                       continue;
>   
> -             if (pmd_is_leaf(*pmd)) {
> +             if (pmd_leaf(*pmd)) {
>                       if (!IS_ALIGNED(addr, PMD_SIZE) ||
>                           !IS_ALIGNED(next, PMD_SIZE)) {
>                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
> @@ -816,7 +816,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, 
> unsigned long addr,
>               if (!pud_present(*pud))
>                       continue;
>   
> -             if (pud_is_leaf(*pud)) {
> +             if (pud_leaf(*pud)) {
>                       if (!IS_ALIGNED(addr, PUD_SIZE) ||
>                           !IS_ALIGNED(next, PUD_SIZE)) {
>                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
> @@ -849,7 +849,7 @@ static void __meminit remove_pagetable(unsigned long 
> start, unsigned long end)
>               if (!p4d_present(*p4d))
>                       continue;
>   
> -             if (p4d_is_leaf(*p4d)) {
> +             if (p4d_leaf(*p4d)) {
>                       if (!IS_ALIGNED(addr, P4D_SIZE) ||
>                           !IS_ALIGNED(next, P4D_SIZE)) {
>                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
> @@ -1112,7 +1112,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t 
> prot)
>   
>   int pud_clear_huge(pud_t *pud)
>   {
> -     if (pud_is_leaf(*pud)) {
> +     if (pud_leaf(*pud)) {
>               pud_clear(pud);
>               return 1;
>       }
> @@ -1159,7 +1159,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t 
> prot)
>   
>   int pmd_clear_huge(pmd_t *pmd)
>   {
> -     if (pmd_is_leaf(*pmd)) {
> +     if (pmd_leaf(*pmd)) {
>               pmd_clear(pmd);
>               return 1;
>       }
> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
> index d7cce317cef8..00ffbf197a13 100644
> --- a/arch/powerpc/mm/pgtable.c
> +++ b/arch/powerpc/mm/pgtable.c
> @@ -386,7 +386,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
>       if (p4d_none(p4d))
>               return NULL;
>   
> -     if (p4d_is_leaf(p4d)) {
> +     if (p4d_leaf(p4d)) {
>               ret_pte = (pte_t *)p4dp;
>               goto out;
>       }
> @@ -408,7 +408,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
>       if (pud_none(pud))
>               return NULL;
>   
> -     if (pud_is_leaf(pud)) {
> +     if (pud_leaf(pud)) {
>               ret_pte = (pte_t *)pudp;
>               goto out;
>       }
> @@ -447,7 +447,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
>               goto out;
>       }
>   
> -     if (pmd_is_leaf(pmd)) {
> +     if (pmd_leaf(pmd)) {
>               ret_pte = (pte_t *)pmdp;
>               goto out;
>       }
> diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
> index 5ac1fd30341b..0604c80dae66 100644
> --- a/arch/powerpc/mm/pgtable_64.c
> +++ b/arch/powerpc/mm/pgtable_64.c
> @@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
>   /* 4 level page table */
>   struct page *p4d_page(p4d_t p4d)
>   {
> -     if (p4d_is_leaf(p4d)) {
> +     if (p4d_leaf(p4d)) {
>               if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
>                       VM_WARN_ON(!p4d_huge(p4d));
>               return pte_page(p4d_pte(p4d));
> @@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
>   
>   struct page *pud_page(pud_t pud)
>   {
> -     if (pud_is_leaf(pud)) {
> +     if (pud_leaf(pud)) {
>               if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
>                       VM_WARN_ON(!pud_huge(pud));
>               return pte_page(pud_pte(pud));
> @@ -125,7 +125,7 @@ struct page *pud_page(pud_t pud)
>    */
>   struct page *pmd_page(pmd_t pmd)
>   {
> -     if (pmd_is_leaf(pmd)) {
> +     if (pmd_leaf(pmd)) {
>               /*
>                * vmalloc_to_page may be called on any vmap address (not only
>                * vmalloc), and it uses pmd_page() etc., when huge vmap is
> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
> index 73c620c2a3a1..07346b10f972 100644
> --- a/arch/powerpc/xmon/xmon.c
> +++ b/arch/powerpc/xmon/xmon.c
> @@ -3339,7 +3339,7 @@ static void show_pte(unsigned long addr)
>               return;
>       }
>   
> -     if (p4d_is_leaf(*p4dp)) {
> +     if (p4d_leaf(*p4dp)) {
>               format_pte(p4dp, p4d_val(*p4dp));
>               return;
>       }
> @@ -3353,7 +3353,7 @@ static void show_pte(unsigned long addr)
>               return;
>       }
>   
> -     if (pud_is_leaf(*pudp)) {
> +     if (pud_leaf(*pudp)) {
>               format_pte(pudp, pud_val(*pudp));
>               return;
>       }
> @@ -3367,7 +3367,7 @@ static void show_pte(unsigned long addr)
>               return;
>       }
>   
> -     if (pmd_is_leaf(*pmdp)) {
> +     if (pmd_leaf(*pmdp)) {
>               format_pte(pmdp, pmd_val(*pmdp));
>               return;
>       }

Reply via email to