On 16/06/2023 12:08, Aneesh Kumar K.V wrote:
> dax vmemmap optimization requires a minimum of 2 PAGE_SIZE area within
> vmemmap such that tail page mapping can point to the second PAGE_SIZE area.
> Enforce that in vmemmap_can_optimize() function.
> 
> Architectures like powerpc also want to enable vmemmap optimization
> conditionally (only with radix MMU translation). Hence allow architecture
> override.
> 
This makes sense. The enforcement here is not just for correctness but presumably
because you want to use VMEMMAP_RESERVE_NR?

I would suggest having two patches: one for the refactor and another one for the
override, but I don't feel particularly strongly about it.

> Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
> ---
>  include/linux/mm.h | 30 ++++++++++++++++++++++++++----
>  mm/mm_init.c       |  2 +-
>  2 files changed, 27 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 27ce77080c79..9a45e61cd83f 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -31,6 +31,8 @@
>  #include <linux/memremap.h>
>  #include <linux/slab.h>
>  
> +#include <asm/page.h>
> +

Why is this include needed?

>  struct mempolicy;
>  struct anon_vma;
>  struct anon_vma_chain;
> @@ -3550,13 +3552,33 @@ void vmemmap_free(unsigned long start, unsigned long end,
>               struct vmem_altmap *altmap);
>  #endif
>  
> +#define VMEMMAP_RESERVE_NR   2

see below

>  #ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
> -static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
> -                                        struct dev_pagemap *pgmap)
> +static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
> +                                       struct dev_pagemap *pgmap)
>  {
> -     return is_power_of_2(sizeof(struct page)) &&
> -             pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
> +     if (pgmap) {
> +             unsigned long nr_pages;
> +             unsigned long nr_vmemmap_pages;
> +
> +             nr_pages = pgmap_vmemmap_nr(pgmap);
> +             nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
> +             /*
> +              * For vmemmap optimization with DAX we need minimum 2 vmemmap
> +              * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
> +              */
> +             return is_power_of_2(sizeof(struct page)) &&
> +                     (nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
> +     }

It would be more readable (i.e. less identation) if you just reverse this:

        unsigned long nr_vmemmap_pages;

        if (!pgmap || !is_power_of_2(sizeof(struct page)))
                return false;

        nr_vmemmap_pages = ((pgmap_vmemmap_nr(pgmap) *
                             sizeof(struct page)) >> PAGE_SHIFT);

        /*
         * For vmemmap optimization with DAX we need minimum 2 vmemmap
         * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
         */
        return (nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
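
To make the threshold concrete (a sketch assuming the common 64-byte
struct page and 4K PAGE_SIZE; actual sizes are config-dependent): a 2M
compound device page spans 512 base pages, so

        nr_vmemmap_pages = (512 * 64) >> 12 = 8;  /* > VMEMMAP_RESERVE_NR, optimizable */

while a 64K compound page gives (16 * 64) >> 12 = 0 and is rejected.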


> +     return false;
>  }
> +/*
> + * If we don't have an architecture override, use the generic rule
> + */
> +#ifndef vmemmap_can_optimize
> +#define vmemmap_can_optimize __vmemmap_can_optimize
> +#endif
> +

The sparse-vmemmap code is trivial to change to dedup with a single vmemmap
page (e.g. to align with hugetlb), hopefully via the architecture override
too. Which is to say: should VMEMMAP_RESERVE_NR get similar override
treatment to the above?
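If so, maybe something along these lines (a hypothetical sketch only,
mirroring the #ifndef pattern used for vmemmap_can_optimize above):

        /* generic header: let the arch pick the reserve count too */
        #ifndef VMEMMAP_RESERVE_NR
        #define VMEMMAP_RESERVE_NR      2
        #endif

        /* arch header (hypothetical): dedup with a single vmemmap page */
        #define VMEMMAP_RESERVE_NR      1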

>  #else
>  static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
>                                          struct dev_pagemap *pgmap)
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 7f7f9c677854..d1676afc94f1 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1020,7 +1020,7 @@ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
>       if (!vmemmap_can_optimize(altmap, pgmap))
>               return pgmap_vmemmap_nr(pgmap);
>  
> -     return 2 * (PAGE_SIZE / sizeof(struct page));
> +     return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
>  }
>  
>  static void __ref memmap_init_compound(struct page *head,

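FWIW, if I'm reading compound_nr_pages() right, with the assumed 64-byte
struct page and 4K PAGE_SIZE the optimized case initializes only

        VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page))
                = 2 * (4096 / 64) = 128

struct pages per compound page, i.e. those backed by the two reserved
head vmemmap pages; the remaining tail pages are deduplicated.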