On Wed, Oct 02, 2019 at 03:47:25PM +0200, Thomas Hellström (VMware) wrote:
> From: Thomas Hellstrom <[email protected]>
> 
> For users that want to traverse all page table entries pointing into a
> region of a struct address_space mapping, introduce a walk_page_mapping()
> function.
> 
> The walk_page_mapping() function will initially be used for dirty-
> tracking in virtual graphics drivers.
> 
> Cc: Andrew Morton <[email protected]>
> Cc: Matthew Wilcox <[email protected]>
> Cc: Will Deacon <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Rik van Riel <[email protected]>
> Cc: Minchan Kim <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Huang Ying <[email protected]>
> Cc: Jérôme Glisse <[email protected]>
> Cc: Kirill A. Shutemov <[email protected]>
> Signed-off-by: Thomas Hellstrom <[email protected]>
> ---
>  include/linux/pagewalk.h |  9 ++++
>  mm/pagewalk.c            | 99 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 107 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
> index bddd9759bab9..6ec82e92c87f 100644
> --- a/include/linux/pagewalk.h
> +++ b/include/linux/pagewalk.h
> @@ -24,6 +24,9 @@ struct mm_walk;
>   *                   "do page table walk over the current vma", returning
>   *                   a negative value means "abort current page table walk
>   *                   right now" and returning 1 means "skip the current vma"
> + * @pre_vma:            if set, called before starting walk on a non-null vma.
> + * @post_vma:           if set, called after a walk on a non-null vma, provided
> + *                      that @pre_vma and the vma walk succeeded.
>   */
>  struct mm_walk_ops {
>       int (*pud_entry)(pud_t *pud, unsigned long addr,
> @@ -39,6 +42,9 @@ struct mm_walk_ops {
>                            struct mm_walk *walk);
>       int (*test_walk)(unsigned long addr, unsigned long next,
>                       struct mm_walk *walk);
> +     int (*pre_vma)(unsigned long start, unsigned long end,
> +                    struct mm_walk *walk);
> +     void (*post_vma)(struct mm_walk *walk);
>  };
>  
>  /**
> @@ -62,5 +68,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
>               void *private);
>  int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
>               void *private);
> +int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
> +                   pgoff_t nr, const struct mm_walk_ops *ops,
> +                   void *private);
>  
>  #endif /* _LINUX_PAGEWALK_H */
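
A minimal sketch of how the two new hooks are meant to be used, as far
as I can tell (the callback names here are made up for illustration,
they are not part of the patch):

	static int my_pte_entry(pte_t *pte, unsigned long addr,
				unsigned long next, struct mm_walk *walk)
	{
		/* Operate on a single pte; walk->private carries caller data. */
		return 0;
	}

	static int my_pre_vma(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
	{
		/* Stage per-vma state; a non-zero return fails the walk. */
		return 0;
	}

	static void my_post_vma(struct mm_walk *walk)
	{
		/* Commit the staged work; note that this hook cannot fail. */
	}

	static const struct mm_walk_ops my_ops = {
		.pte_entry	= my_pte_entry,
		.pre_vma	= my_pre_vma,
		.post_vma	= my_post_vma,
	};
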
> diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> index d48c2a986ea3..658d1e5ec428 100644
> --- a/mm/pagewalk.c
> +++ b/mm/pagewalk.c
> @@ -253,13 +253,23 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>  {
>       int err = 0;
>       struct vm_area_struct *vma = walk->vma;
> +     const struct mm_walk_ops *ops = walk->ops;
> +
> +     if (vma && ops->pre_vma) {
> +             err = ops->pre_vma(start, end, walk);
> +             if (err)
> +                     return err;
> +     }
>  
>       if (vma && is_vm_hugetlb_page(vma)) {
> -             if (walk->ops->hugetlb_entry)
> +             if (ops->hugetlb_entry)
>                       err = walk_hugetlb_range(start, end, walk);
>       } else
>               err = walk_pgd_range(start, end, walk);
>  
> +     if (vma && ops->post_vma)
> +             ops->post_vma(walk);
> +
>       return err;
>  }
>  
> @@ -285,11 +295,17 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>   *  - <0 : failed to handle the current entry, and return to the caller
>   *         with error code.
>   *
> + *
>   * Before starting to walk page table, some callers want to check whether
>   * they really want to walk over the current vma, typically by checking
>   * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
>   * purpose.
>   *
> + * If operations need to be staged before and committed after a vma is walked,
> + * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
> + * since it is intended to handle commit-type operations, can't return any
> + * errors.
> + *
>   * struct mm_walk keeps current values of some common data like vma and pmd,
>   * which are useful for the access from callbacks. If you want to pass some
>   * caller-specific data to callbacks, @private should be helpful.
> @@ -376,3 +392,84 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
>               return err;
>       return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
>  }
> +
> +/**
> + * walk_page_mapping - walk all memory areas mapped into a struct address_space.
> + * @mapping: Pointer to the struct address_space
> + * @first_index: First page offset in the address_space
> + * @nr: Number of incremental page offsets to cover
> + * @ops:     operation to call during the walk
> + * @private: private data for callbacks' usage
> + *
> + * This function walks all memory areas mapped into a struct address_space.
> + * The walk is limited to the given page-size index range, but if
> + * the index boundaries cross a huge page-table entry, that entry will be
> + * included.
> + *
> + * Also see walk_page_range() for additional information.
> + *
> + * Locking:
> + *   This function can't require that the struct mm_struct::mmap_sem is held,
> + *   since @mapping may be mapped by multiple processes. Instead
> + *   @mapping->i_mmap_rwsem must be held. This might have implications in the
> + *   callbacks, and it's up to the caller to ensure that the
> + *   struct mm_struct::mmap_sem is not needed.
> + *
> + *   This also means that a caller can't rely on the struct
> + *   vm_area_struct::vm_flags to be constant across a call,
> + *   except for immutable flags. Callers requiring this shouldn't use
> + *   this function.
> + *
> + *   If @mapping allows faulting of huge pmds and puds, it is desirable
> + *   that its huge_fault() handler blocks while this function is running on
> + *   @mapping. Otherwise a race may occur where the huge entry is split when
> + *   it was intended to be handled in a huge entry callback. This requires an
> + *   external lock, for example that @mapping->i_mmap_rwsem is held in
> + *   write mode in the huge_fault() handlers.

Em. No. We have the ptl for this. It's the only lock required (plus
mmap_sem held for read) to split a PMD entry into a PTE table. And that
can happen not only from the fault path.

If you care about the compound page being split under you, take a pin
on the page or lock it. That will block split_huge_page().
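
For example (not from the patch, just to spell it out; "page" being
whatever page the caller operates on):

	get_page(page);		/* extra pin makes split_huge_page() fail */
	/* ... access the compound page safely ... */
	put_page(page);

or

	lock_page(page);	/* split_huge_page() needs the page lock */
	/* ... access the compound page safely ... */
	unlock_page(page);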

The suggestion to block the fault path is not viable (and it will not
happen magically just because of this comment).

> + */
> +int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
> +                   pgoff_t nr, const struct mm_walk_ops *ops,
> +                   void *private)
> +{
> +     struct mm_walk walk = {
> +             .ops            = ops,
> +             .private        = private,
> +     };
> +     struct vm_area_struct *vma;
> +     pgoff_t vba, vea, cba, cea;
> +     unsigned long start_addr, end_addr;
> +     int err = 0;
> +
> +     lockdep_assert_held(&mapping->i_mmap_rwsem);
> +     vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
> +                               first_index + nr - 1) {
> +		/*
> +		 * Clip to the vma: vba/vea are the vma's first and
> +		 * one-past-last page offsets; cba/cea are those bounds
> +		 * clipped to the requested index range.
> +		 */
> +             vba = vma->vm_pgoff;
> +             vea = vba + vma_pages(vma);
> +             cba = first_index;
> +             cba = max(cba, vba);
> +             cea = first_index + nr;
> +             cea = min(cea, vea);
> +
> +             start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
> +             end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
> +             if (start_addr >= end_addr)
> +                     continue;
> +
> +             walk.vma = vma;
> +             walk.mm = vma->vm_mm;
> +
> +             err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
> +             if (err > 0) {
> +                     err = 0;
> +                     break;
> +             } else if (err < 0)
> +                     break;
> +
> +             err = __walk_page_range(start_addr, end_addr, &walk);
> +             if (err)
> +                     break;
> +     }
> +
> +     return err;
> +}
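
A usage sketch, not part of the patch ("my_ops" and "my_data" are
placeholders for the caller's own callbacks and state):

	int err;

	i_mmap_lock_read(mapping);
	err = walk_page_mapping(mapping, first_index, nr, &my_ops, &my_data);
	i_mmap_unlock_read(mapping);

with i_mmap_lock_write() instead where the locking comment above asks
for write mode.
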
> -- 
> 2.20.1
> 

-- 
 Kirill A. Shutemov
