On 12/13/20 7:45 AM, Muchun Song wrote:
> In a subsequent patch, we will allocate the vmemmap pages when freeing
> HugeTLB pages. But update_and_free_page() can be called from a non-task
> context (and with hugetlb_lock held), so we defer the actual freeing to
> a workqueue to avoid having to allocate the vmemmap pages with GFP_ATOMIC.
> 
> Signed-off-by: Muchun Song <[email protected]>

It is unfortunate we need to add this complexity, but I can not think
of another way.  One small comment (no change required) below.

Reviewed-by: Mike Kravetz <[email protected]>
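
For anyone reading along, the pattern being introduced here is the usual
lockless-list-plus-workqueue deferral: producers push entries from atomic
context, and the work item drains the list in task context where sleeping
allocations are allowed. A minimal, self-contained sketch of the idiom
(illustration only, with a made-up struct; not the hugetlb code itself):

	#include <linux/llist.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct deferred_item {
		struct llist_node node;
		/* payload ... */
	};

	static LLIST_HEAD(deferred_list);

	static void deferred_workfn(struct work_struct *work)
	{
		struct llist_node *head = llist_del_all(&deferred_list);
		struct deferred_item *item, *next;

		/* The _safe variant, as each entry is freed in the loop. */
		llist_for_each_entry_safe(item, next, head, node)
			kfree(item);
	}
	static DECLARE_WORK(deferred_work, deferred_workfn);

	/* Safe to call from atomic context. */
	static void defer_free(struct deferred_item *item)
	{
		/*
		 * llist_add() returns true only when the list was
		 * previously empty, so the work is scheduled at most
		 * once per batch of entries.
		 */
		if (llist_add(&item->node, &deferred_list))
			schedule_work(&deferred_work);
	}

The patch below is the same idea, except struct page has no spare
llist_node, so ->mapping is temporarily reused as one.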

> ---
>  mm/hugetlb.c         | 77 ++++++++++++++++++++++++++++++++++++++++++++++++----
>  mm/hugetlb_vmemmap.c | 12 --------
>  mm/hugetlb_vmemmap.h | 17 ++++++++++++
>  3 files changed, 88 insertions(+), 18 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 140135fc8113..0ff9b90e524f 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1292,15 +1292,76 @@ static inline void destroy_compound_gigantic_page(struct page *page,
>                                               unsigned int order) { }
>  #endif
>  
> -static void update_and_free_page(struct hstate *h, struct page *page)
> +static void __free_hugepage(struct hstate *h, struct page *page);
> +
> +/*
> + * As update_and_free_page() can be called from a non-task context (and
> + * with hugetlb_lock held), we defer the actual freeing to a workqueue to
> + * avoid using GFP_ATOMIC to allocate a lot of vmemmap pages.
> + *
> + * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
> + * pages to be freed and frees them one-by-one. As the page->mapping pointer
> + * is going to be cleared in update_hpage_vmemmap_workfn() anyway, it is
> + * reused as the llist_node structure of a lockless linked list of huge
> + * pages to be freed.
> + */
> +static LLIST_HEAD(hpage_update_freelist);
> +
> +static void update_hpage_vmemmap_workfn(struct work_struct *work)
>  {
> -     int i;
> +     struct llist_node *node;
> +     struct page *page;
> +
> +     node = llist_del_all(&hpage_update_freelist);
>  
> +     while (node) {
> +             page = container_of((struct address_space **)node,
> +                                  struct page, mapping);
> +             node = node->next;
> +             page->mapping = NULL;
> +             __free_hugepage(page_hstate(page), page);
> +
> +             cond_resched();
> +     }
> +}
> +static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
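
A side note on the container_of() above: this works because struct
llist_node is a single 'next' pointer with the same size and alignment as
the ->mapping pointer it temporarily replaces. If you respin, it might be
worth making that assumption explicit with something like (suggestion
only, not in the patch):

	BUILD_BUG_ON(sizeof(struct llist_node) !=
		     sizeof(struct address_space *));
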
> +
> +static inline void __update_and_free_page(struct hstate *h, struct page *page)
> +{
> +     /* No need to allocate vmemmap pages */
> +     if (!free_vmemmap_pages_per_hpage(h)) {
> +             __free_hugepage(h, page);
> +             return;
> +     }
> +
> +     /*
> +      * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
> +      * pages.
> +      *
> +      * Only call schedule_work() if hpage_update_freelist is previously
> +      * empty. Otherwise, schedule_work() had been called but the workfn
> +      * hasn't retrieved the list yet.
> +      */
> +     if (llist_add((struct llist_node *)&page->mapping,
> +                   &hpage_update_freelist))
> +             schedule_work(&hpage_update_work);
> +}
> +
> +static void update_and_free_page(struct hstate *h, struct page *page)
> +{
>       if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
>               return;
>  
>       h->nr_huge_pages--;
>       h->nr_huge_pages_node[page_to_nid(page)]--;
> +
> +     __update_and_free_page(h, page);
> +}
> +
> +static void __free_hugepage(struct hstate *h, struct page *page)
> +{
> +     int i;
> +

Can we add a comment here saying that this is where the call to allocate
vmemmap pages will be inserted in a later patch?  Such a comment would
help a bit in understanding the restructuring of the code.

>       for (i = 0; i < pages_per_huge_page(h); i++) {
>               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
>                               1 << PG_referenced | 1 << PG_dirty |
> @@ -1313,13 +1374,17 @@ static void update_and_free_page(struct hstate *h, struct page *page)
>       set_page_refcounted(page);
>       if (hstate_is_gigantic(h)) {
>               /*
> -              * Temporarily drop the hugetlb_lock, because
> -              * we might block in free_gigantic_page().
> +              * Temporarily drop the hugetlb_lock, because we might block
> +              * in free_gigantic_page(). Only drop it when this type of
> +              * HugeTLB page does not support vmemmap optimization;
> +              * otherwise we are in a workqueue context that does not
> +              * hold the hugetlb_lock.
>                */
> -             spin_unlock(&hugetlb_lock);
> +             if (!free_vmemmap_pages_per_hpage(h))
> +                     spin_unlock(&hugetlb_lock);
>               destroy_compound_gigantic_page(page, huge_page_order(h));
>               free_gigantic_page(page, huge_page_order(h));
> -             spin_lock(&hugetlb_lock);
> +             if (!free_vmemmap_pages_per_hpage(h))
> +                     spin_lock(&hugetlb_lock);
>       } else {
>               __free_pages(page, huge_page_order(h));
>       }
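
One more observation on the conditional unlock above: after this patch,
__free_hugepage() runs either with hugetlb_lock held (the
!free_vmemmap_pages_per_hpage() case) or from the workqueue without it. A
lockdep annotation at the top of __free_hugepage() could document that
invariant, e.g. (suggestion only):

	if (!free_vmemmap_pages_per_hpage(h))
		lockdep_assert_held(&hugetlb_lock);

-- 
Mike Kravetz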
