On Fri 15-01-21 20:49:40, Muchun Song wrote:
> There is a race condition between __free_huge_page()
> and dissolve_free_huge_page().
> 
> CPU0:                         CPU1:
> 
> // page_count(page) == 1
> put_page(page)
>   __free_huge_page(page)
>                               dissolve_free_huge_page(page)
>                                 spin_lock(&hugetlb_lock)
>                                 // PageHuge(page) && !page_count(page)
>                                 update_and_free_page(page)
>                                 // page is freed to the buddy
>                                 spin_unlock(&hugetlb_lock)
>     spin_lock(&hugetlb_lock)
>     clear_page_huge_active(page)
>     enqueue_huge_page(page)
>     // Wrong: the page has already been freed
>     spin_unlock(&hugetlb_lock)
> 
> The race window is between put_page() and dissolve_free_huge_page().
> If dissolve_free_huge_page() wins the race, __free_huge_page() goes on
> to enqueue a page that has already been returned to the buddy
> allocator, corrupting page(s) the buddy allocator now owns.
> 
> Fix this by making sure that the page is already on the free list
> when it is dissolved.
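> 
> For context, dissolve_free_huge_page() is reached from memory hotplug
> (offlining a memory block dissolves any free huge pages in it) and
> from soft offlining. A sketch of one trigger path, assuming
> CONFIG_MEMORY_FAILURE is enabled (the address below is a placeholder
> and would have to fall inside a free huge page):
> 
>   echo 0x200000000 > /sys/devices/system/memory/soft_offline_page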
> 
> Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
> Signed-off-by: Muchun Song <[email protected]>
> Reviewed-by: Mike Kravetz <[email protected]>
> Cc: [email protected]

Acked-by: Michal Hocko <[email protected]>

Thanks!

> ---
>  mm/hugetlb.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 4741d60f8955..b99fe4a2b435 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
>  static int num_fault_mutexes;
>  struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
>  
> +static inline bool PageHugeFreed(struct page *head)
> +{
> +     return page_private(head + 4) == -1UL;
> +}
> +
> +static inline void SetPageHugeFreed(struct page *head)
> +{
> +     set_page_private(head + 4, -1UL);
> +}
> +
> +static inline void ClearPageHugeFreed(struct page *head)
> +{
> +     set_page_private(head + 4, 0);
> +}
> +
>  /* Forward declaration */
>  static int hugetlb_acct_memory(struct hstate *h, long delta);
>  
> @@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
>       list_move(&page->lru, &h->hugepage_freelists[nid]);
>       h->free_huge_pages++;
>       h->free_huge_pages_node[nid]++;
> +     SetPageHugeFreed(page);
>  }
>  
>  static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
> @@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
>  
>               list_move(&page->lru, &h->hugepage_activelist);
>               set_page_refcounted(page);
> +             ClearPageHugeFreed(page);
>               h->free_huge_pages--;
>               h->free_huge_pages_node[nid]--;
>               return page;
> @@ -1504,6 +1521,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
>       spin_lock(&hugetlb_lock);
>       h->nr_huge_pages++;
>       h->nr_huge_pages_node[nid]++;
> +     ClearPageHugeFreed(page);
>       spin_unlock(&hugetlb_lock);
>  }
>  
> @@ -1754,6 +1772,7 @@ int dissolve_free_huge_page(struct page *page)
>  {
>       int rc = -EBUSY;
>  
> +retry:
>       /* Not to disrupt normal path by vainly holding hugetlb_lock */
>       if (!PageHuge(page))
>               return 0;
> @@ -1770,6 +1789,26 @@ int dissolve_free_huge_page(struct page *page)
>               int nid = page_to_nid(head);
>               if (h->free_huge_pages - h->resv_huge_pages == 0)
>                       goto out;
> +
> +             /*
> +              * We should make sure that the page is already on the free list
> +              * when it is dissolved.
> +              */
> +             if (unlikely(!PageHugeFreed(head))) {
> +                     spin_unlock(&hugetlb_lock);
> +                     cond_resched();
> +
> +                     /*
> +                      * Theoretically, we should return -EBUSY when we
> +                      * encounter this race. In fact, we have a chance
> +                      * to successfully dissolve the page if we do a
> +                      * retry, because the race window is quite small.
> +                      * Retrying is an optimization that increases the
> +                      * success rate of dissolving the page.
> +                      */
> +                     goto retry;
> +             }
> +
>               /*
>                * Move PageHWPoison flag from head page to the raw error page,
>                * which makes any subpages rather than the error page reusable.
> -- 
> 2.11.0

-- 
Michal Hocko
SUSE Labs
