On Mon, 10 Aug 2020, Charan Teja Reddy wrote:

> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index e4896e6..25e7e12 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3106,6 +3106,7 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
>       struct zone *zone = page_zone(page);
>       struct per_cpu_pages *pcp;
>       int migratetype;
> +     int high;
>  
>       migratetype = get_pcppage_migratetype(page);
>       __count_vm_event(PGFREE);
> @@ -3128,8 +3129,19 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
>       pcp = &this_cpu_ptr(zone->pageset)->pcp;
>       list_add(&page->lru, &pcp->lists[migratetype]);
>       pcp->count++;
> -     if (pcp->count >= pcp->high) {
> -             unsigned long batch = READ_ONCE(pcp->batch);
> +     high = READ_ONCE(pcp->high);
> +     if (pcp->count >= high) {
> +             int batch;
> +
> +             batch = READ_ONCE(pcp->batch);
> +             /*
> +              * For non-default pcp struct values, high is always
> +              * greater than the batch. If high < batch then pass
> +              * proper count to free the pcp's list pages.
> +              */
> +             if (unlikely(high < batch))
> +                     batch = min(pcp->count, batch);
> +
>               free_pcppages_bulk(zone, batch, pcp);
>       }
>  }

I'm wondering whether a fix in free_pcppages_bulk() itself would be more 
appropriate here, because the count passed into it otherwise seems fragile 
if a bad value can result in a hung core?

Reply via email to