On 01/23/2017 04:39 PM, Mel Gorman wrote:
> buffered_rmqueue removes a page from a given zone and uses the per-cpu
> list for order-0. This is fine but a hypothetical caller that wanted
> multiple order-0 pages has to disable/reenable interrupts multiple
> times. This patch structures buffered_rmqueue such that it's relatively
> easy to build a bulk order-0 page allocator. There is no functional
> change.
> 
> Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
> Acked-by: Hillf Danton <hillf...@alibaba-inc.com>

Acked-by: Vlastimil Babka <vba...@suse.cz>

But I think you need a fix on top:

[...]
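Side note: the caller-locked helper split out in the hunk elided above is
what makes the bulk case easy. A hypothetical bulk order-0 caller could
then take the IRQ-off section once for all pages instead of once per page.
A rough sketch only (the function name is made up here, and the
__rmqueue_pcplist() signature is assumed from the way the fast path below
is moved):

	/*
	 * Hypothetical bulk order-0 allocator -- a sketch, not part of
	 * this patch. Takes a single IRQ-off section for all 'count'
	 * pages instead of one per page. Assumes a caller-locked
	 * __rmqueue_pcplist() that pulls one page off the pcp list and
	 * returns NULL on failure.
	 */
	static unsigned int rmqueue_bulk_order0(struct zone *zone,
				int migratetype, bool cold,
				unsigned int count, struct page **pages)
	{
		struct per_cpu_pages *pcp;
		struct list_head *list;
		unsigned long flags;
		unsigned int i;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		for (i = 0; i < count; i++) {
			pages[i] = __rmqueue_pcplist(zone, migratetype, cold,
							pcp, list);
			if (!pages[i])
				break;
		}
		local_irq_restore(flags);
		return i;	/* number of pages actually allocated */
	}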

> -struct page *buffered_rmqueue(struct zone *preferred_zone,
> +struct page *rmqueue(struct zone *preferred_zone,
>                       struct zone *zone, unsigned int order,
>                       gfp_t gfp_flags, unsigned int alloc_flags,
>                       int migratetype)
>  {
>       unsigned long flags;
>       struct page *page;
> -     bool cold = ((gfp_flags & __GFP_COLD) != 0);
>  
>       if (likely(order == 0)) {
> -             struct per_cpu_pages *pcp;
> -             struct list_head *list;
> -
> -             local_irq_save(flags);
> -             do {
> -                     pcp = &this_cpu_ptr(zone->pageset)->pcp;
> -                     list = &pcp->lists[migratetype];
> -                     if (list_empty(list)) {
> -                             pcp->count += rmqueue_bulk(zone, 0,
> -                                             pcp->batch, list,
> -                                             migratetype, cold);
> -                             if (unlikely(list_empty(list)))
> -                                     goto failed;
> -                     }
> -
> -                     if (cold)
> -                             page = list_last_entry(list, struct page, lru);
> -                     else
> -                             page = list_first_entry(list, struct page, lru);
> -
> -                     list_del(&page->lru);
> -                     pcp->count--;
> +             page = rmqueue_pcplist(preferred_zone, zone, order,
> +                             gfp_flags, migratetype);
> +             goto out;

page might be NULL here...
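(judging from the removed fast path below, the new helper presumably turns
the old "goto failed" into a NULL return when the pcp list can't be
refilled, roughly:

	if (unlikely(list_empty(list)))
		return NULL;	/* was: goto failed */

so a NULL page can reach the out: label)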

> +     }
>  
> -             } while (check_new_pcp(page));
> -     } else {
> -             /*
> -              * We most definitely don't want callers attempting to
> -              * allocate greater than order-1 page units with __GFP_NOFAIL.
> -              */
> -             WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
> -             spin_lock_irqsave(&zone->lock, flags);
> +     /*
> +      * We most definitely don't want callers attempting to
> +      * allocate greater than order-1 page units with __GFP_NOFAIL.
> +      */
> +     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
> +     spin_lock_irqsave(&zone->lock, flags);
>  
> -             do {
> -                     page = NULL;
> -                     if (alloc_flags & ALLOC_HARDER) {
> -                             page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
> -                             if (page)
> -                                     trace_mm_page_alloc_zone_locked(page, order, migratetype);
> -                     }
> -                     if (!page)
> -                             page = __rmqueue(zone, order, migratetype);
> -             } while (page && check_new_pages(page, order));
> -             spin_unlock(&zone->lock);
> +     do {
> +             page = NULL;
> +             if (alloc_flags & ALLOC_HARDER) {
> +                     page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
> +                     if (page)
> +                             trace_mm_page_alloc_zone_locked(page, order, migratetype);
> +             }
>               if (!page)
> -                     goto failed;
> -             __mod_zone_freepage_state(zone, -(1 << order),
> -                                       get_pcppage_migratetype(page));
> -     }
> +                     page = __rmqueue(zone, order, migratetype);
> +     } while (page && check_new_pages(page, order));
> +     spin_unlock(&zone->lock);
> +     if (!page)
> +             goto failed;
> +     __mod_zone_freepage_state(zone, -(1 << order),
> +                               get_pcppage_migratetype(page));
>  
>       __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
>       zone_statistics(preferred_zone, zone);
>       local_irq_restore(flags);
>  
> +out:
>       VM_BUG_ON_PAGE(bad_range(zone, page), page);

... and then this explodes?
I guess the easiest fix is to change the condition to
"page && bad_range(...)".
