> @@ -2579,20 +2612,22 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
>               struct list_head *list;
>  
>               local_irq_save(flags);
> -             pcp = &this_cpu_ptr(zone->pageset)->pcp;
> -             list = &pcp->lists[migratetype];
> -             if (list_empty(list)) {
> -                     pcp->count += rmqueue_bulk(zone, 0,
> -                                     pcp->batch, list,
> -                                     migratetype, cold);
> -                     if (unlikely(list_empty(list)))
> -                             goto failed;
> -             }
> +             do {
> +                     pcp = &this_cpu_ptr(zone->pageset)->pcp;
> +                     list = &pcp->lists[migratetype];
> +                     if (list_empty(list)) {
> +                             pcp->count += rmqueue_bulk(zone, 0,
> +                                             pcp->batch, list,
> +                                             migratetype, cold);
> +                             if (unlikely(list_empty(list)))
> +                                     goto failed;
> +                     }
>  
> -             if (cold)
> -                     page = list_last_entry(list, struct page, lru);
> -             else
> -                     page = list_first_entry(list, struct page, lru);
> +                     if (cold)
> +                             page = list_last_entry(list, struct page, lru);
> +                     else
> +                             page = list_first_entry(list, struct page, lru);
> +             } while (page && check_new_pcp(page));

This causes an infinite loop when check_new_pcp() returns 1, because
the bad page is still on the pcp list (I assume that a bad page stays
bad, so the next iteration just picks it up again). The original
kernel is free from this problem because the retry happens after
list_del(), so the same page is never checked twice. So would moving
the following 3 lines into this do-while block solve the problem?
(A rough sketch follows below.)

    __dec_zone_state(zone, NR_ALLOC_BATCH);
    list_del(&page->lru);
    pcp->count--;
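
For illustration, the fixed loop would look roughly like this
(untested, just to show where the 3 lines would move; everything else
is taken from the hunk above):

    do {
            pcp = &this_cpu_ptr(zone->pageset)->pcp;
            list = &pcp->lists[migratetype];
            if (list_empty(list)) {
                    pcp->count += rmqueue_bulk(zone, 0,
                                    pcp->batch, list,
                                    migratetype, cold);
                    if (unlikely(list_empty(list)))
                            goto failed;
            }

            if (cold)
                    page = list_last_entry(list, struct page, lru);
            else
                    page = list_first_entry(list, struct page, lru);

            /* unlink before the check, so a bad page is not
             * picked up again on the next iteration */
            __dec_zone_state(zone, NR_ALLOC_BATCH);
            list_del(&page->lru);
            pcp->count--;
    } while (check_new_pcp(page));

The "page &&" test can probably be dropped too, because the list is
known to be non-empty at that point, so list_first/last_entry()
always returns a valid page.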

There seems to be no infinite-loop issue in the order > 0 block below,
because bad pages are deleted from the free list in
__rmqueue_smallest(), as the abbreviated snippet below shows.
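
For reference, the unlink in __rmqueue_smallest() (abbreviated from
mm/page_alloc.c, so the exact lines may differ):

    page = list_first_entry_or_null(&area->free_list[migratetype],
                                    struct page, lru);
    if (!page)
            continue;
    /* the page leaves the free list here, before any check */
    list_del(&page->lru);
    rmv_page_order(page);
    area->nr_free--;
    expand(zone, page, order, current_order, area, migratetype);

So when check_new_pages() fails and the new do-while retries, the
next __rmqueue() call cannot return the same bad page.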

Thanks,
Naoya Horiguchi

>  
>               __dec_zone_state(zone, NR_ALLOC_BATCH);
>               list_del(&page->lru);
> @@ -2605,14 +2640,16 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
>               WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
>               spin_lock_irqsave(&zone->lock, flags);
>  
> -             page = NULL;
> -             if (alloc_flags & ALLOC_HARDER) {
> -                     page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
> -                     if (page)
> -                             trace_mm_page_alloc_zone_locked(page, order, migratetype);
> -             }
> -             if (!page)
> -                     page = __rmqueue(zone, order, migratetype);
> +             do {
> +                     page = NULL;
> +                     if (alloc_flags & ALLOC_HARDER) {
> +                             page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
> +                             if (page)
> +                                     trace_mm_page_alloc_zone_locked(page, order, migratetype);
> +                     }
> +                     if (!page)
> +                             page = __rmqueue(zone, order, migratetype);
> +             } while (page && check_new_pages(page, order));
>               spin_unlock(&zone->lock);
>               if (!page)
>                       goto failed;
> @@ -2979,8 +3016,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
>               page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
>                               gfp_mask, alloc_flags, ac->migratetype);
>               if (page) {
> -                     if (prep_new_page(page, order, gfp_mask, alloc_flags))
> -                             goto try_this_zone;
> +                     prep_new_page(page, order, gfp_mask, alloc_flags);
>  
>                       /*
>                        * If this is a high-order atomic allocation then check
> -- 
> 2.6.4
> 
