On Tuesday, September 30, 2014 01:31:29 PM Joerg Roedel wrote:
> From: Joerg Roedel <[email protected]>
> 
> The existing implementation of swsusp_free iterates over all
> pfns in the system and checks every bit in the two memory
> bitmaps.
> 
> This doesn't scale very well with large numbers of pfns,
> especially when the bitmaps are not populated very densely.
> Change the algorithm to iterate over the set bits in the
> bitmaps instead to make it scale better in large memory
> configurations.
> 
> Also add a memory_bm_clear_current() helper function that
> clears the bit for the last position returned from the
> memory bitmap.
> 
> This new version adds a check that the memory bitmaps are
> not NULL before they are walked; walking NULL bitmaps
> causes a kernel crash.
> 
> Signed-off-by: Joerg Roedel <[email protected]>

Queued up for 3.18, thanks!
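
For anyone reading along, the core of the new swsusp_free() is a plain
two-stream intersection walk over the set-bit positions of the two
bitmaps. Below is a minimal user-space sketch of the same pattern; the
arrays, the END sentinel and the next() helper are illustrative
stand-ins for the memory bitmap iterator and BM_END_OF_MAP, not kernel
code:

#include <stdio.h>

#define END (~0UL)	/* stands in for BM_END_OF_MAP */

/* hypothetical "next set bit" iterator over an ascending, END-terminated list */
static unsigned long next(const unsigned long **pos)
{
	return *(*pos)++;
}

int main(void)
{
	/* set-bit positions (pfns) of two bitmaps, ascending, END-terminated */
	static const unsigned long forbidden[] = { 3, 5, 9, 12, END };
	static const unsigned long free_bits[] = { 1, 5, 9, 20, END };
	const unsigned long *fb_pos = forbidden, *fr_pos = free_bits;
	unsigned long fb, fr;

	fb = next(&fb_pos);
	fr = next(&fr_pos);

	for (;;) {
		/* advance whichever stream is behind until both agree */
		while (fb != fr) {
			if (fb < fr)
				fb = next(&fb_pos);
			else
				fr = next(&fr_pos);
		}
		if (fr == END)
			break;
		printf("pfn %lu is set in both bitmaps\n", fr);
		fb = next(&fb_pos);
		fr = next(&fr_pos);
	}
	return 0;
}

Each position is visited at most once per bitmap, so the walk is linear
in the number of set bits rather than in the number of pfns in the
system, which is what makes the new swsusp_free() cheaper on sparsely
populated bitmaps.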

> ---
>  kernel/power/snapshot.c | 54 +++++++++++++++++++++++++++++++++++--------------
>  1 file changed, 39 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
> index f1604d8..791a618 100644
> --- a/kernel/power/snapshot.c
> +++ b/kernel/power/snapshot.c
> @@ -725,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
>       clear_bit(bit, addr);
>  }
>  
> +static void memory_bm_clear_current(struct memory_bitmap *bm)
> +{
> +     int bit;
> +
> +     bit = max(bm->cur.node_bit - 1, 0);
> +     clear_bit(bit, bm->cur.node->data);
> +}
> +
>  static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
>  {
>       void *addr;
> @@ -1333,23 +1341,39 @@ static struct memory_bitmap copy_bm;
>  
>  void swsusp_free(void)
>  {
> -     struct zone *zone;
> -     unsigned long pfn, max_zone_pfn;
> +     unsigned long fb_pfn, fr_pfn;
>  
> -     for_each_populated_zone(zone) {
> -             max_zone_pfn = zone_end_pfn(zone);
> -             for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
> -                     if (pfn_valid(pfn)) {
> -                             struct page *page = pfn_to_page(pfn);
> -
> -                             if (swsusp_page_is_forbidden(page) &&
> -                                 swsusp_page_is_free(page)) {
> -                                     swsusp_unset_page_forbidden(page);
> -                                     swsusp_unset_page_free(page);
> -                                     __free_page(page);
> -                             }
> -                     }
> +     if (!forbidden_pages_map || !free_pages_map)
> +             goto out;
> +
> +     memory_bm_position_reset(forbidden_pages_map);
> +     memory_bm_position_reset(free_pages_map);
> +
> +loop:
> +     fr_pfn = memory_bm_next_pfn(free_pages_map);
> +     fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
> +
> +     /*
> +      * Find the next bit set in both bitmaps. This is guaranteed to
> +      * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
> +      */
> +     do {
> +             if (fb_pfn < fr_pfn)
> +                     fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
> +             if (fr_pfn < fb_pfn)
> +                     fr_pfn = memory_bm_next_pfn(free_pages_map);
> +     } while (fb_pfn != fr_pfn);
> +
> +     if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
> +             struct page *page = pfn_to_page(fr_pfn);
> +
> +             memory_bm_clear_current(forbidden_pages_map);
> +             memory_bm_clear_current(free_pages_map);
> +             __free_page(page);
> +             goto loop;
>       }
> +
> +out:
>       nr_copy_pages = 0;
>       nr_meta_pages = 0;
>       restore_pblist = NULL;
> 

-- 
I speak only for myself.
Rafael J. Wysocki, Intel Open Source Technology Center.