[PATCH 04/15] mm: compaction: introduce isolate_freepages_range()

2012-02-03 Thread Marek Szyprowski
From: Michal Nazarewicz min...@mina86.com

This commit introduces isolate_freepages_range() function which
generalises isolate_freepages_block() so that it can be used on
arbitrary PFN ranges.

isolate_freepages_block() is left with only minor changes.
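
To illustrate the intended use (the caller, PFN values and error handling
below are hypothetical, not part of this patch):

	/*
	 * Sketch of a hypothetical caller that needs the physical
	 * range [start, end) free.  On success the return value is
	 * one-past-the-last isolated PFN; 0 means some page in the
	 * range was invalid or not free.
	 */
	unsigned long isolated_end = isolate_freepages_range(start, end);
	if (!isolated_end)
		return -EBUSY;	/* range busy; caller may retry later */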

Signed-off-by: Michal Nazarewicz min...@mina86.com
Signed-off-by: Marek Szyprowski m.szyprow...@samsung.com
Acked-by: Mel Gorman m...@csn.ul.ie
Tested-by: Rob Clark rob.cl...@linaro.org
Tested-by: Ohad Ben-Cohen o...@wizery.com
Tested-by: Benjamin Gaignard benjamin.gaign...@linaro.org
---
 mm/compaction.c |  111 ++-
 1 files changed, 93 insertions(+), 18 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 9bbcc53..9fef891 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -54,24 +54,20 @@ static unsigned long release_freepages(struct list_head *freelist)
return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-   unsigned long blockpfn,
-   struct list_head *freelist)
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+   unsigned long end_pfn,
+   struct list_head *freelist,
+   bool strict)
 {
-   unsigned long zone_end_pfn, end_pfn;
int nr_scanned = 0, total_isolated = 0;
struct page *cursor;
 
-   /* Get the last PFN we should scan for free pages at */
-   zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-   end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-   /* Find the first usable PFN in the block to initialse page cursor */
-   for (; blockpfn < end_pfn; blockpfn++) {
-   if (pfn_valid_within(blockpfn))
-   break;
-   }
cursor = pfn_to_page(blockpfn);
 
/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +75,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
int isolated, i;
struct page *page = cursor;
 
-   if (!pfn_valid_within(blockpfn))
+   if (!pfn_valid_within(blockpfn)) {
+   if (strict)
+   return 0;
continue;
+   }
nr_scanned++;
 
-   if (!PageBuddy(page))
+   if (!PageBuddy(page)) {
+   if (strict)
+   return 0;
continue;
+   }
 
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+   if (!isolated && strict)
+   return 0;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -105,6 +109,73 @@ static unsigned long isolate_freepages_block(struct zone *zone,
return total_isolated;
 }
 
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors and cause the
+ * function to undo its actions and return zero.
+ *
+ * Otherwise, the function returns one-past-the-last PFN of the
+ * isolated page (which may be greater than end_pfn if the end fell
+ * in the middle of a free page).
+ */
+static unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+   unsigned long isolated, pfn, block_end_pfn, flags;
+   struct zone *zone = NULL;
+   LIST_HEAD(freelist);
+
+   if (pfn_valid(start_pfn))
+   zone = page_zone(pfn_to_page(start_pfn));
+
+   for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+   if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+   break;
+
+   /*
+    * On subsequent iterations ALIGN() is actually not needed,
+    * but we keep it so as not to complicate the code.
+    */
+   block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+   block_end_pfn = min(block_end_pfn, end_pfn);
+
+   spin_lock_irqsave(&zone->lock, flags);
+   isolated = isolate_freepages_block(pfn, block_end_pfn,
+                                      &freelist, true);
+   spin_unlock_irqrestore(&zone->lock, flags);
+
+   /*
+    * In strict mode, isolate_freepages_block() returns 0 if
+    * there are any holes in the block (ie. invalid PFNs or
+    * non-free pages).
+    */
+   if (!isolated)
+   break;

Re: [PATCH 04/15] mm: compaction: introduce isolate_freepages_range()

2012-01-30 Thread Mel Gorman
On Thu, Jan 26, 2012 at 10:00:46AM +0100, Marek Szyprowski wrote:
 From: Michal Nazarewicz min...@mina86.com
 
 This commit introduces isolate_freepages_range() function which
 generalises isolate_freepages_block() so that it can be used on
 arbitrary PFN ranges.
 
 isolate_freepages_block() is left with only minor changes.
 

The minor changes to isolate_freepages_block() look fine in
terms of how current compaction works. I have a minor comment on
isolate_freepages_range() but it is up to you whether to address them
or not. Whether you alter isolate_freepages_range() or not;

Acked-by: Mel Gorman m...@csn.ul.ie

 <SNIP>
 @@ -105,6 +109,80 @@ static unsigned long isolate_freepages_block(struct zone *zone,
   return total_isolated;
  }
  
 +/**
 + * isolate_freepages_range() - isolate free pages.
 + * @start_pfn: The first PFN to start isolating.
 + * @end_pfn:   The one-past-last PFN.
 + *
 + * Non-free pages, invalid PFNs, or zone boundaries within the
 + * [start_pfn, end_pfn) range are considered errors and cause the
 + * function to undo its actions and return zero.
 + *
 + * Otherwise, the function returns one-past-the-last PFN of the
 + * isolated page (which may be greater than end_pfn if the end fell
 + * in the middle of a free page).
 + */
 +static unsigned long
 +isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 +{
 + unsigned long isolated, pfn, block_end_pfn, flags;
 + struct zone *zone = NULL;
 + LIST_HEAD(freelist);
 + struct page *page;
 +
 + for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
 + if (!pfn_valid(pfn))
 + break;
 +
 + if (!zone)
 + zone = page_zone(pfn_to_page(pfn));
 + else if (zone != page_zone(pfn_to_page(pfn)))
 + break;
 +

So what you are checking for here is if you straddle zones.
You could just initialise zone outside of the for loop. You can
then check outside the loop if end_pfn is in a different zone to
start_pfn. If it is, either adjust end_pfn accordingly or bail the
entire operation avoiding the need for release_freepages() later. This
will be a little cheaper.

 + /*
 +  * On subsequent iterations round_down() is actually not
 +  * needed, but we keep it so as not to complicate the code.
 +  */
 + block_end_pfn = round_down(pfn, pageblock_nr_pages)
 + + pageblock_nr_pages;

Seems a little more involved than it needs to be. Something like
this might suit and be a bit nicer?

block_end_pfn = ALIGN(pfn+1, pageblock_nr_pages);
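
The two forms are equivalent; a quick check with pageblock_nr_pages == 512:

	round_down(1000, 512) + 512 == 1024	/* pfn in mid-block */
	ALIGN(1000 + 1, 512)        == 1024
	round_down(1024, 512) + 512 == 1536	/* pfn on a block boundary */
	ALIGN(1024 + 1, 512)        == 1536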

 + block_end_pfn = min(block_end_pfn, end_pfn);
 +
 + spin_lock_irqsave(&zone->lock, flags);
 + isolated = isolate_freepages_block(pfn, block_end_pfn,
 +                                    &freelist, true);
 + spin_unlock_irqrestore(&zone->lock, flags);
 +
 + /*
 +  * In strict mode, isolate_freepages_block() returns 0 if
 +  * there are any holes in the block (ie. invalid PFNs or
 +  * non-free pages).
 +  */
 + if (!isolated)
 + break;
 +
 + /*
 +  * If we managed to isolate pages, it is always (1 << n) *
 +  * pageblock_nr_pages for some non-negative n.  (Max order
 +  * page may span two pageblocks).
 +  */
 + }
 +
 + /* split_free_page does not map the pages */
 + list_for_each_entry(page, &freelist, lru) {
 + arch_alloc_page(page, 0);
 + kernel_map_pages(page, 1, 1);
 + }
 +

This block is copied in two places - isolate_freepages and
isolate_freepages_range() so sharing a common helper would be nice. I
suspect you didn't because it would interfere with existing code more
than was strictly necessary which I complained about previously as
it made review harder. If that was your thinking, then just create
this helper in a separate patch. It's not critical though.
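
For instance (helper name illustrative only, a sketch of what such a
common helper could look like):

	/* split_free_page() does not map the pages it returns */
	static void map_pages(struct list_head *list)
	{
		struct page *page;

		list_for_each_entry(page, list, lru) {
			arch_alloc_page(page, 0);
			kernel_map_pages(page, 1, 1);
		}
	}

Both call sites then collapse to a single map_pages(&freelist).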

 + if (pfn < end_pfn) {
 + /* Loop terminated early, cleanup. */
 + release_freepages(&freelist);
 + return 0;
 + }
 +
 + /* We don't use freelists for anything. */
 + return pfn;
 +}
 +
  /* Returns true if the page is within a block suitable for migration to */
  static bool suitable_migration_target(struct page *page)
  {

-- 
Mel Gorman
SUSE Labs


Re: [PATCH 04/15] mm: compaction: introduce isolate_freepages_range()

2012-01-30 Thread Mel Gorman
On Mon, Jan 30, 2012 at 11:48:20AM +0000, Mel Gorman wrote:
  +   if (!zone)
  +   zone = page_zone(pfn_to_page(pfn));
  +   else if (zone != page_zone(pfn_to_page(pfn)))
  +   break;
  +
 
 So what you are checking for here is if you straddle zones.
 You could just initialise zone outside of the for loop. You can
 then check outside the loop if end_pfn is in a different zone to
 start_pfn. If it is, either adjust end_pfn accordingly or bail the
 entire operation avoiding the need for release_freepages() later. This
 will be a little cheaper.
 

Whoops, silly me! You are watching for overlapping zones which can
happen in some rare configurations and for that checking page_zone()
like this is necessary. You can still initialise zone outside the loop
but the page_zone() check is still necessary.

My bad.
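
For completeness, a rough sketch of the shape that still hoists the
initialisation while keeping the per-PFN check:

	/* Sketch: pin the zone once, but keep validating every PFN */
	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		/* catches both invalid PFNs and overlapping-zone pages */
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;
		/* ... isolate the pageblock as before ... */
	}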

-- 
Mel Gorman
SUSE Labs


[PATCH 04/15] mm: compaction: introduce isolate_freepages_range()

2012-01-26 Thread Marek Szyprowski
From: Michal Nazarewicz min...@mina86.com

This commit introduces isolate_freepages_range() function which
generalises isolate_freepages_block() so that it can be used on
arbitrary PFN ranges.

isolate_freepages_block() is left with only minor changes.

Signed-off-by: Michal Nazarewicz min...@mina86.com
Signed-off-by: Marek Szyprowski m.szyprow...@samsung.com
---
 mm/compaction.c |  118 ++
 1 files changed, 100 insertions(+), 18 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index a42bbdd..63f82be 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -54,24 +54,20 @@ static unsigned long release_freepages(struct list_head *freelist)
return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-   unsigned long blockpfn,
-   struct list_head *freelist)
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+   unsigned long end_pfn,
+   struct list_head *freelist,
+   bool strict)
 {
-   unsigned long zone_end_pfn, end_pfn;
int nr_scanned = 0, total_isolated = 0;
struct page *cursor;
 
-   /* Get the last PFN we should scan for free pages at */
-   zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-   end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-   /* Find the first usable PFN in the block to initialse page cursor */
-   for (; blockpfn < end_pfn; blockpfn++) {
-   if (pfn_valid_within(blockpfn))
-   break;
-   }
cursor = pfn_to_page(blockpfn);
 
/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +75,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
int isolated, i;
struct page *page = cursor;
 
-   if (!pfn_valid_within(blockpfn))
+   if (!pfn_valid_within(blockpfn)) {
+   if (strict)
+   return 0;
continue;
+   }
nr_scanned++;
 
-   if (!PageBuddy(page))
+   if (!PageBuddy(page)) {
+   if (strict)
+   return 0;
continue;
+   }
 
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+   if (!isolated && strict)
+   return 0;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -105,6 +109,80 @@ static unsigned long isolate_freepages_block(struct zone *zone,
return total_isolated;
 }
 
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors and cause the
+ * function to undo its actions and return zero.
+ *
+ * Otherwise, the function returns one-past-the-last PFN of the
+ * isolated page (which may be greater than end_pfn if the end fell
+ * in the middle of a free page).
+ */
+static unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+   unsigned long isolated, pfn, block_end_pfn, flags;
+   struct zone *zone = NULL;
+   LIST_HEAD(freelist);
+   struct page *page;
+
+   for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+   if (!pfn_valid(pfn))
+   break;
+
+   if (!zone)
+   zone = page_zone(pfn_to_page(pfn));
+   else if (zone != page_zone(pfn_to_page(pfn)))
+   break;
+
+   /*
+    * On subsequent iterations round_down() is actually not
+    * needed, but we keep it so as not to complicate the code.
+    */
+   block_end_pfn = round_down(pfn, pageblock_nr_pages)
+   + pageblock_nr_pages;
+   block_end_pfn = min(block_end_pfn, end_pfn);
+
+   spin_lock_irqsave(&zone->lock, flags);
+   isolated = isolate_freepages_block(pfn, block_end_pfn,
+                                      &freelist, true);
+   spin_unlock_irqrestore(&zone->lock, flags);
+
+   /*
+    * In strict mode, isolate_freepages_block() returns 0 if
+    * there are any holes in the block (ie. invalid PFNs or
+    * non-free pages).
+    */
+   if (!isolated)
+   break;
+
+   /*
+    * If we managed to isolate pages, it is always (1 << n) *
+    * pageblock_nr_pages for some non-negative n.  (Max order
+    * page may span two pageblocks).
+    */
+   }
+
+   /* split_free_page does not map the pages */
+   list_for_each_entry(page, &freelist, lru) {
+   arch_alloc_page(page, 0);
+   kernel_map_pages(page, 1, 1);
+   }
+
+   if (pfn < end_pfn) {
+   /* Loop terminated early, cleanup. */
+   release_freepages(&freelist);
+   return 0;
+   }
+
+   /* We don't use freelists for anything. */
+   return pfn;
+}
+
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {