From: Zi Yan <z...@nvidia.com>

Enable set_migratetype_isolate() to check a specified sub-range for
unmovable pages during isolation. Page isolation is performed at
max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) granularity, but not all
pages within that granularity are intended to be isolated. For example,
alloc_contig_range(), which uses page isolation, allows ranges without
alignment. This commit makes the unmovable page check look only at pages
within the specified range, so that page isolation can succeed for any
non-overlapping ranges.
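
As a rough, standalone illustration (not part of this patch; the pageblock
size, pfn values, and names below are made up for the example), the
unmovable-page scan inside a pageblock is clamped to the caller's range
before any page is inspected:

	#include <stdio.h>

	#define EXAMPLE_PAGEBLOCK_NR_PAGES 512UL	/* illustrative value only */

	int main(void)
	{
		/* unaligned request from a hypothetical caller */
		unsigned long start_pfn = 1000, end_pfn = 1300;
		/* first pfn of the pageblock containing start_pfn */
		unsigned long pageblock_start = 512;
		unsigned long block_end = pageblock_start + EXAMPLE_PAGEBLOCK_NR_PAGES;

		/* clamp the scan window to the requested range */
		unsigned long first_pfn = pageblock_start > start_pfn ?
					  pageblock_start : start_pfn;
		unsigned long last_pfn = block_end < end_pfn ? block_end : end_pfn;

		/* only [first_pfn, last_pfn) is checked for unmovable pages */
		printf("check pfns [%lu, %lu)\n", first_pfn, last_pfn);
		return 0;
	}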

Signed-off-by: Zi Yan <z...@nvidia.com>
---
 include/linux/page-isolation.h | 10 ++++++++
 mm/page_alloc.c                | 13 +---------
 mm/page_isolation.c            | 47 +++++++++++++++++++++-------------
 3 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index e14eddf6741a..eb4a208fe907 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -15,6 +15,16 @@ static inline bool is_migrate_isolate(int migratetype)
 {
        return migratetype == MIGRATE_ISOLATE;
 }
+static inline unsigned long pfn_max_align_down(unsigned long pfn)
+{
+       return ALIGN_DOWN(pfn, MAX_ORDER_NR_PAGES);
+}
+
+static inline unsigned long pfn_max_align_up(unsigned long pfn)
+{
+       return ALIGN(pfn, MAX_ORDER_NR_PAGES);
+}
+
 #else
 static inline bool has_isolate_pageblock(struct zone *zone)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 228751019fd8..b900315657cf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8949,16 +8949,6 @@ void *__init alloc_large_system_hash(const char *tablename,
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
-static unsigned long pfn_max_align_down(unsigned long pfn)
-{
-       return ALIGN_DOWN(pfn, MAX_ORDER_NR_PAGES);
-}
-
-static unsigned long pfn_max_align_up(unsigned long pfn)
-{
-       return ALIGN(pfn, MAX_ORDER_NR_PAGES);
-}
-
 #if defined(CONFIG_DYNAMIC_DEBUG) || \
        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 /* Usage: See admin-guide/dynamic-debug-howto.rst */
@@ -9103,8 +9093,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         * put back to page allocator so that buddy can use them.
         */
 
-       ret = start_isolate_page_range(pfn_max_align_down(start),
-                                      pfn_max_align_up(end), migratetype, 0);
+       ret = start_isolate_page_range(start, end, migratetype, 0);
        if (ret)
                return ret;
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b34f1310aeaa..e0afc3ee8cf9 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -16,7 +16,8 @@
 #include <trace/events/page_isolation.h>
 
 /*
- * This function checks whether pageblock includes unmovable pages or not.
+ * This function checks whether the pageblock within [start_pfn, end_pfn)
+ * includes unmovable pages or not.
  *
  * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
@@ -29,11 +30,14 @@
  *
  */
 static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
-                                int migratetype, int flags)
+                                int migratetype, int flags,
+                                unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned long iter = 0;
-       unsigned long pfn = page_to_pfn(page);
-       unsigned long offset = pfn % pageblock_nr_pages;
+       unsigned long first_pfn = max(page_to_pfn(page), start_pfn);
+       unsigned long pfn = first_pfn;
+       unsigned long last_pfn = min(ALIGN(pfn + 1, pageblock_nr_pages), end_pfn);
+
+       page = pfn_to_page(pfn);
 
        if (is_migrate_cma_page(page)) {
                /*
@@ -47,8 +51,8 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                return page;
        }
 
-       for (; iter < pageblock_nr_pages - offset; iter++) {
-               page = pfn_to_page(pfn + iter);
+       for (pfn = first_pfn; pfn < last_pfn; pfn++) {
+               page = pfn_to_page(pfn);
 
                /*
                 * Both, bootmem allocations and memory holes are marked
@@ -85,7 +89,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                        }
 
                        skip_pages = compound_nr(head) - (page - head);
-                       iter += skip_pages - 1;
+                       pfn += skip_pages - 1;
                        continue;
                }
 
@@ -97,7 +101,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                 */
                if (!page_ref_count(page)) {
                        if (PageBuddy(page))
-                               iter += (1 << buddy_order(page)) - 1;
+                               pfn += (1 << buddy_order(page)) - 1;
                        continue;
                }
 
@@ -134,7 +138,13 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
        return NULL;
 }
 
-static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
+/*
+ * This function sets the pageblock migratetype to MIGRATE_ISOLATE if no
+ * unmovable page is present in [start_pfn, end_pfn). The pageblock must be
+ * within [start_pfn, end_pfn).
+ */
+static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
+                       unsigned long start_pfn, unsigned long end_pfn)
 {
        struct zone *zone = page_zone(page);
        struct page *unmovable;
@@ -156,7 +166,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
-       unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
+       unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags,
+                               start_pfn, end_pfn);
        if (!unmovable) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);
@@ -267,7 +278,6 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * be MIGRATE_ISOLATE.
  * @start_pfn:         The lower PFN of the range to be isolated.
  * @end_pfn:           The upper PFN of the range to be isolated.
- *                     start_pfn/end_pfn must be aligned to pageblock_order.
  * @migratetype:       Migrate type to set in error recovery.
  * @flags:             The following flags are allowed (they can be combined in
  *                     a bit mask)
@@ -309,15 +319,16 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        struct page *page;
 
-       BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
-       BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
+       unsigned long isolate_start = pfn_max_align_down(start_pfn);
+       unsigned long isolate_end = pfn_max_align_up(end_pfn);
 
-       for (pfn = start_pfn;
-            pfn < end_pfn;
+       for (pfn = isolate_start;
+            pfn < isolate_end;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
-               if (page && set_migratetype_isolate(page, migratetype, flags)) {
-                       undo_isolate_page_range(start_pfn, pfn, migratetype);
+               if (page && set_migratetype_isolate(page, migratetype, flags,
+                                       start_pfn, end_pfn)) {
+                       undo_isolate_page_range(isolate_start, pfn, migratetype);
                        return -EBUSY;
                }
        }
-- 
2.34.1
