Fixed the screw-ups from the initial patch split-up, as per Hillf's feedback.
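
For quick reference, the substitution below just swaps one helper for the
other; the two definitions, as they appear in the hunks that follow, are:

        /* removed: relies on an explicit flag set in init_currently_empty_zone() */
        static inline bool zone_is_initialized(struct zone *zone)
        {
                return zone->initialized;
        }

        /* kept: tells the same thing from the zone's span */
        static inline bool zone_is_empty(struct zone *zone)
        {
                return zone->spanned_pages == 0;
        }

so every !zone_is_initialized(zone) check becomes zone_is_empty(zone) and
vice versa.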
---
From 8be6c5e47de66210e47710c80e72e8abd899017b Mon Sep 17 00:00:00 2001
From: Michal Hocko <[email protected]>
Date: Wed, 29 Mar 2017 15:11:30 +0200
Subject: [PATCH] mm: get rid of zone_is_initialized

There shouldn't be any need for a special zone->initialized flag when we
can tell the same thing by checking whether there are any pages spanned
by the zone. Remove zone_is_initialized() and replace it with
zone_is_empty(), which can be used for the same set of tests.

This shouldn't have any visible effect.

Signed-off-by: Michal Hocko <[email protected]>
---
 include/linux/mmzone.h | 7 -------
 mm/memory_hotplug.c    | 6 +++---
 mm/page_alloc.c        | 3 +--
 3 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 618499159a7c..3bac3ed71c7a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -442,8 +442,6 @@ struct zone {
        seqlock_t               span_seqlock;
 #endif
 
-       int initialized;
-
        /* Write-intensive fields used from the page allocator */
        ZONE_PADDING(_pad1_)
 
@@ -520,11 +518,6 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
 }
 
 
-static inline bool zone_is_initialized(struct zone *zone)
-{
-       return zone->initialized;
-}
-
 static inline bool zone_is_empty(struct zone *zone)
 {
        return zone->spanned_pages == 0;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6fb6bd2df787..699f5a2a8efd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -348,7 +348,7 @@ static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
 static int __ref ensure_zone_is_initialized(struct zone *zone,
                        unsigned long start_pfn, unsigned long num_pages)
 {
-       if (!zone_is_initialized(zone))
+       if (zone_is_empty(zone))
                return init_currently_empty_zone(zone, start_pfn, num_pages);
 
        return 0;
@@ -1051,7 +1051,7 @@ bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
-                       if (zone_is_initialized(zone - idx + i))
+                       if (!zone_is_empty(zone - idx + i))
                                return false;
        }
 
@@ -1062,7 +1062,7 @@ bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
-                       if (zone_is_initialized(zone - idx + i))
+                       if (!zone_is_empty(zone - idx + i))
                                return false;
        }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5ee8a26fa383..756353d1e293 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -795,7 +795,7 @@ static inline void __free_one_page(struct page *page,
 
        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
-       VM_BUG_ON(!zone_is_initialized(zone));
+       VM_BUG_ON(zone_is_empty(zone));
        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
 
        VM_BUG_ON(migratetype == -1);
@@ -5535,7 +5535,6 @@ int __meminit init_currently_empty_zone(struct zone *zone,
                        zone_start_pfn, (zone_start_pfn + size));
 
        zone_init_free_lists(zone);
-       zone->initialized = 1;
 
        return 0;
 }
-- 
2.11.0

-- 
Michal Hocko
SUSE Labs
