... and clarify why this is needed at all right now. It all boils down
to false positives. We will try to remove the false positives for
!ZONE_DEVICE memory soon; however, for ZONE_DEVICE memory we won't be
able to get rid of false positives easily.

Don't just detect "all holes"; instead, try to shrink the zone span
using the existing helper functions we already have.
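
For illustration only (not part of the change itself): a compact
userspace sketch of the same "recompute the span from the first and
last still-populated section" idea. The toy_zone struct, the
section_populated() predicate and the find_smallest()/find_biggest()
helpers are made up for this example; in the kernel the equivalents
are find_smallest_section_pfn()/find_biggest_section_pfn() and the
pfn/zone checks they perform, as in the diff below.

/*
 * Toy userspace model of the shrink_zone_span() change below.
 * A predicate says which sections still hold pages of the "zone";
 * the span is recomputed from the first and last populated section.
 */
#include <stdbool.h>
#include <stdio.h>

#define SECTION_SIZE 8UL	/* toy stand-in for PAGES_PER_SECTION */

struct toy_zone {
	unsigned long start_pfn;
	unsigned long spanned_pages;
};

/* Pretend only pfns 16..31 (sections 2 and 3) remain populated. */
static bool section_populated(unsigned long pfn)
{
	return pfn >= 16 && pfn < 32;
}

/* First pfn of the lowest populated section in [start, end), 0 if none. */
static unsigned long find_smallest(unsigned long start, unsigned long end)
{
	for (unsigned long pfn = start; pfn < end; pfn += SECTION_SIZE)
		if (section_populated(pfn))
			return pfn;
	return 0;
}

/* Last pfn of the highest populated section in [start, end), 0 if none. */
static unsigned long find_biggest(unsigned long start, unsigned long end)
{
	for (unsigned long pfn = end - SECTION_SIZE; ; pfn -= SECTION_SIZE) {
		if (section_populated(pfn))
			return pfn + SECTION_SIZE - 1;
		if (pfn == start)
			return 0;
	}
}

static void shrink_toy_zone(struct toy_zone *z)
{
	unsigned long end = z->start_pfn + z->spanned_pages;
	unsigned long pfn = find_smallest(z->start_pfn, end);

	if (pfn) {
		/* shrink from the bottom */
		z->spanned_pages = end - pfn;
		z->start_pfn = pfn;

		/* shrink from the top */
		pfn = find_biggest(z->start_pfn, end);
		if (pfn)
			z->spanned_pages = pfn - z->start_pfn + 1;
	}
	if (!pfn) {
		/* nothing populated left at all */
		z->start_pfn = 0;
		z->spanned_pages = 0;
	}
}

int main(void)
{
	struct toy_zone z = { .start_pfn = 0, .spanned_pages = 64 };

	shrink_toy_zone(&z);
	/* prints "start_pfn=16 spanned_pages=16" */
	printf("start_pfn=%lu spanned_pages=%lu\n",
	       z.start_pfn, z.spanned_pages);
	return 0;
}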

Cc: Andrew Morton <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Wei Yang <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
---
 mm/memory_hotplug.c | 45 +++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index d3c34bbeb36d..663853bf97ed 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -411,32 +411,33 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                }
        }
 
-       /*
-        * The section is not biggest or smallest mem_section in the zone, it
-        * only creates a hole in the zone. So in this case, we need not
-        * change the zone. But perhaps, the zone has only hole data. Thus
-        * it check the zone has only hole or not.
-        */
-       for (pfn = zone->zone_start_pfn;
-            pfn < zone_end_pfn(zone); pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_valid(pfn)))
-                       continue;
-
-               if (page_zone(pfn_to_page(pfn)) != zone)
-                       continue;
-
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
-                       continue;
-
-               /* If we find valid section, we have nothing to do */
+       if (!zone->spanned_pages) {
                zone_span_writeunlock(zone);
                return;
        }
 
-       /* The zone has no valid section */
-       zone->zone_start_pfn = 0;
-       zone->spanned_pages = 0;
+       /*
+        * Due to false positives in previous shrink attempts, it can happen
+        * that we can shrink the zones further (possibly to zero). Once we
+        * can reliably detect which PFNs actually belong to a zone
+        * (especially for ZONE_DEVICE memory where we don't have online
+        * sections), this can go.
+        */
+       pfn = find_smallest_section_pfn(nid, zone, zone->zone_start_pfn,
+                                       zone_end_pfn(zone));
+       if (pfn) {
+               zone->spanned_pages = zone_end_pfn(zone) - pfn;
+               zone->zone_start_pfn = pfn;
+
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
+                                              zone_end_pfn(zone));
+               if (pfn)
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+       }
+       if (!pfn) {
+               zone->zone_start_pfn = 0;
+               zone->spanned_pages = 0;
+       }
        zone_span_writeunlock(zone);
 }
 
-- 
2.21.0
