On Mon, May 27, 2019 at 01:11:49PM +0200, David Hildenbrand wrote:
>No longer needed, the callers of arch_add_memory() can handle this
>manually.
>
>Cc: Andrew Morton <a...@linux-foundation.org>
>Cc: David Hildenbrand <da...@redhat.com>
>Cc: Michal Hocko <mho...@suse.com>
>Cc: Oscar Salvador <osalva...@suse.com>
>Cc: Pavel Tatashin <pasha.tatas...@soleen.com>
>Cc: Wei Yang <richard.weiy...@gmail.com>
>Cc: Joonsoo Kim <iamjoonsoo....@lge.com>
>Cc: Qian Cai <c...@lca.pw>
>Cc: Arun KS <aru...@codeaurora.org>
>Cc: Mathieu Malaterre <ma...@debian.org>
>Signed-off-by: David Hildenbrand <da...@redhat.com>

Reviewed-by: Wei Yang <richardw.y...@linux.intel.com>
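
For anyone reading along: this flag can go because the previous patch in
this series moved memory block device creation out of __add_section() and
into the caller. A simplified sketch of the resulting add_memory_resource()
flow (the helper name create_memory_block_devices() comes from that earlier
patch; error handling trimmed):

	/* add the memory to the kernel's page tables / sections */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret < 0)
		goto error;

	/*
	 * Create the sysfs memory block devices -- formerly triggered
	 * by MHP_MEMBLOCK_API inside __add_section().
	 */
	ret = create_memory_block_devices(start, size);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}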

>---
> include/linux/memory_hotplug.h | 8 --------
> mm/memory_hotplug.c            | 9 +++------
> 2 files changed, 3 insertions(+), 14 deletions(-)
>
>diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
>index 2d4de313926d..2f1f87e13baa 100644
>--- a/include/linux/memory_hotplug.h
>+++ b/include/linux/memory_hotplug.h
>@@ -128,14 +128,6 @@ extern void arch_remove_memory(int nid, u64 start, u64 size,
> extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
>                          unsigned long nr_pages, struct vmem_altmap *altmap);
> 
>-/*
>- * Do we want sysfs memblock files created. This will allow userspace to online
>- * and offline memory explicitly. Lack of this bit means that the caller has to
>- * call move_pfn_range_to_zone to finish the initialization.
>- */
>-
>-#define MHP_MEMBLOCK_API               (1<<0)
>-
> /* reasonably generic interface to expand the physical pages */
> extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
>                      struct mhp_restrictions *restrictions);
>diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>index b1fde90bbf19..9a92549ef23b 100644
>--- a/mm/memory_hotplug.c
>+++ b/mm/memory_hotplug.c
>@@ -251,7 +251,7 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
> #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
> 
> static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
>-              struct vmem_altmap *altmap, bool want_memblock)
>+                                 struct vmem_altmap *altmap)
> {
>       int ret;
> 
>@@ -294,8 +294,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
>       }
> 
>       for (i = start_sec; i <= end_sec; i++) {
>-              err = __add_section(nid, section_nr_to_pfn(i), altmap,
>-                              restrictions->flags & MHP_MEMBLOCK_API);
>+              err = __add_section(nid, section_nr_to_pfn(i), altmap);
> 
>               /*
>                * EEXIST is finally dealt with by ioresource collision
>@@ -1067,9 +1066,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
>  */
> int __ref add_memory_resource(int nid, struct resource *res)
> {
>-      struct mhp_restrictions restrictions = {
>-              .flags = MHP_MEMBLOCK_API,
>-      };
>+      struct mhp_restrictions restrictions = {};
>       u64 start, size;
>       bool new_node = false;
>       int ret;
>-- 
>2.20.1

-- 
Wei Yang
Help you, Help me
