By converting start and size to page granularity, we silently ignore
unaligned parts within a page instead of properly bailing out with an
error.
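
To illustrate why the old check is too permissive, here is a minimal
userspace sketch (not the kernel code itself; the 4 KiB page size and
128 MiB memory block size are assumed values for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define BLOCK_SZ	(128ULL << 20)		/* assumed memory_block_size_bytes() */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

/* Old check: the page-granularity conversion discards sub-page offsets. */
static int old_check(uint64_t start, uint64_t size)
{
	uint64_t block_nr_pages = BLOCK_SZ >> PAGE_SHIFT;
	uint64_t nr_pages = size >> PAGE_SHIFT;
	uint64_t start_pfn = PFN_DOWN(start);

	if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
	    !IS_ALIGNED(nr_pages, block_nr_pages))
		return -1;
	return 0;
}

/* New check: compare the byte addresses directly against the block size. */
static int new_check(uint64_t start, uint64_t size)
{
	if (!size || !IS_ALIGNED(start, BLOCK_SZ) ||
	    !IS_ALIGNED(size, BLOCK_SZ))
		return -1;
	return 0;
}

int main(void)
{
	/* start is 0x200 bytes into a page: not even page aligned. */
	uint64_t start = BLOCK_SZ + 0x200;
	uint64_t size = BLOCK_SZ;

	printf("old check: %s\n", old_check(start, size) ? "rejected" : "accepted");
	printf("new check: %s\n", new_check(start, size) ? "rejected" : "accepted");
	return 0;
}

With these values, PFN_DOWN() rounds the sub-page offset away, so the
old check accepts the unaligned range, while the new check rejects it.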

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Michal Hocko <mho...@suse.com>
Cc: David Hildenbrand <da...@redhat.com>
Cc: Pavel Tatashin <pasha.tatas...@soleen.com>
Cc: Qian Cai <c...@lca.pw>
Cc: Wei Yang <richard.weiy...@gmail.com>
Cc: Arun KS <aru...@codeaurora.org>
Cc: Mathieu Malaterre <ma...@debian.org>
Reviewed-by: Dan Williams <dan.j.willi...@intel.com>
Reviewed-by: Wei Yang <richardw.y...@linux.intel.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/memory_hotplug.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e096c987d261..762887b2358b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1051,16 +1051,11 @@ int try_online_node(int nid)
 
 static int check_hotplug_memory_range(u64 start, u64 size)
 {
-       unsigned long block_sz = memory_block_size_bytes();
-       u64 block_nr_pages = block_sz >> PAGE_SHIFT;
-       u64 nr_pages = size >> PAGE_SHIFT;
-       u64 start_pfn = PFN_DOWN(start);
-
        /* memory range must be block size aligned */
-       if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
-           !IS_ALIGNED(nr_pages, block_nr_pages)) {
+       if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
+           !IS_ALIGNED(size, memory_block_size_bytes())) {
                pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, 
size %#llx",
-                      block_sz, start, size);
+                      memory_block_size_bytes(), start, size);
                return -EINVAL;
        }
 
-- 
2.20.1
