On Thu, Aug 15, 2019 at 02:10:49PM +1000, Alastair D'Silva wrote:
> From: Alastair D'Silva <alast...@d-silva.org>
> 
> When presented with large amounts of memory being hotplugged
> (in my test case, ~890GB), the call to flush_dcache_range takes
> a while (~50 seconds), triggering RCU stalls.
> 
> This patch breaks up the call into 16GB chunks, calling
> cond_resched() in between to allow the scheduler to run.
> 
> Signed-off-by: Alastair D'Silva <alast...@d-silva.org>
> ---
>  arch/powerpc/mm/mem.c | 16 ++++++++++++++--
>  1 file changed, 14 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
> index 5400da87a804..fb0d5e9aa11b 100644
> --- a/arch/powerpc/mm/mem.c
> +++ b/arch/powerpc/mm/mem.c
> @@ -104,11 +104,14 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
>       return -ENODEV;
>  }
> 
> +#define FLUSH_CHUNK_SIZE (16ull * 1024ull * 1024ull * 1024ull)

IMHO this begs for adding SZ_16G to include/linux/sizes.h and using it here

> +
>  int __ref arch_add_memory(int nid, u64 start, u64 size,
>                       struct mhp_restrictions *restrictions)
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> +     unsigned long i;
>       int rc;
> 
>       resize_hpt_for_hotplug(memblock_phys_mem_size());
> @@ -120,7 +123,11 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
>                       start, start + size, rc);
>               return -EFAULT;
>       }
> -     flush_dcache_range(start, start + size);
> +
> +     for (i = 0; i < size; i += FLUSH_CHUNK_SIZE) {
> +             flush_dcache_range(start + i, min(start + size, start + i + FLUSH_CHUNK_SIZE));
> +             cond_resched();
> +     }
> 
>       return __add_pages(nid, start_pfn, nr_pages, restrictions);
>  }
> @@ -131,13 +138,18 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
>       struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
> +     unsigned long i;
>       int ret;
> 
>       __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
> 
>       /* Remove htab bolted mappings for this section of memory */
>       start = (unsigned long)__va(start);
> -     flush_dcache_range(start, start + size);
> +     for (i = 0; i < size; i += FLUSH_CHUNK_SIZE) {
> +             flush_dcache_range(start + i, min(start + size, start + i + FLUSH_CHUNK_SIZE));
> +             cond_resched();
> +     }
> +
>       ret = remove_section_mapping(start, start + size);
>       WARN_ON_ONCE(ret);
> 
> -- 
> 2.21.0
> 

-- 
Sincerely yours,
Mike.

Reply via email to