On Sun, 20 Nov 2016, Eric Dumazet wrote:

> Another potential issue with CONFIG_VMAP_STACK is that we make no
> attempt to allocate 4 consecutive pages.
> 
> Even if we have plenty of memory, 4 calls to alloc_page() are likely to
> give us 4 pages in completely different locations.
> 
> Here I printed the hugepage number of each of the 4 pages for some stacks:
> 
> 
> 0xffffc9001a07c000-0xffffc9001a081000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfcac Hfeba Hfec0 Hfc9d N0=4
> 0xffffc9001a084000-0xffffc9001a089000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfc79 Hfc79 Hfc79 Hfc83 N0=4
> 0xffffc9001a08c000-0xffffc9001a091000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfc9b Hfe91 Hfebe Hfca2 N0=4
> 0xffffc9001a094000-0xffffc9001a099000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfcaa Hfcaa Hfca6 Hfebc N0=4
> 0xffffc9001a09c000-0xffffc9001a0a1000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfe9b Hfe90 Hff09 Hfefb N0=4
> 0xffffc9001a0a4000-0xffffc9001a0a9000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfe94 Hfe62 Hfea0 Hfe7b N0=4
> 0xffffc9001a0ac000-0xffffc9001a0b1000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfe78 Hff05 Hff05 Hfc74 N0=4
> 0xffffc9001a0b4000-0xffffc9001a0b9000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfc9b Hfc9b Hfe83 Hf782 N0=4
> 0xffffc9001a0bc000-0xffffc9001a0c1000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfe78 Hfe78 Hfc7f Hfc7f N0=4
> 0xffffc9001a0c4000-0xffffc9001a0c9000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfebe Hfebe Hfe82 Hfe85 N0=4
> 0xffffc9001a0cc000-0xffffc9001a0d1000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfc6b Hfe62 Hfe62 Hfcaa N0=4
> 0xffffc9001a0d4000-0xffffc9001a0d9000   20480 _do_fork+0xe1/0x360 pages=4 vmalloc Hfebd Hfebd Hfc92 Hfc92 N0=4
> 
> Is this a generic vmalloc() issue worth fixing now?
> 
> Note this RFC might conflict with NUMA interleave policy.
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index f2481cb4e6b2..0123e97debb9 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1602,9 +1602,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
>                                pgprot_t prot, int node)
>  {
>       struct page **pages;
> -     unsigned int nr_pages, array_size, i;
> +     unsigned int nr_pages, array_size, i, j;
>       const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
>       const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
> +     const gfp_t multi_alloc_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_NORETRY;
>  
>       nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
>       array_size = (nr_pages * sizeof(struct page *));

I think multi_alloc_mask wants to start from alloc_mask rather than gfp_mask 
before clearing the bit; otherwise the failed high-order allocations, which 
have no chance to reclaim, will spew page allocation failure warnings.  Using 
__GFP_NORETRY here should be a no-op once direct reclaim is masked off, but 
that depends on the page allocator implementation, so there is no harm in 
setting it explicitly.
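
Concretely, something like this (untested sketch):

	/*
	 * Inherit __GFP_NOWARN from alloc_mask so the opportunistic
	 * high-order attempts fail silently; __GFP_NORETRY should be
	 * redundant once __GFP_DIRECT_RECLAIM is cleared, but making
	 * it explicit costs nothing.
	 */
	const gfp_t multi_alloc_mask = (alloc_mask & ~__GFP_DIRECT_RECLAIM) |
				       __GFP_NORETRY;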

> @@ -1624,20 +1625,34 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
>               return NULL;
>       }
>  
> -     for (i = 0; i < area->nr_pages; i++) {
> -             struct page *page;
> -
> -             if (node == NUMA_NO_NODE)
> -                     page = alloc_page(alloc_mask);
> -             else
> -                     page = alloc_pages_node(node, alloc_mask, 0);
> +     for (i = 0; i < area->nr_pages;) {
> +             struct page *page = NULL;
> +             unsigned int chunk_order = min(ilog2(area->nr_pages - i), MAX_ORDER - 1);
> +
> +             while (chunk_order && !page) {
> +                     if (node == NUMA_NO_NODE)
> +                             page = alloc_pages(multi_alloc_mask, chunk_order);
> +                     else
> +                             page = alloc_pages_node(node, multi_alloc_mask, chunk_order);
> +                     if (page)
> +                             split_page(page, chunk_order);
> +                     else
> +                             chunk_order--;
> +             }
> +             if (!page) {
> +                     if (node == NUMA_NO_NODE)
> +                             page = alloc_pages(alloc_mask, 0);
> +                     else
> +                             page = alloc_pages_node(node, alloc_mask, 0);
> +             }
>  
>               if (unlikely(!page)) {
>                       /* Successfully allocated i pages, free them in __vunmap() */
>                       area->nr_pages = i;
>                       goto fail;
>               }
> -             area->pages[i] = page;
> +             for (j = 0; j < (1 << chunk_order); j++)
> +                     area->pages[i++] = page++;
>               if (gfpflags_allow_blocking(gfp_mask))
>                       cond_resched();
>       }
> 
> 
> 
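
For reference, here is a standalone userspace sketch of just the chunking
logic in the second hunk (ilog2_u, the hardcoded MAX_ORDER and the printed
layout are illustration only, not part of the patch).  For a 4-page
CONFIG_VMAP_STACK stack it collapses the four alloc_page() calls into a
single order-2 allocation:

	#include <stdio.h>

	#define MAX_ORDER 11	/* matches the kernel's page allocator */

	/* floor(log2(n)) for n > 0, standing in for the kernel's ilog2() */
	static unsigned int ilog2_u(unsigned int n)
	{
		unsigned int r = 0;

		while (n >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned int nr_pages = 4;	/* a 16KB vmalloc'ed stack */
		unsigned int i = 0;

		while (i < nr_pages) {
			unsigned int order = ilog2_u(nr_pages - i);

			if (order > MAX_ORDER - 1)
				order = MAX_ORDER - 1;
			/* one alloc_pages(order) + split_page() in the patch */
			printf("pages[%u..%u] <- one order-%u allocation\n",
			       i, i + (1u << order) - 1, order);
			i += 1u << order;
		}
		return 0;
	}

This prints "pages[0..3] <- one order-2 allocation", i.e. a single physically
contiguous chunk where mainline does four independent order-0 allocations.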
