On Thu, 14 Jul 2016 12:14:11 -0700 Andy Lutomirski <[email protected]> wrote:

> We should account for stacks regardless of stack size, and we need
> to account in sub-page units if THREAD_SIZE < PAGE_SIZE.  Change the
> units to kilobytes and move it into account_kernel_stack().

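For context, what the description amounts to is roughly the sketch below; the
stat name MEMCG_KERNEL_STACK_KB and the signed `account' argument (+1 on
allocation, -1 on free) are illustrative, not necessarily the literal patch:

static void account_kernel_stack(unsigned long *stack, int account)
{
        struct zone *zone = page_zone(virt_to_page(stack));

        /* zone-level stat, kept in KiB rather than number of stacks */
        mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
                            THREAD_SIZE / 1024 * account);

        /* per-memcg stat, also in KiB, moved out of the alloc/free paths */
        memcg_kmem_update_page_stat(virt_to_page(stack),
                                    MEMCG_KERNEL_STACK_KB,
                                    account * (THREAD_SIZE / 1024));
}

Because the units are kilobytes, the same code is correct whether THREAD_SIZE
is a multiple of PAGE_SIZE or a sub-page size.
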
I queued this patch after
http://ozlabs.org/~akpm/mmotm/broken-out/mm-charge-uncharge-kmemcg-from-generic-page-allocator-paths.patch
so some changes are needed.  (patching mainline when we're at -rc7 was
optimistic!)

> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -165,20 +165,12 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
>       struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
>                                                 THREAD_SIZE_ORDER);
>  
> -     if (page)
> -             memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
> -                                         1 << THREAD_SIZE_ORDER);
> -
>       return page ? page_address(page) : NULL;
>  }
>  
>  static inline void free_thread_stack(unsigned long *stack)
>  {
> -     struct page *page = virt_to_page(stack);
> -
> -     memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
> -                                 -(1 << THREAD_SIZE_ORDER));
> -     __free_kmem_pages(page, THREAD_SIZE_ORDER);
> +     free_kmem_pages((unsigned long)stack, THREAD_SIZE_ORDER);
>  }

Here's what I ended up with:

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
{
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);

        return page ? page_address(page) : NULL;
}

static inline void free_thread_stack(unsigned long *stack)
{
        __free_pages(virt_to_page(stack), THREAD_SIZE_ORDER);
}

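The accounting then pairs up at the call sites, schematically (not the literal
fork.c code):

        /* schematic: charge on allocation, uncharge before free */
        stack = alloc_thread_stack_node(tsk, node);
        if (stack)
                account_kernel_stack(stack, 1);   /* adds THREAD_SIZE/1024 KiB */

        /* ... task lifetime ... */

        account_kernel_stack(stack, -1);          /* subtracts THREAD_SIZE/1024 KiB */
        free_thread_stack(stack);
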
