On Thu, 3 Apr 2014, Vladimir Davydov wrote:

> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -358,16 +358,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
>  #include <linux/slub_def.h>
>  #endif
>
> -static __always_inline void *
> -kmalloc_order(size_t size, gfp_t flags, unsigned int order)
> -{
> -     void *ret;
> -
> -     flags |= (__GFP_COMP | __GFP_KMEMCG);
> -     ret = (void *) __get_free_pages(flags, order);
> -     kmemleak_alloc(ret, size, 1, flags);
> -     return ret;
> -}
> +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);


Hmmm... This was intentionally inlined to allow inline expansion of
calls to kmalloc() with large constant sizes. The inline expansion
converts those calls directly into page allocator calls, avoiding the
slab overhead entirely.
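For reference, here is a minimal user-space sketch of the pattern (the
fake_* names and LARGE_THRESHOLD are stand-ins invented for this
example, not the actual kernel identifiers): while the helper stays
__always_inline, a kmalloc-style call with a large constant size folds
at compile time into a single direct call to the page-level allocator,
with the order already computed. Making the helper extern turns that
into an out-of-line function call and defeats the folding.

#include <stdio.h>
#include <stdlib.h>

#define LARGE_THRESHOLD 8192	/* stand-in for KMALLOC_MAX_CACHE_SIZE */

/* stand-in for __get_free_pages(): the page-level allocator */
static void *fake_get_free_pages(unsigned int order)
{
	return aligned_alloc(4096, 4096UL << order);
}

/* stand-in for the slab fast path */
static void *fake_slab_alloc(size_t size)
{
	return malloc(size);
}

/* compute the page order for a size; folds to a constant when
 * size is a compile-time constant */
static inline __attribute__((always_inline))
unsigned int fake_get_order(size_t size)
{
	unsigned int order = 0;
	size_t n = 4096;

	while (n < size) {
		n <<= 1;
		order++;
	}
	return order;
}

/* the helper the patch un-inlines; while it stays inline, calls
 * with constant sizes compile down to fake_get_free_pages(order)
 * with order precomputed by the compiler */
static inline __attribute__((always_inline))
void *fake_kmalloc_order(size_t size, unsigned int order)
{
	return fake_get_free_pages(order);
}

/* the kmalloc() fast-path pattern: constant large sizes bypass
 * the slab layer entirely */
static inline __attribute__((always_inline))
void *fake_kmalloc(size_t size)
{
	if (__builtin_constant_p(size) && size > LARGE_THRESHOLD)
		return fake_kmalloc_order(size, fake_get_order(size));
	return fake_slab_alloc(size);
}

int main(void)
{
	/* constant large size: at -O2 this whole branch collapses
	 * to one fake_get_free_pages(3) call */
	void *p = fake_kmalloc(32768);

	printf("%p\n", p);
	free(p);
	return 0;
}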
