Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=d07dbea46405b37d59495eb4de9d1056dcfb7c6d
Commit:     d07dbea46405b37d59495eb4de9d1056dcfb7c6d
Parent:     6cb8f91320d3e720351c21741da795fed580b21b
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Tue Jul 17 04:03:23 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Tue Jul 17 10:23:01 2007 -0700

    Slab allocators: support __GFP_ZERO in all allocators
    
    A kernel convention for many allocators is that if __GFP_ZERO is passed to an
    allocator then the allocated memory should be zeroed.
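
    For example, the page allocator already follows this convention:

        /* __GFP_ZERO asks the page allocator for memory that is already zeroed. */
        unsigned long addr = __get_free_page(GFP_KERNEL | __GFP_ZERO);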
    
    This is currently not supported by the slab allocators.  The inconsistency
    makes it difficult to implement in derived allocators such as in the uncached
    allocator and the pool allocators.
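
    For illustration (the wrapper name here is made up), once the slab allocators
    honor the flag, a derived allocator can simply pass the caller's gfp mask
    through and rely on the underlying allocator for zeroing:

        static void *pool_alloc_node(struct kmem_cache *cache, gfp_t gfp, int node)
        {
                /* If the caller set __GFP_ZERO, the object comes back zeroed. */
                return kmem_cache_alloc_node(cache, gfp, node);
        }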
    
    In addition, support for zeroed allocations in the slab allocators does not
    have a consistent API.  There are no zeroing allocator functions for NUMA node
    placement (kmalloc_node, kmem_cache_alloc_node).  Zeroing allocations are
    only provided for the default allocs (kzalloc, kmem_cache_zalloc).
    __GFP_ZERO will make zeroing universally available and does not require any
    additional functions.
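
    For example, with this patch a zeroed, node-local allocation needs no new
    helper; the existing entry points take the flag directly:

        /* Behaves like kzalloc(size, GFP_KERNEL)... */
        buf = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
        /* ...and the same works for NUMA placement: */
        buf = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, nid);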
    
    So add the necessary logic to all slab allocators to support __GFP_ZERO.
    
    The code is added to the hot path.  The gfp flags are on the stack and so the
    cacheline is readily available for checking if we want a zeroed object.
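
    Each allocator gains essentially the same check just before the object is
    returned (see the hunks below):

        /* size is obj_size(cachep) in SLAB, the requested length in SLUB/SLOB */
        if (unlikely((flags & __GFP_ZERO) && object))
                memset(object, 0, size);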
    
    Zeroing while allocating is now a frequent operation and we seem to be
    gradually approaching 1:1 parity between zeroing and non-zeroing allocs.
    The current tree has 3476 uses of kmalloc vs 2731 uses of kzalloc.
    
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Acked-by: Pekka Enberg <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 mm/slab.c |    8 +++++++-
 mm/slob.c |    2 ++
 mm/slub.c |   24 +++++++++++++++---------
 3 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index d2cd304..1a88fde 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
         * Be lazy and only check for valid flags here,  keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+       BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
        local_flags = (flags & GFP_LEVEL_MASK);
        /* Take the l3 list lock to change the colour_next on this node */
@@ -3392,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        local_irq_restore(save_flags);
        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
+       if (unlikely((flags & __GFP_ZERO) && ptr))
+               memset(ptr, 0, obj_size(cachep));
+
        return ptr;
 }
 
@@ -3443,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
        prefetchw(objp);
 
+       if (unlikely((flags & __GFP_ZERO) && objp))
+               memset(objp, 0, obj_size(cachep));
+
        return objp;
 }
 
diff --git a/mm/slob.c b/mm/slob.c
index 41d32c3..b3a4558 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
+       if (unlikely((gfp & __GFP_ZERO) && b))
+               memset(b, 0, size);
        return b;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 548d78d..479eb5c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        void *last;
        void *p;
 
-       BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+       BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
        if (flags & __GFP_WAIT)
                local_irq_enable();
@@ -1540,7 +1540,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-                               gfp_t gfpflags, int node, void *addr)
+               gfp_t gfpflags, int node, void *addr, int length)
 {
        struct page *page;
        void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
                page->lockless_freelist = object[page->offset];
        }
        local_irq_restore(flags);
+
+       if (unlikely((gfpflags & __GFP_ZERO) && object))
+               memset(object, 0, length);
+
        return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-       return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+       return slab_alloc(s, gfpflags, -1,
+                       __builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-       return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+       return slab_alloc(s, gfpflags, node,
+               __builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
        if (ZERO_OR_NULL_PTR(s))
                return s;
 
-       return slab_alloc(s, flags, -1, __builtin_return_address(0));
+       return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
        if (ZERO_OR_NULL_PTR(s))
                return s;
 
-       return slab_alloc(s, flags, node, __builtin_return_address(0));
+       return slab_alloc(s, flags, node, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
        void *x;
 
-       x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+       x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
        if (x)
                memset(x, 0, s->objsize);
        return x;
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
        if (ZERO_OR_NULL_PTR(s))
                return s;
 
-       return slab_alloc(s, gfpflags, -1, caller);
+       return slab_alloc(s, gfpflags, -1, caller, size);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        if (ZERO_OR_NULL_PTR(s))
                return s;
 
-       return slab_alloc(s, gfpflags, node, caller);
+       return slab_alloc(s, gfpflags, node, caller, size);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)