Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=331dc558fa020451ff773973cee855fd721aa88e
Commit:     331dc558fa020451ff773973cee855fd721aa88e
Parent:     71c7a06ff0a2ba0434ace4d7aa679537c4211d9d
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Thu Feb 14 14:28:09 2008 -0800
Committer:  Christoph Lameter <[EMAIL PROTECTED]>
CommitDate: Thu Feb 14 15:30:02 2008 -0800

    slub: Support 4k kmallocs again to compensate for page allocator slowness
    
    Currently we hand off PAGE_SIZE-sized kmallocs to the page allocator in
    the mistaken belief that the page allocator can handle these allocations
    effectively. However, measurements indicate a slowdown of at least a
    factor of 8 versus the slub fastpath (and that is SMP only; NUMA is much
    worse), which causes regressions in tbench.
    
    Increase the number of kmalloc caches by one so that we again handle 4k
    kmallocs directly from slub. 4k page buffering for the page allocator
    will be performed by slub, as slab already does.
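
    For illustration only (this is not part of the patch): a minimal sketch,
    assuming 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical test module,
    of which path serves which request size after this change:

        #include <linux/module.h>
        #include <linux/slab.h>

        static int __init kmalloc_4k_demo_init(void)
        {
                /* index 11: the existing "kmalloc-2048" cache */
                void *a = kmalloc(2048, GFP_KERNEL);
                /* index 12: the new "kmalloc-4096" cache added here */
                void *b = kmalloc(4096, GFP_KERNEL);
                /* larger than PAGE_SIZE: still goes to kmalloc_large() */
                void *c = kmalloc(PAGE_SIZE + 1, GFP_KERNEL);

                kfree(a);
                kfree(b);
                kfree(c);
                return 0;
        }
        module_init(kmalloc_4k_demo_init);
        MODULE_LICENSE("GPL");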
    
    At some point the page allocator fastpath should be fixed. A lot of the
    kernel would benefit from a faster ability to allocate a single page. If
    that is done then the 4k allocs may again be forwarded to the page
    allocator and this patch could be reverted.
    
    Reviewed-by: Pekka Enberg <[EMAIL PROTECTED]>
    Acked-by: Mel Gorman <[EMAIL PROTECTED]>
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
---
 include/linux/slub_def.h |    6 +++---
 mm/slub.c                |   18 +++++++++---------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 98be113..57deecc 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -197,7 +197,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
        if (__builtin_constant_p(size)) {
-               if (size > PAGE_SIZE / 2)
+               if (size > PAGE_SIZE)
                        return kmalloc_large(size, flags);
 
                if (!(flags & SLUB_DMA)) {
@@ -219,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        if (__builtin_constant_p(size) &&
-               size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+               size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);
 
                if (!s)
diff --git a/mm/slub.c b/mm/slub.c
index 644fd0a..4b3895c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2517,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *             Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2703,7 +2703,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
 
-       if (unlikely(size > PAGE_SIZE / 2))
+       if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
@@ -2720,7 +2720,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
 
-       if (unlikely(size > PAGE_SIZE / 2))
+       if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
@@ -3032,7 +3032,7 @@ void __init kmem_cache_init(void)
                caches++;
        }
 
-       for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+       for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
                create_kmalloc_cache(&kmalloc_caches[i],
                        "kmalloc", 1 << i, GFP_KERNEL);
                caches++;
@@ -3059,7 +3059,7 @@ void __init kmem_cache_init(void)
        slab_state = UP;
 
        /* Provide the correct kmalloc names now that the caches are up */
-       for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+       for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
                kmalloc_caches[i]. name =
                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3088,7 +3088,7 @@ static int slab_unmergeable(struct kmem_cache *s)
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;
 
-       if ((s->flags & __PAGE_ALLOC_FALLBACK)
+       if ((s->flags & __PAGE_ALLOC_FALLBACK))
                return 1;
 
        if (s->ctor)
@@ -3252,7 +3252,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
        struct kmem_cache *s;
 
-       if (unlikely(size > PAGE_SIZE / 2))
+       if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, gfpflags);
 
        s = get_slab(size, gfpflags);
@@ -3268,7 +3268,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
        struct kmem_cache *s;
 
-       if (unlikely(size > PAGE_SIZE / 2))
+       if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, gfpflags);
 
        s = get_slab(size, gfpflags);