Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=71c7a06ff0a2ba0434ace4d7aa679537c4211d9d
Commit:     71c7a06ff0a2ba0434ace4d7aa679537c4211d9d
Parent:     b7a49f0d4c34166ae84089d9f145cfaae1b0eec5
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Thu Feb 14 14:28:01 2008 -0800
Committer:  Christoph Lameter <[EMAIL PROTECTED]>
CommitDate: Thu Feb 14 15:30:01 2008 -0800

    slub: Fallback to kmalloc_large for failing higher order allocs
    
    SLUB already has two ways of allocating an object. One is via its own
    slab logic and the other is via a call to kmalloc_large(), which hands
    the allocation off to the page allocator. kmalloc_large() is typically
    used for objects >= PAGE_SIZE.
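
    A minimal sketch of that split (simplified and not the verbatim kernel
    source; the exact size cutoff and the helper bodies below are
    assumptions for illustration only):

        /* Header fragment in the style of include/linux/slub_def.h. */
        static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
        {
                /*
                 * Hand the allocation straight to the page allocator.
                 * __GFP_COMP makes the result a compound page so that
                 * kfree() can later locate its head page.
                 */
                return (void *)__get_free_pages(flags | __GFP_COMP,
                                                get_order(size));
        }

        static __always_inline void *kmalloc(size_t size, gfp_t flags)
        {
                /* Large objects bypass the kmalloc slab caches entirely.
                 * (The cutoff is illustrative; it changes across this
                 * patch series.) */
                if (size > PAGE_SIZE)
                        return kmalloc_large(size, flags);

                /* Everything else goes through the regular SLUB path. */
                return __kmalloc(size, flags);
        }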
    
    We can use that handoff to avoid failing when a higher-order kmalloc
    slab allocation cannot be satisfied by the page allocator: if we reach
    the out-of-memory path, simply try a kmalloc_large() instead. kfree()
    can already handle objects that were allocated via the page allocator,
    so this works just fine (apart from object accounting...).
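
    For reference, kfree() tells the two cases apart by looking at the page
    backing the object. The sketch below is a simplified rendering of that
    logic in mm/slub.c of this era, not the verbatim code, so the field and
    helper names should be read as assumptions:

        void kfree(const void *x)
        {
                struct page *page;

                if (unlikely(ZERO_OR_NULL_PTR(x)))
                        return;

                page = virt_to_head_page(x);
                if (unlikely(!PageSlab(page))) {
                        /* Came from the page allocator via kmalloc_large(). */
                        put_page(page);
                        return;
                }
                /* Ordinary slab object: hand it back to its kmem_cache. */
                slab_free(page->slab, page, (void *)x,
                                __builtin_return_address(0));
        }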
    
    For any kmalloc slab that already requires higher-order allocations
    (which makes it impossible to use the page allocator fastpath!), we
    simply use PAGE_ALLOC_COSTLY_ORDER to get the largest number of
    objects in one go from the page allocator slowpath.
    
    On a 4k platform this patch leads to the following use of higher-order
    pages for the kmalloc slabs:

    8 .. 1024       order 0
    2048 .. 4096    order 3 (the 4k slab only after the next patch)
    
    We may waste some space if the fallback occurs on a 2k slab, but we
    are always able to fall back to an order 0 allocation.
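
    To make the numbers concrete: with 4k pages and (assuming) the default
    slub_min_objects of 4, a 2048 byte kmalloc slab fits only
    4096 / 2048 = 2 objects into an order 0 page, so calculate_sizes()
    picks s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER), i.e.
    order 3: a 32k slab holding 16 objects, while __PAGE_ALLOC_FALLBACK
    still allows an order 0 kmalloc_large() allocation under memory
    pressure.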
    
    Reviewed-by: Pekka Enberg <[EMAIL PROTECTED]>
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
---
 mm/slub.c |   43 ++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index ccfd411..644fd0a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON                0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED   0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE                0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK  0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1539,7 +1541,6 @@ load_freelist:
 unlock_out:
        slab_unlock(c->page);
        stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
        local_irq_restore(flags);
 #endif
@@ -1574,8 +1575,24 @@ new_slab:
                c->page = new;
                goto load_freelist;
        }
-       object = NULL;
-       goto out;
+#ifdef SLUB_FASTPATH
+       local_irq_restore(flags);
+#endif
+       /*
+        * No memory available.
+        *
+        * If the slab uses higher order allocs but the object is
+        * smaller than a page size then we can fallback in emergencies
+        * to the page allocator via kmalloc_large. The page allocator may
+        * have failed to obtain a higher order page and we can try to
+        * allocate a single page if the object fits into a single page.
+        * That is only possible if certain conditions are met that are being
+        * checked when a slab is created.
+        */
+       if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+               return kmalloc_large(s->objsize, gfpflags);
+
+       return NULL;
 debug:
        object = c->page->freelist;
        if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2322,7 +2339,20 @@ static int calculate_sizes(struct kmem_cache *s)
        size = ALIGN(size, align);
        s->size = size;
 
-       s->order = calculate_order(size);
+       if ((flags & __KMALLOC_CACHE) &&
+                       PAGE_SIZE / size < slub_min_objects) {
+               /*
+                * Kmalloc cache that would not have enough objects in
+                * an order 0 page. Kmalloc slabs can fall back to
+                * page allocator order 0 allocs, so take a reasonably large
+                * order that will allow us a good number of objects.
+                */
+               s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+               s->flags |= __PAGE_ALLOC_FALLBACK;
+               s->allocflags |= __GFP_NOWARN;
+       } else
+               s->order = calculate_order(size);
+
        if (s->order < 0)
                return 0;
 
@@ -2539,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
        down_write(&slub_lock);
        if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-                       flags, NULL))
+                       flags | __KMALLOC_CACHE, NULL))
                goto panic;
 
        list_add(&s->list, &slab_caches);
@@ -3058,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;
 
+       if (s->flags & __PAGE_ALLOC_FALLBACK)
+               return 1;
+
        if (s->ctor)
                return 1;
 