Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=2086d26a05a4b5bda4a2f677bc143933bbdfa9f8
Commit:     2086d26a05a4b5bda4a2f677bc143933bbdfa9f8
Parent:     88a420e4e21c1ff6592a668cf4e8af42eff30bad
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Sun May 6 14:49:46 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Mon May 7 12:12:54 2007 -0700

    SLUB: Free slabs and sort partial slab lists in kmem_cache_shrink
    
    At kmem_cache_shrink, check if there are any empty slabs on the partial
    list; if so, remove them.
    
    Also--as an anti-fragmentation measure--sort the partial slabs so that
    the most fully allocated ones come first and the least allocated last.
    
    The next allocations may fill up the nearly full slabs. Having the
    least allocated slabs last gives them the maximum chance that their
    remaining objects will be freed. Thus we can hopefully minimize the
    number of partial slabs.
    
    I think this is the best one can do in terms of anti-fragmentation
    measures. Real defragmentation (meaning moving objects out of sparsely
    used slabs into those that are almost full) could be implemented by
    scanning the list produced here in reverse, but that would mean we
    need to provide a callback at slab cache creation that allows the
    deletion or moving of an object. That would involve slab API changes,
    so defer it for now.
    
    Cc: Mel Gorman <[EMAIL PROTECTED]>
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 mm/slub.c |  125 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 112 insertions(+), 13 deletions(-)
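
For illustration, here is a minimal standalone C sketch of the bucketing
scheme described above: partial slabs are grouped into per-inuse-count
lists, empty ones are discarded, and the rest are re-linked so the
fullest slabs come first and the emptiest last. The names and types
(fake_page, NR_OBJECTS, sort_partial) are invented stand-ins and not the
kernel code; the real implementation is in the kmem_cache_shrink() hunk
further down.

#include <stdio.h>
#include <stdlib.h>

#define NR_OBJECTS 4	/* objects per slab, i.e. s->objects */

struct fake_page {
	int inuse;			/* objects allocated in this slab */
	struct fake_page *next;
};

/* Sort a list of partial slabs: fullest first, empty ones freed. */
static struct fake_page *sort_partial(struct fake_page *partial)
{
	/* A partial slab has 1..NR_OBJECTS-1 objects in use. */
	struct fake_page *buckets[NR_OBJECTS] = { NULL };
	struct fake_page *page, *next, *sorted = NULL, **tail = &sorted;
	int i;

	/* Bucket each slab by its inuse count; discard empty slabs. */
	for (page = partial; page; page = next) {
		next = page->next;
		if (!page->inuse) {
			free(page);	/* stands in for discard_slab() */
			continue;
		}
		page->next = buckets[page->inuse];
		buckets[page->inuse] = page;
	}

	/* Splice the buckets back together, fullest bucket first. */
	for (i = NR_OBJECTS - 1; i > 0; i--) {
		for (page = buckets[i]; page; page = next) {
			next = page->next;
			*tail = page;
			tail = &page->next;
		}
	}
	*tail = NULL;
	return sorted;
}

int main(void)
{
	int inuse[] = { 0, 2, 3, 1, 3 };
	struct fake_page *list = NULL, *p;
	unsigned int i;

	for (i = 0; i < sizeof(inuse) / sizeof(inuse[0]); i++) {
		p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->inuse = inuse[i];
		p->next = list;
		list = p;
	}

	for (p = sort_partial(list); p; p = list) {
		list = p->next;
		printf("slab with %d object(s) in use\n", p->inuse);
		free(p);
	}
	return 0;
}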

diff --git a/mm/slub.c b/mm/slub.c
index a623196..ed28462 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -130,9 +130,19 @@
  */
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
 
-/* Mininum number of partial slabs */
+/*
+ * Minimum number of partial slabs. These will be left on the partial
+ * lists even if they are empty. kmem_cache_shrink may reclaim them.
+ */
 #define MIN_PARTIAL 2
 
+/*
+ * Maximum number of desirable partial slabs.
+ * The existence of more partial slabs makes kmem_cache_shrink
+ * sort the partial list by the number of objects in use.
+ */
+#define MAX_PARTIAL 10
+
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                                SLAB_POISON | SLAB_STORE_USER)
 /*
@@ -1882,7 +1892,7 @@ static int kmem_cache_close(struct kmem_cache *s)
        for_each_online_node(node) {
                struct kmem_cache_node *n = get_node(s, node);
 
-               free_list(s, n, &n->partial);
+               n->nr_partial -= free_list(s, n, &n->partial);
                if (atomic_long_read(&n->nr_slabs))
                        return 1;
        }
@@ -2130,6 +2140,86 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+/*
+ *  kmem_cache_shrink removes empty slabs from the partial lists
+ *  and then sorts the partially allocated slabs by the number
+ *  of items in use. The slabs with the most items in use
+ *  come first. New allocations will remove these from the
+ *  partial list because they are full. The slabs with the
+ *  least items are placed last. If their remaining objects are
+ *  freed, those pages can be returned to the page allocator.
+ */
+int kmem_cache_shrink(struct kmem_cache *s)
+{
+       int node;
+       int i;
+       struct kmem_cache_node *n;
+       struct page *page;
+       struct page *t;
+       struct list_head *slabs_by_inuse =
+               kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
+       unsigned long flags;
+
+       if (!slabs_by_inuse)
+               return -ENOMEM;
+
+       flush_all(s);
+       for_each_online_node(node) {
+               n = get_node(s, node);
+
+               if (!n->nr_partial)
+                       continue;
+
+               for (i = 0; i < s->objects; i++)
+                       INIT_LIST_HEAD(slabs_by_inuse + i);
+
+               spin_lock_irqsave(&n->list_lock, flags);
+
+               /*
+                * Build lists indexed by the items in use in
+                * each slab or free slabs if empty.
+                *
+                * Note that concurrent frees may occur while
+                * we hold the list_lock. page->inuse here is
+                * the upper limit.
+                */
+               list_for_each_entry_safe(page, t, &n->partial, lru) {
+                       if (!page->inuse && slab_trylock(page)) {
+                               /*
+                                * Must hold slab lock here because slab_free
+                                * may have freed the last object and be
+                                * waiting to release the slab.
+                                */
+                               list_del(&page->lru);
+                               n->nr_partial--;
+                               slab_unlock(page);
+                               discard_slab(s, page);
+                       } else {
+                               if (n->nr_partial > MAX_PARTIAL)
+                                       list_move(&page->lru,
+                                       slabs_by_inuse + page->inuse);
+                       }
+               }
+
+               if (n->nr_partial <= MAX_PARTIAL)
+                       goto out;
+
+               /*
+                * Rebuild the partial list with the slabs filled up
+                * most first and the least used slabs at the end.
+                */
+               for (i = s->objects - 1; i >= 0; i--)
+                       list_splice(slabs_by_inuse + i, n->partial.prev);
+
+       out:
+               spin_unlock_irqrestore(&n->list_lock, flags);
+       }
+
+       kfree(slabs_by_inuse);
+       return 0;
+}
+EXPORT_SYMBOL(kmem_cache_shrink);
+
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
  *
@@ -2382,17 +2472,6 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 #endif
 
-/***************************************************************
- *     Compatiblility definitions
- **************************************************************/
-
-int kmem_cache_shrink(struct kmem_cache *s)
-{
-       flush_all(s);
-       return 0;
-}
-EXPORT_SYMBOL(kmem_cache_shrink);
-
 #ifdef CONFIG_NUMA
 
 /*****************************************************************
@@ -3169,6 +3248,25 @@ static ssize_t validate_store(struct kmem_cache *s,
 }
 SLAB_ATTR(validate);
 
+static ssize_t shrink_show(struct kmem_cache *s, char *buf)
+{
+       return 0;
+}
+
+static ssize_t shrink_store(struct kmem_cache *s,
+                       const char *buf, size_t length)
+{
+       if (buf[0] == '1') {
+               int rc = kmem_cache_shrink(s);
+
+               if (rc)
+                       return rc;
+       } else
+               return -EINVAL;
+       return length;
+}
+SLAB_ATTR(shrink);
+
 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
 {
        if (!(s->flags & SLAB_STORE_USER))
@@ -3225,6 +3323,7 @@ static struct attribute * slab_attrs[] = {
        &poison_attr.attr,
        &store_user_attr.attr,
        &validate_attr.attr,
+       &shrink_attr.attr,
        &alloc_calls_attr.attr,
        &free_calls_attr.attr,
 #ifdef CONFIG_ZONE_DMA
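
The shrink_store() hunk above also exposes this through sysfs: writing "1"
to a cache's new "shrink" attribute triggers kmem_cache_shrink() for that
cache, and anything else returns -EINVAL. Below is a rough user-space
sketch; the exact sysfs path and the kmalloc-64 cache name are assumptions
(SLUB creates one directory per cache under a slab/ directory in sysfs),
so adjust them for the cache you actually want to shrink.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: per-cache SLUB directory, e.g. for kmalloc-64. */
	const char *attr = "/sys/slab/kmalloc-64/shrink";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror(attr);
		return 1;
	}
	/* shrink_store() accepts "1"; anything else returns -EINVAL. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}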