Gitweb:     
http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7c2e132c54c79af4e524154074b9a02c3c0d6072
Commit:     7c2e132c54c79af4e524154074b9a02c3c0d6072
Parent:     9824601ead957a29e35d539e43266c003f7b085b
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Mon Jan 7 23:20:27 2008 -0800
Committer:  Christoph Lameter <[EMAIL PROTECTED]>
CommitDate: Mon Feb 4 10:56:02 2008 -0800

    Add parameter to add_partial to avoid having two functions
    
    Add a parameter to add_partial instead of having separate functions.  The
    parameter allows more detailed control of where the slab page is placed in
    the partial queues.
    
    If we put slabs back to the front then they are likely immediately used for
    allocations.  If they are put at the end then we can maximize the time that
    the partial slabs spent without being subject to allocations.
    
    When deactivating a slab we can put the slabs that had remote objects freed
    to them (we can see that because objects were put on the freelist, which
    requires locks) at the end of the list so that the cachelines of remote
    processors can cool down.
    cool down.  Slabs that had objects from the local cpu freed to them (objects
    exist in the lockless freelist) are put at the front of the list to be
    reused ASAP in order to exploit the cache-hot state of the local cpu.
    
    Patch seems to slightly improve tbench speed (1-2%).
    
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Reviewed-by: Pekka Enberg <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
---
 mm/slub.c |   31 +++++++++++++++----------------
 1 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5146e27..e160f28 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1198,19 +1198,15 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+                               struct page *page, int tail)
 {
        spin_lock(&n->list_lock);
        n->nr_partial++;
-       list_add_tail(&page->lru, &n->partial);
-       spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-       spin_lock(&n->list_lock);
-       n->nr_partial++;
-       list_add(&page->lru, &n->partial);
+       if (tail)
+               list_add_tail(&page->lru, &n->partial);
+       else
+               list_add(&page->lru, &n->partial);
        spin_unlock(&n->list_lock);
 }
 
@@ -1339,7 +1335,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1347,7 +1343,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
        if (page->inuse) {
 
                if (page->freelist)
-                       add_partial(n, page);
+                       add_partial(n, page, tail);
                else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
                        add_full(n, page);
                slab_unlock(page);
@@ -1362,7 +1358,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
                         * partial list stays small. kmem_cache_shrink can
                         * reclaim empty slabs from the partial list.
                         */
-                       add_partial_tail(n, page);
+                       add_partial(n, page, 1);
                        slab_unlock(page);
                } else {
                        slab_unlock(page);
@@ -1377,6 +1373,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
        struct page *page = c->page;
+       int tail = 1;
        /*
         * Merge cpu freelist into freelist. Typically we get here
         * because both freelists are empty. So this is unlikely
@@ -1385,6 +1382,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
        while (unlikely(c->freelist)) {
                void **object;
 
+               tail = 0;       /* Hot objects. Put the slab first */
+
                /* Retrieve object from cpu_freelist */
                object = c->freelist;
                c->freelist = c->freelist[c->offset];
@@ -1395,7 +1394,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
                page->inuse--;
        }
        c->page = NULL;
-       unfreeze_slab(s, page);
+       unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -1617,7 +1616,7 @@ checks_ok:
         * then add it.
         */
        if (unlikely(!prior))
-               add_partial_tail(get_node(s, page_to_nid(page)), page);
+               add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
        slab_unlock(page);
@@ -2025,7 +2024,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
        init_kmem_cache_node(n);
        atomic_long_inc(&n->nr_slabs);
-       add_partial(n, page);
+       add_partial(n, page, 0);
        return n;
 }
 
-
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to