Commit:     d6269543ef24aa012aa228c27af3adb074f7b36b
Parent:     41f9dc5c871600f53c8912b2975971d2a11c1c25
Author:     Matt Mackall <[EMAIL PROTECTED]>
AuthorDate: Sat Jul 21 04:37:40 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Sat Jul 21 17:49:16 2007 -0700

    slob: reduce list scanning
    The version of SLOB in -mm always scans its free list from the beginning,
    which results in small allocations and free segments clustering at the
    beginning of the list over time.  This causes the average search to scan
    over a large stretch at the beginning on each allocation.
    By starting each page search where the last one left off, we evenly
    distribute the allocations and greatly shorten the average search.
    Without this patch, kernel compiles on a 1.5G machine take a large amount
    of system time for list scanning.  With this patch, compiles are within a
    few seconds of performance of a SLAB kernel with no notable change in
    system time.
    Signed-off-by: Matt Mackall <[EMAIL PROTECTED]>
    Cc: Christoph Lameter <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
 mm/slob.c |   21 ++++++++++++++++-----
 1 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/mm/slob.c b/mm/slob.c
index d50920e..ec33fcd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
        struct slob_page *sp;
+       struct list_head *prev;
        slob_t *b = NULL;
        unsigned long flags;
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                if (node != -1 && page_to_nid(&sp->page) != node)
+               /* Enough room on this page? */
+               if (sp->units < SLOB_UNITS(size))
+                       continue;
-               if (sp->units >= SLOB_UNITS(size)) {
-                       b = slob_page_alloc(sp, size, align);
-                       if (b)
-                               break;
-               }
+               /* Attempt to alloc */
+               prev = sp->list.prev;
+               b = slob_page_alloc(sp, size, align);
+               if (!b)
+                       continue;
+               /* Improve fragment distribution and reduce our average
+                * search time by starting our next search here. (see
+                * Knuth vol 1, sec 2.5, pg 449) */
+               if (free_slob_pages.next != prev->next)
+                       list_move_tail(&free_slob_pages, prev->next);
+               break;
        spin_unlock_irqrestore(&slob_lock, flags);
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at

Reply via email to