cache_free_alien() is only called in the rare case that an object is
freed on a node different from the one it was allocated on. However,
it is marked inline, so it gets inlined into __cache_free(), the core
free path of the slab allocator, and needlessly bloats
kmem_cache_free() and kfree(). The only part that really needs to be
inlined is the node-match check, so factor the rest of
cache_free_alien() out into __cache_free_alien() to reduce the code
size of kmem_cache_free()/kfree().
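
To illustrate the idea with a minimal, self-contained sketch
(hypothetical names, not the kernel code itself): keep only the cheap
node-match check in the inline fast path, and move the rarely taken
work out of line so it is not duplicated into every caller.

#include <stdio.h>

/* Slow path: a real function, emitted once instead of at every call site. */
static int __free_remote(int obj_node, int obj)
{
	printf("freeing obj %d back to node %d\n", obj, obj_node);
	return 1;
}

/* Fast path: only this check is inlined into callers. */
static inline int free_maybe_remote(int this_node, int obj_node, int obj)
{
	if (this_node == obj_node)	/* common case: local free, nothing to do */
		return 0;
	return __free_remote(obj_node, obj);	/* rare case: out-of-line call */
}

int main(void)
{
	free_maybe_remote(0, 0, 42);	/* local free: fast path only */
	free_maybe_remote(0, 1, 43);	/* remote free: takes the slow path */
	return 0;
}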

<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free

<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free

You can see the reduced text size: 0x228 -> 0x1b5 for kfree and
0x216 -> 0x181 for kmem_cache_free.

Signed-off-by: Joonsoo Kim <[email protected]>
---
 mm/slab.c |   38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index c9f137f..5927a17 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
        }
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+                               int node, int page_node)
 {
-       int nodeid = page_to_nid(virt_to_page(objp));
        struct kmem_cache_node *n;
        struct alien_cache *alien = NULL;
        struct array_cache *ac;
-       int node;
        LIST_HEAD(list);
 
-       node = numa_mem_id();
-
-       /*
-        * Make sure we are not freeing a object from another node to the array
-        * cache on this cpu.
-        */
-       if (likely(nodeid == node))
-               return 0;
-
        n = get_node(cachep, node);
        STATS_INC_NODEFREES(cachep);
-       if (n->alien && n->alien[nodeid]) {
-               alien = n->alien[nodeid];
+       if (n->alien && n->alien[page_node]) {
+               alien = n->alien[page_node];
                ac = &alien->ac;
                spin_lock(&alien->lock);
                if (unlikely(ac->avail == ac->limit)) {
                        STATS_INC_ACOVERFLOW(cachep);
-                       __drain_alien_cache(cachep, ac, nodeid, &list);
+                       __drain_alien_cache(cachep, ac, page_node, &list);
                }
                ac_put_obj(cachep, ac, objp);
                spin_unlock(&alien->lock);
                slabs_destroy(cachep, &list);
        } else {
-               n = get_node(cachep, nodeid);
+               n = get_node(cachep, page_node);
                spin_lock(&n->list_lock);
-               free_block(cachep, &objp, 1, nodeid, &list);
+               free_block(cachep, &objp, 1, page_node, &list);
                spin_unlock(&n->list_lock);
                slabs_destroy(cachep, &list);
        }
        return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+       int page_node = page_to_nid(virt_to_page(objp));
+       int node = numa_mem_id();
+       /*
+        * Make sure we are not freeing an object from another node to the array
+        * cache on this cpu.
+        */
+       if (likely(node == page_node))
+               return 0;
+
+       return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*
-- 
1.7.9.5
