From: Joonsoo Kim <iamjoonsoo....@lge.com>

slabs_tofree() computes the number of slabs needed to free all of a node's
free objects, i.e. it requests freeing all free slabs. We can achieve the
same effect by simply passing INT_MAX to drain_freelist().

Acked-by: Christoph Lameter <c...@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/slab.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 373b8be..5451929 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -888,12 +888,6 @@ static int init_cache_node_node(int node)
        return 0;
 }
 
-static inline int slabs_tofree(struct kmem_cache *cachep,
-                                               struct kmem_cache_node *n)
-{
-       return (n->free_objects + cachep->num - 1) / cachep->num;
-}
-
 static void cpuup_canceled(long cpu)
 {
        struct kmem_cache *cachep;
@@ -958,7 +952,7 @@ free_slab:
                n = get_node(cachep, node);
                if (!n)
                        continue;
-               drain_freelist(cachep, n, slabs_tofree(cachep, n));
+               drain_freelist(cachep, n, INT_MAX);
        }
 }
 
@@ -1110,7 +1104,7 @@ static int __meminit drain_cache_node_node(int node)
                if (!n)
                        continue;
 
-               drain_freelist(cachep, n, slabs_tofree(cachep, n));
+               drain_freelist(cachep, n, INT_MAX);
 
                if (!list_empty(&n->slabs_full) ||
                    !list_empty(&n->slabs_partial)) {
@@ -2304,7 +2298,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool 
deactivate)
 
        check_irq_on();
        for_each_kmem_cache_node(cachep, node, n) {
-               drain_freelist(cachep, n, slabs_tofree(cachep, n));
+               drain_freelist(cachep, n, INT_MAX);
 
                ret += !list_empty(&n->slabs_full) ||
                        !list_empty(&n->slabs_partial);
-- 
1.9.1

Reply via email to