We need to distinguish between the situation when a shrinker
has a very small number of objects (see vfs_pressure_ratio()
called from super_cache_count()) and the situation when it has
no objects at all.  Currently, in both of these cases,
shrinker::count_objects() returns 0.
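
A minimal userspace sketch of why these two cases currently look the
same to callers: it mirrors the val * sysctl_vfs_cache_pressure / 100
scaling performed by vfs_pressure_ratio(); the helper name and the
pressure value 50 are illustrative only, not kernel code:

    #include <stdio.h>

    /* Simplified model of vfs_pressure_ratio(): scale the raw object
     * count by vfs_cache_pressure (default 100), rounding down. */
    static unsigned long pressure_ratio(unsigned long val, unsigned long pressure)
    {
            return val * pressure / 100;
    }

    int main(void)
    {
            /* With vfs_cache_pressure lowered to 50 (example value): */
            printf("%lu\n", pressure_ratio(0, 50)); /* empty cache      -> 0 */
            printf("%lu\n", pressure_ratio(1, 50)); /* one object, also -> 0 */
            return 0;
    }

So a superblock with a handful of objects and a completely empty one
both end up reporting 0 to the shrinker core.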

This patch introduces a new SHRINK_EMPTY return value, which
will be used for the "no objects at all" case.  It is mostly
a refactoring, as SHRINK_EMPTY is simply replaced by 0 in all
callers of do_shrink_slab() in this patch, and all the magic
will happen in further patches.
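
For illustration, a minimal userspace model of the convention this
patch establishes; toy_count_objects(), its threshold of 8 objects and
the driver code are hypothetical stand-ins for a ->count_objects()
callback and for the do_shrink_slab() callers touched below, not
actual kernel code:

    #include <stdio.h>

    #define SHRINK_STOP  (~0UL)
    #define SHRINK_EMPTY (~0UL - 1)

    /* How a count callback reports its state after this patch. */
    static unsigned long toy_count_objects(unsigned long nr_objects)
    {
            if (!nr_objects)
                    return SHRINK_EMPTY;    /* nothing cached at all */
            if (nr_objects < 8)
                    return 0;               /* too few to bother shrinking now */
            return nr_objects;              /* freeable objects to scan */
    }

    int main(void)
    {
            unsigned long freed = 0, ret;

            /* Callers in this patch simply fold SHRINK_EMPTY back into 0. */
            ret = toy_count_objects(0);
            if (ret == SHRINK_EMPTY)
                    ret = 0;
            freed += ret;

            printf("freed: %lu\n", freed);
            return 0;
    }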

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov....@gmail.com>
Tested-by: Shakeel Butt <shake...@google.com>
---
 fs/super.c               |    3 +++
 include/linux/shrinker.h |    7 +++++--
 mm/vmscan.c              |   12 +++++++++---
 mm/workingset.c          |    3 +++
 4 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index f5f96e52e0cd..7429588d6b49 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -144,6 +144,9 @@ static unsigned long super_cache_count(struct shrinker *shrink,
        total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
 
+       if (!total_objects)
+               return SHRINK_EMPTY;
+
        total_objects = vfs_pressure_ratio(total_objects);
        return total_objects;
 }
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 7ca9c18cf130..b154fd2b084c 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -34,12 +34,15 @@ struct shrink_control {
 };
 
 #define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
  * @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned when the number of freeable items cannot be determined or the
+ * shrinker should skip this cache this time (e.g., their number is below
+ * the shrinkable limit). No deadlock checks should be done during the
  * count callback - the shrinker relies on aggregating scan counts that couldn't
  * be executed due to potential deadlocks to be run at a later call when the
  * deadlock condition is no longer pending.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1518f3c613f5..d24a3e16ade2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -453,8 +453,8 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        long scanned = 0, next_deferred;
 
        freeable = shrinker->count_objects(shrinker, shrinkctl);
-       if (freeable == 0)
-               return 0;
+       if (freeable == 0 || freeable == SHRINK_EMPTY)
+               return freeable;
 
        /*
         * copy the current shrinker scan count into a local variable
@@ -597,6 +597,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        continue;
 
                ret = do_shrink_slab(&sc, shrinker, priority);
+               if (ret == SHRINK_EMPTY)
+                       ret = 0;
                freed += ret;
 
                if (rwsem_is_contended(&shrinker_rwsem)) {
@@ -642,6 +644,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 {
        struct shrinker *shrinker;
        unsigned long freed = 0;
+       int ret;
 
        if (!mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
@@ -659,7 +662,10 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
                if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                        sc.nid = 0;
 
-               freed += do_shrink_slab(&sc, shrinker, priority);
+               ret = do_shrink_slab(&sc, shrinker, priority);
+               if (ret == SHRINK_EMPTY)
+                       ret = 0;
+               freed += ret;
                /*
                 * Bail out if someone want to register a new shrinker to
                 * prevent the regsitration from being stalled for long periods
diff --git a/mm/workingset.c b/mm/workingset.c
index 79099bc5c256..3bac1b2f649a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -402,6 +402,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
        }
        max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
 
+       if (!nodes)
+               return SHRINK_EMPTY;
+
        if (nodes <= max_nodes)
                return 0;
        return nodes - max_nodes;
