From: Huang Ying <ying.hu...@intel.com>

The statistics for total readahead pages and total readahead hits are
recorded and exported via the following sysfs interfaces.

/sys/kernel/mm/swap/ra_hits
/sys/kernel/mm/swap/ra_total

With them, the efficiency of the swap readahead could be measured, so
that the swap readahead algorithm and parameters could be tuned
accordingly.

Signed-off-by: "Huang, Ying" <ying.hu...@intel.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Minchan Kim <minc...@kernel.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Shaohua Li <s...@kernel.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Fengguang Wu <fengguang...@intel.com>
Cc: Tim Chen <tim.c.c...@intel.com>
Cc: Dave Hansen <dave.han...@intel.com>
---
 mm/swap_state.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index a13bbf504e93..8be7153967ed 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -20,6 +20,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap_slots.h>
 #include <linux/huge_mm.h>
+#include <linux/percpu_counter.h>
 
 #include <asm/pgtable.h>
 
@@ -74,6 +75,24 @@ unsigned long total_swapcache_pages(void)
 }
 
 static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+/* Totals for swap readahead pages and readahead hits, shown in sysfs. */
+static struct percpu_counter swapin_readahead_hits_total;
+static struct percpu_counter swapin_readahead_total;
+
+/* Boot-time setup of the percpu counters; runs once via subsys_initcall. */
+static int __init swap_init(void)
+{
+       int err;
+
+       err = percpu_counter_init(&swapin_readahead_hits_total, 0, GFP_KERNEL);
+       if (err)
+               return err;
+       err = percpu_counter_init(&swapin_readahead_total, 0, GFP_KERNEL);
+       if (err)
+               percpu_counter_destroy(&swapin_readahead_hits_total);
+       return err;
+}
+subsys_initcall(swap_init);
 
 void show_swap_cache_info(void)
 {
@@ -305,8 +315,10 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 
        if (page && likely(!PageTransCompound(page))) {
                INC_CACHE_INFO(find_success);
-               if (TestClearPageReadahead(page))
+               if (TestClearPageReadahead(page)) {
                        atomic_inc(&swapin_readahead_hits);
+                       percpu_counter_inc(&swapin_readahead_hits_total);
+               }
        }
 
        INC_CACHE_INFO(find_total);
@@ -516,8 +528,11 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                                gfp_mask, vma, addr, false);
                if (!page)
                        continue;
-               if (offset != entry_offset && likely(!PageTransCompound(page)))
+               if (offset != entry_offset &&
+                   likely(!PageTransCompound(page))) {
                        SetPageReadahead(page);
+                       percpu_counter_inc(&swapin_readahead_total);
+               }
                put_page(page);
        }
        blk_finish_plug(&plug);
@@ -603,12 +618,31 @@ static ssize_t swap_cache_find_total_show(
 static struct kobj_attribute swap_cache_find_total_attr =
        __ATTR(cache_find_total, 0444, swap_cache_find_total_show, NULL);
 
+static ssize_t swap_readahead_hits_show(
+       struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lld\n",
+                      percpu_counter_sum(&swapin_readahead_hits_total));
+}
+static struct kobj_attribute swap_readahead_hits_attr =
+       __ATTR(ra_hits, 0444, swap_readahead_hits_show, NULL);
+
+static ssize_t swap_readahead_total_show(
+       struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lld\n", percpu_counter_sum(&swapin_readahead_total));
+}
+static struct kobj_attribute swap_readahead_total_attr =
+       __ATTR(ra_total, 0444, swap_readahead_total_show, NULL);
+
 static struct attribute *swap_attrs[] = {
        &swap_cache_pages_attr.attr,
        &swap_cache_add_attr.attr,
        &swap_cache_del_attr.attr,
        &swap_cache_find_success_attr.attr,
        &swap_cache_find_total_attr.attr,
+       &swap_readahead_hits_attr.attr,
+       &swap_readahead_total_attr.attr,
        NULL,
 };
 
-- 
2.13.2

Reply via email to