Provide the current number of vmalloc shadow pages in
/sys/kernel/debug/kasan/vmalloc_shadow_pages.

Signed-off-by: Daniel Axtens <d...@axtens.net>

---

v8: rename kasan_vmalloc/shadow_pages -> kasan/vmalloc_shadow_pages

On v4 (no dynamic freeing), I saw the following approximate figures
on my test VM:

 - fresh boot: 720
 - after test_vmalloc: ~14000

With v5 (lazy dynamic freeing):

 - boot: ~490-500
 - running modprobe test_vmalloc pushes the figure up, sometimes as
    high as ~14000, but it drops back down to ~560 after the test
    ends. I'm not sure where the extra sixty pages come from, but
    running the test repeatedly doesn't cause the number to keep
    growing, so I don't think we're leaking.
 - with vmap_stack, spawning tasks pushes the figure up to ~4200, then
    some clearing kicks in and drops it down to previous levels again.
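
For anyone wanting to poke at the counter from userspace, a plain
cat of /sys/kernel/debug/kasan/vmalloc_shadow_pages is enough once
debugfs is mounted. A minimal C sketch doing the same thing (the
helper name and the assumption that debugfs is mounted at
/sys/kernel/debug are mine, not part of the patch) might look like:

    /* read_vmalloc_shadow.c: hypothetical helper that dumps the KASAN
     * vmalloc shadow page count. Assumes debugfs is mounted at
     * /sys/kernel/debug.
     */
    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            FILE *f = fopen("/sys/kernel/debug/kasan/vmalloc_shadow_pages", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("vmalloc shadow pages: %s", buf);
            fclose(f);
            return 0;
    }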
---
 mm/kasan/common.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e33cbab83309..5b924f860a32 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
 #include <linux/uaccess.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 
@@ -750,6 +751,8 @@ core_initcall(kasan_memhotplug_init);
 #endif
 
 #ifdef CONFIG_KASAN_VMALLOC
+static u64 vmalloc_shadow_pages;
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
 {
@@ -776,6 +779,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
+               vmalloc_shadow_pages++;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
@@ -829,6 +833,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
+               vmalloc_shadow_pages--;
        }
        spin_unlock(&init_mm.page_table_lock);
 
@@ -947,4 +952,25 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
                                       (unsigned long)shadow_end);
        }
 }
+
+static __init int kasan_init_debugfs(void)
+{
+       struct dentry *root, *count;
+
+       root = debugfs_create_dir("kasan", NULL);
+       if (IS_ERR(root)) {
+               if (PTR_ERR(root) == -ENODEV)
+                       return 0;
+               return PTR_ERR(root);
+       }
+
+       count = debugfs_create_u64("vmalloc_shadow_pages", 0444, root,
+                                  &vmalloc_shadow_pages);
+
+       if (IS_ERR(count))
+               return PTR_ERR(count);
+
+       return 0;
+}
+late_initcall(kasan_init_debugfs);
 #endif
-- 
2.20.1
