[PATCH v1 09/10] zsmalloc: add fullness into stat

2015-01-20 Thread Minchan Kim
During investigating compaction, fullness information of each class
and pages_per_zspage are helpful for investigating how compaction
works well on each size class.

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/zsmalloc.c | 349 +++---
 1 file changed, 184 insertions(+), 165 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8217e8e..48b702e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -192,6 +192,8 @@ enum fullness_group {
 enum zs_stat_type {
OBJ_ALLOCATED,
OBJ_USED,
+   CLASS_ALMOST_FULL,
+   CLASS_ALMOST_EMPTY,
NR_ZS_STAT_TYPE,
 };
 
@@ -404,6 +406,11 @@ static struct zpool_driver zs_zpool_driver = {
 MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
+static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
+{
+   return pages_per_zspage * PAGE_SIZE / size;
+}
+
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -457,6 +464,179 @@ static int get_size_class_index(int size)
return idx;
 }
 
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static inline void zs_stat_inc(struct size_class *class,
+   enum zs_stat_type type, unsigned long cnt)
+{
+   class->stats.objs[type] += cnt;
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+   enum zs_stat_type type, unsigned long cnt)
+{
+   class->stats.objs[type] -= cnt;
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+   enum zs_stat_type type)
+{
+   return class->stats.objs[type];
+}
+
+static int __init zs_stat_init(void)
+{
+   if (!debugfs_initialized())
+   return -ENODEV;
+
+   zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
+   if (!zs_stat_root)
+   return -ENOMEM;
+
+   return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+   debugfs_remove_recursive(zs_stat_root);
+}
+
+static int zs_stats_size_show(struct seq_file *s, void *v)
+{
+   int i;
+   struct zs_pool *pool = s->private;
+   struct size_class *class;
+   int objs_per_zspage;
+   unsigned long class_almost_full, class_almost_empty;
+   unsigned long obj_allocated, obj_used, pages_used;
+   unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
+   unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+
+   seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+   "class", "size", "almost_full", "almost_empty",
+   "obj_allocated", "obj_used", "pages_used",
+   "pages_per_zspage");
+
+   for (i = 0; i < zs_size_classes; i++) {
+   class = pool->size_class[i];
+
+   if (class->index != i)
+   continue;
+
+   spin_lock(&class->lock);
+   class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
+   class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
+   obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+   obj_used = zs_stat_get(class, OBJ_USED);
+   spin_unlock(&class->lock);
+
+   objs_per_zspage = get_maxobj_per_zspage(class->size,
+   class->pages_per_zspage);
+   pages_used = obj_allocated / objs_per_zspage *
+   class->pages_per_zspage;
+
+   seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+   i, class->size, class_almost_full, class_almost_empty,
+   obj_allocated, obj_used, pages_used,
+   class->pages_per_zspage);
+
+   total_class_almost_full += class_almost_full;
+   total_class_almost_empty += class_almost_empty;
+   total_objs += obj_allocated;
+   total_used_objs += obj_used;
+   total_pages += pages_used;
+   }
+
+   seq_puts(s, "\n");
+   seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+   "Total", "", total_class_almost_full,
+   total_class_almost_empty, total_objs,
+   total_used_objs, total_pages);
+
+   return 0;
+}
+
+static int zs_stats_size_open(struct inode *inode, struct file *file)
+{
+   return single_open(file, zs_stats_size_show, inode->i_private);
+}
+
+static const struct file_operations zs_stat_size_ops = {
+   .open   = zs_stats_size_open,
+   .read   = seq_read,
+   .llseek = seq_lseek,
+   .release= single_release,
+};
+
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+   struct dentry *entry;
+
+   if (!zs_stat_root)
+   return -ENODEV;
+
+   entry = debugfs_create_dir(name, zs_stat_root);
+   if (!entry) {
+   

[PATCH v1 09/10] zsmalloc: add fullness into stat

2015-01-20 Thread Minchan Kim
During investigating compaction, fullness information of each class
and pages_per_zspage are helpful for investigating how compaction
works well on each size class.

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/zsmalloc.c | 349 +++---
 1 file changed, 184 insertions(+), 165 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8217e8e..48b702e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -192,6 +192,8 @@ enum fullness_group {
 enum zs_stat_type {
OBJ_ALLOCATED,
OBJ_USED,
+   CLASS_ALMOST_FULL,
+   CLASS_ALMOST_EMPTY,
NR_ZS_STAT_TYPE,
 };
 
@@ -404,6 +406,11 @@ static struct zpool_driver zs_zpool_driver = {
 MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
+static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
+{
+   return pages_per_zspage * PAGE_SIZE / size;
+}
+
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -457,6 +464,179 @@ static int get_size_class_index(int size)
return idx;
 }
 
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static inline void zs_stat_inc(struct size_class *class,
+   enum zs_stat_type type, unsigned long cnt)
+{
+   class->stats.objs[type] += cnt;
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+   enum zs_stat_type type, unsigned long cnt)
+{
+   class->stats.objs[type] -= cnt;
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+   enum zs_stat_type type)
+{
+   return class->stats.objs[type];
+}
+
+static int __init zs_stat_init(void)
+{
+   if (!debugfs_initialized())
+   return -ENODEV;
+
+   zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
+   if (!zs_stat_root)
+   return -ENOMEM;
+
+   return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+   debugfs_remove_recursive(zs_stat_root);
+}
+
+static int zs_stats_size_show(struct seq_file *s, void *v)
+{
+   int i;
+   struct zs_pool *pool = s->private;
+   struct size_class *class;
+   int objs_per_zspage;
+   unsigned long class_almost_full, class_almost_empty;
+   unsigned long obj_allocated, obj_used, pages_used;
+   unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
+   unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+
+   seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+   "class", "size", "almost_full", "almost_empty",
+   "obj_allocated", "obj_used", "pages_used",
+   "pages_per_zspage");
+
+   for (i = 0; i < zs_size_classes; i++) {
+   class = pool->size_class[i];
+
+   if (class->index != i)
+   continue;
+
+   spin_lock(&class->lock);
+   class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
+   class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
+   obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+   obj_used = zs_stat_get(class, OBJ_USED);
+   spin_unlock(&class->lock);
+
+   objs_per_zspage = get_maxobj_per_zspage(class->size,
+   class->pages_per_zspage);
+   pages_used = obj_allocated / objs_per_zspage *
+   class->pages_per_zspage;
+
+   seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+   i, class->size, class_almost_full, class_almost_empty,
+   obj_allocated, obj_used, pages_used,
+   class->pages_per_zspage);
+
+   total_class_almost_full += class_almost_full;
+   total_class_almost_empty += class_almost_empty;
+   total_objs += obj_allocated;
+   total_used_objs += obj_used;
+   total_pages += pages_used;
+   }
+
+   seq_puts(s, "\n");
+   seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+   "Total", "", total_class_almost_full,
+   total_class_almost_empty, total_objs,
+   total_used_objs, total_pages);
+
+   return 0;
+}
+
+static int zs_stats_size_open(struct inode *inode, struct file *file)
+{
+   return single_open(file, zs_stats_size_show, inode->i_private);
+}
+
+static const struct file_operations zs_stat_size_ops = {
+   .open   = zs_stats_size_open,
+   .read   = seq_read,
+   .llseek = seq_lseek,
+   .release= single_release,
+};
+
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+   struct dentry *entry;
+
+   if (!zs_stat_root)
+   return -ENODEV;
+
+   entry = debugfs_create_dir(name, zs_stat_root);
+   if (!entry) {
+   pr_warn("debugfs dir