Add memcg routines to track dirty, writeback, and unstable NFS pages.
These routines are not yet used by the kernel to count such pages; a
later change adds the kernel calls to these new routines.
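
As an illustration only (the call sites land in a later patch, so the
function below is a hypothetical sketch rather than part of this change),
a follow-on patch could hook up the new dirty counter roughly like this:

	/*
	 * Hypothetical call site, e.g. in mm/page-writeback.c: charge a
	 * newly dirtied page cache page to its cgroup.  The zone
	 * accounting shown already exists in account_page_dirtied().
	 */
	static void hypothetical_account_dirty(struct page *page,
					       struct address_space *mapping)
	{
		if (mapping_cap_account_dirty(mapping)) {
			/* per-memcg dirty page accounting (this patch) */
			mem_cgroup_update_page_stat(page,
						    MEMCG_NR_FILE_DIRTY, 1);
			/* existing global/zone accounting is unchanged */
			__inc_zone_page_state(page, NR_FILE_DIRTY);
		}
	}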

Signed-off-by: Greg Thelen <[email protected]>
Signed-off-by: Andrea Righi <[email protected]>
---

Changelog since v1:
- Renamed "nfs"/"total_nfs" to "nfs_unstable"/"total_nfs_unstable" in per cgroup
  memory.stat to match /proc/meminfo.
- Rename (for clarity):
  - mem_cgroup_write_page_stat_item -> mem_cgroup_page_stat_item
  - mem_cgroup_read_page_stat_item -> mem_cgroup_nr_pages_item
- Remove redundant comments.
- Made mem_cgroup_move_account_page_stat() inline.
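
Note: the Test{Set,Clear}PageCgroupFile* helpers used in this patch are
assumed to come from an earlier patch in the series that adds PCG_FILE_*
flags to struct page_cgroup, roughly along these lines:

	/* assumed page_cgroup flag accessors (from an earlier patch) */
	enum {
		/* existing PCG_* flags precede these */
		PCG_FILE_DIRTY,		/* page is dirty */
		PCG_FILE_WRITEBACK,	/* page is under writeback IO */
		PCG_FILE_UNSTABLE_NFS,	/* page is NFS unstable */
	};

	TESTPCGFLAG(FileDirty, FILE_DIRTY)
	SETPCGFLAG(FileDirty, FILE_DIRTY)
	CLEARPCGFLAG(FileDirty, FILE_DIRTY)
	TESTSETPCGFLAG(FileDirty, FILE_DIRTY)
	TESTCLEARPCGFLAG(FileDirty, FILE_DIRTY)

The Test{Set,Clear} variants make the accounting idempotent: the counter
is only adjusted when the flag actually changes state, so a page is never
charged or uncharged twice for the same transition.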

 include/linux/memcontrol.h |    3 ++
 mm/memcontrol.c            |   86 +++++++++++++++++++++++++++++++++++++++----
 2 files changed, 81 insertions(+), 8 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 067115c..ef2eec7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,9 @@ struct mm_struct;
 /* Stats that can be updated by kernel. */
 enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+       MEMCG_NR_FILE_DIRTY, /* # of dirty pages in page cache */
+       MEMCG_NR_FILE_WRITEBACK, /* # of pages under writeback */
+       MEMCG_NR_FILE_UNSTABLE_NFS, /* # of NFS unstable pages */
 };
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 697f7b8..3ac2693 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -85,10 +85,13 @@ enum mem_cgroup_stat_index {
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
-       MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
+       MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
+       MEM_CGROUP_STAT_FILE_DIRTY,     /* # of dirty pages in page cache */
+       MEM_CGROUP_STAT_FILE_WRITEBACK,         /* # of pages under writeback */
+       MEM_CGROUP_STAT_FILE_UNSTABLE_NFS,      /* # of NFS unstable pages */
        MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
        /* incremented at every  pagein/pageout */
        MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
@@ -1642,6 +1645,44 @@ void mem_cgroup_update_page_stat(struct page *page,
                        ClearPageCgroupFileMapped(pc);
                idx = MEM_CGROUP_STAT_FILE_MAPPED;
                break;
+
+       case MEMCG_NR_FILE_DIRTY:
+               /* Use Test{Set,Clear} to only un/charge the memcg once. */
+               if (val > 0) {
+                       if (TestSetPageCgroupFileDirty(pc))
+                               val = 0;
+               } else {
+                       if (!TestClearPageCgroupFileDirty(pc))
+                               val = 0;
+               }
+               idx = MEM_CGROUP_STAT_FILE_DIRTY;
+               break;
+
+       case MEMCG_NR_FILE_WRITEBACK:
+               /*
+                * This counter is adjusted while holding the mapping's
+                * tree_lock.  Therefore there is no race between setting and
+                * clearing of this flag.
+                */
+               if (val > 0)
+                       SetPageCgroupFileWriteback(pc);
+               else
+                       ClearPageCgroupFileWriteback(pc);
+               idx = MEM_CGROUP_STAT_FILE_WRITEBACK;
+               break;
+
+       case MEMCG_NR_FILE_UNSTABLE_NFS:
+               /* Use Test{Set,Clear} to only un/charge the memcg once. */
+               if (val > 0) {
+                       if (TestSetPageCgroupFileUnstableNFS(pc))
+                               val = 0;
+               } else {
+                       if (!TestClearPageCgroupFileUnstableNFS(pc))
+                               val = 0;
+               }
+               idx = MEM_CGROUP_STAT_FILE_UNSTABLE_NFS;
+               break;
+
        default:
                BUG();
        }
@@ -2146,6 +2187,17 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        memcg_check_events(mem, pc->page);
 }
 
+static inline
+void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
+                                      struct mem_cgroup *to,
+                                      enum mem_cgroup_stat_index idx)
+{
+       preempt_disable();
+       __this_cpu_dec(from->stat->count[idx]);
+       __this_cpu_inc(to->stat->count[idx]);
+       preempt_enable();
+}
+
 /**
  * __mem_cgroup_move_account - move account of the page
  * @pc:        page_cgroup of the page.
@@ -2172,13 +2224,18 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
        VM_BUG_ON(!PageCgroupUsed(pc));
        VM_BUG_ON(pc->mem_cgroup != from);
 
-       if (PageCgroupFileMapped(pc)) {
-               /* Update mapped_file data for mem_cgroup */
-               preempt_disable();
-               __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               preempt_enable();
-       }
+       if (PageCgroupFileMapped(pc))
+               mem_cgroup_move_account_page_stat(from, to,
+                                       MEM_CGROUP_STAT_FILE_MAPPED);
+       if (PageCgroupFileDirty(pc))
+               mem_cgroup_move_account_page_stat(from, to,
+                                       MEM_CGROUP_STAT_FILE_DIRTY);
+       if (PageCgroupFileWriteback(pc))
+               mem_cgroup_move_account_page_stat(from, to,
+                                       MEM_CGROUP_STAT_FILE_WRITEBACK);
+       if (PageCgroupFileUnstableNFS(pc))
+               mem_cgroup_move_account_page_stat(from, to,
+                                       MEM_CGROUP_STAT_FILE_UNSTABLE_NFS);
        mem_cgroup_charge_statistics(from, pc, false);
        if (uncharge)
                /* This is not "cancel", but cancel_charge does all we need. */
@@ -3557,6 +3614,9 @@ enum {
        MCS_PGPGIN,
        MCS_PGPGOUT,
        MCS_SWAP,
+       MCS_FILE_DIRTY,
+       MCS_WRITEBACK,
+       MCS_UNSTABLE_NFS,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
@@ -3579,6 +3639,9 @@ struct {
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
        {"swap", "total_swap"},
+       {"dirty", "total_dirty"},
+       {"writeback", "total_writeback"},
+       {"nfs_unstable", "total_nfs_unstable"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
@@ -3608,6 +3671,13 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
                s->stat[MCS_SWAP] += val * PAGE_SIZE;
        }
 
+       val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_DIRTY);
+       s->stat[MCS_FILE_DIRTY] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_WRITEBACK);
+       s->stat[MCS_WRITEBACK] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_UNSTABLE_NFS);
+       s->stat[MCS_UNSTABLE_NFS] += val * PAGE_SIZE;
+
        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
        s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
-- 
1.7.1
