Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=ae41be374293e70e1ed441d986afcc6e744ef9d9
Commit:     ae41be374293e70e1ed441d986afcc6e744ef9d9
Parent:     9175e0311ec9e6d1bf1f6dfecf9268baf08765e6
Author:     KAMEZAWA Hiroyuki <[EMAIL PROTECTED]>
AuthorDate: Thu Feb 7 00:14:10 2008 -0800
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Thu Feb 7 08:42:19 2008 -0800

    bugfix for memory cgroup controller: migration under memory controller fix
    
    While using the memory control cgroup, page migration under it works as
    follows.
    ==
     1. uncharge all refs when trying to unmap.
     2. charge refs again in remove_migration_ptes().
    ==
    This is simple but has the following problems.
    ==
     The page is uncharged and charged back again only if it is *mapped*.
        - This means that the cgroup before migration can be different from
          the one after migration.
        - If the page is not mapped but charged as page cache, the charge is
          simply lost (because the page is not mapped, it will not be
          uncharged before migration).  This is a memory leak.
    ==
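    To illustrate the leak (a rough sketch of the old behaviour, not the
    exact call chain; the uncharge/charge actually happen inside the rmap
    code that these calls invoke):
    
	try_to_unmap(page, 1);            /* mapped pages get uncharged here */
	rc = move_to_new_page(newpage, page);
	if (!rc)
		remove_migration_ptes(page, newpage);   /* newpage is charged
							   again, possibly to a
							   different cgroup */
	/*
	 * A charged-but-unmapped page-cache page takes neither path, so its
	 * charge is never uncharged and never carried over: the leak above.
	 */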
    This patch keeps the memory cgroup stable across page migration by
    holding one extra refcnt on the page_cgroup for the duration of the
    migration.  Three functions are added.
    
     mem_cgroup_prepare_migration() --- increase refcnt of page->page_cgroup
     mem_cgroup_end_migration()     --- decrease refcnt of page->page_cgroup
     mem_cgroup_page_migration()    --- move page->page_cgroup from the old
                                        page to the new page.
    
    During migration:
      - the old page is under PG_locked.
      - the new page is under PG_locked, too.
      - neither the old page nor the new page is on the LRU.
    
    These three facts guarantee that the page_cgroup migration has no races.
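    
    Summarized from the diff below, the resulting call sequence in
    unmap_and_move() is:
    
	charge = mem_cgroup_prepare_migration(page);  /* pin page_cgroup */
	try_to_unmap(page, 1);
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page); /* on success this calls
							 mem_cgroup_page_migration() */
	if (charge)
		mem_cgroup_end_migration(rc ? page : newpage); /* drop the ref */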
    
    Tested and working well on an x86_64 fake-NUMA box.
    
    Signed-off-by: KAMEZAWA Hiroyuki <[EMAIL PROTECTED]>
    Cc: Balbir Singh <[EMAIL PROTECTED]>
    Cc: Pavel Emelianov <[EMAIL PROTECTED]>
    Cc: Paul Menage <[EMAIL PROTECTED]>
    Cc: Peter Zijlstra <[EMAIL PROTECTED]>
    Cc: "Eric W. Biederman" <[EMAIL PROTECTED]>
    Cc: Nick Piggin <[EMAIL PROTECTED]>
    Cc: Kirill Korotaev <[EMAIL PROTECTED]>
    Cc: Herbert Poetzl <[EMAIL PROTECTED]>
    Cc: David Rientjes <[EMAIL PROTECTED]>
    Cc: Vaidyanathan Srinivasan <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 include/linux/memcontrol.h |   19 +++++++++++++++++++
 mm/memcontrol.c            |   43 +++++++++++++++++++++++++++++++++++++++++++
 mm/migrate.c               |   13 ++++++++++---
 3 files changed, 72 insertions(+), 3 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 42536c7..4ec7129 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -60,6 +60,10 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
        mem_cgroup_uncharge(page_get_page_cgroup(page));
 }
 
+extern int mem_cgroup_prepare_migration(struct page *page);
+extern void mem_cgroup_end_migration(struct page *page);
+extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
                                        struct task_struct *p)
@@ -117,6 +121,21 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
        return 1;
 }
 
+static inline int mem_cgroup_prepare_migration(struct page *page)
+{
+       return 0;
+}
+
+static inline void mem_cgroup_end_migration(struct page *page)
+{
+}
+
+static inline void
+mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+}
+
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3270ce7..128f45c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -492,6 +492,49 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
                }
        }
 }
+/*
+ * Returns non-zero if a page (under migration) has valid page_cgroup member.
+ * Refcnt of page_cgroup is incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+       struct page_cgroup *pc;
+       int ret = 0;
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+               ret = 1;
+       unlock_page_cgroup(page);
+       return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+       struct page_cgroup *pc = page_get_page_cgroup(page);
+       mem_cgroup_uncharge(pc);
+}
+/*
+ * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
+ * And no race with uncharge() routines because page_cgroup for *page*
+ * has extra one reference by mem_cgroup_prepare_migration.
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+       struct page_cgroup *pc;
+retry:
+       pc = page_get_page_cgroup(page);
+       if (!pc)
+               return;
+       if (clear_page_cgroup(page, pc) != pc)
+               goto retry;
+       pc->page = newpage;
+       lock_page_cgroup(newpage);
+       page_assign_page_cgroup(newpage, pc);
+       unlock_page_cgroup(newpage);
+       return;
+}
 
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 7637941..a73504f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,9 +593,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
        else
                rc = fallback_migrate_page(mapping, newpage, page);
 
-       if (!rc)
+       if (!rc) {
+               mem_cgroup_page_migration(page, newpage);
                remove_migration_ptes(page, newpage);
-       else
+       } else
                newpage->mapping = NULL;
 
        unlock_page(newpage);
@@ -614,6 +615,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
+       int charge = 0;
 
        if (!newpage)
                return -ENOMEM;
@@ -673,14 +675,19 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                goto rcu_unlock;
        }
 
+       charge = mem_cgroup_prepare_migration(page);
        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);
 
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
 
-       if (rc)
+       if (rc) {
                remove_migration_ptes(page, page);
+               if (charge)
+                       mem_cgroup_end_migration(page);
+       } else if (charge)
+               mem_cgroup_end_migration(newpage);
 rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();
-