[PATCHv4 06/24] mm: store mapcount for compound page separately

2015-03-04 Thread Kirill A. Shutemov
We're going to allow mapping of individual 4k pages of THP compound and
we need a cheap way to find out how many time the compound page is
mapped with PMD -- compound_mapcount() does this.

We use the same approach as with compound page destructor and compound
order: use space in first tail page, ->mapping this time.

page_mapcount() counts both: PTE and PMD mappings of the page.

Signed-off-by: Kirill A. Shutemov 
---
 include/linux/mm.h   | 16 ++--
 include/linux/mm_types.h |  1 +
 include/linux/rmap.h |  4 ++--
 mm/debug.c   |  5 -
 mm/huge_memory.c | 23 ++-
 mm/hugetlb.c |  4 ++--
 mm/memory.c  |  4 ++--
 mm/migrate.c |  2 +-
 mm/page_alloc.c  |  7 ++-
 mm/rmap.c| 47 +++
 10 files changed, 85 insertions(+), 28 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1aea94e837a0..b64dfe352d71 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -472,6 +472,18 @@ static inline struct page *compound_head_fast(struct page 
*page)
return page->first_page;
return page;
 }
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+   return &page[1].compound_mapcount;
+}
+
+static inline int compound_mapcount(struct page *page)
+{
+   if (!PageCompound(page))
+   return 0;
+   page = compound_head(page);
+   return atomic_read(compound_mapcount_ptr(page)) + 1;
+}
 
 /*
  * The atomic page->_mapcount, starts from -1: so that transitions
@@ -486,7 +498,7 @@ static inline void page_mapcount_reset(struct page *page)
 static inline int page_mapcount(struct page *page)
 {
VM_BUG_ON_PAGE(PageSlab(page), page);
-   return atomic_read(&(page)->_mapcount) + 1;
+   return atomic_read(&(page)->_mapcount) + compound_mapcount(page) + 1;
 }
 
 static inline int page_count(struct page *page)
@@ -1081,7 +1093,7 @@ static inline pgoff_t page_file_index(struct page *page)
  */
 static inline int page_mapped(struct page *page)
 {
-   return atomic_read(&(page)->_mapcount) >= 0;
+   return atomic_read(&(page)->_mapcount) + compound_mapcount(page) >= 0;
 }
 
 /*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 590630eb59ba..aefbc95148c4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -56,6 +56,7 @@ struct page {
 * see PAGE_MAPPING_ANON below.
 */
void *s_mem;/* slab first object */
+   atomic_t compound_mapcount; /* first tail page */
};
 
/* Second double word */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index ebe50ceacea6..ec63d3f20ca3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -190,9 +190,9 @@ void hugepage_add_anon_rmap(struct page *, struct 
vm_area_struct *,
 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long);
 
-static inline void page_dup_rmap(struct page *page)
+static inline void page_dup_rmap(struct page *page, bool compound)
 {
-   atomic_inc(&page->_mapcount);
+   atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
 }
 
 /*
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..13d2b8146ef9 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -83,9 +83,12 @@ static void dump_flags(unsigned long flags,
 void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags)
 {
-   pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
+   pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
  page, atomic_read(&page->_count), page_mapcount(page),
  page->mapping, page->index);
+   if (PageCompound(page))
+   pr_cont(" compound_mapcount: %d", compound_mapcount(page));
+   pr_cont("\n");
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
if (reason)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 38c6b72cbe80..0c83451679f8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -890,7 +890,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct 
mm_struct *src_mm,
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
get_page(src_page);
-   page_dup_rmap(src_page);
+   page_dup_rmap(src_page, true);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 
pmdp_set_wrprotect(src_mm, addr, src_pmd);
@@ -1787,8 +1787,8 @@ static void __split_huge_page_refcount(struct page *page,
struct page *page_tail = page + i;
 
/* tail_page->_mapcount cannot change */
-   BUG_ON(page_mapcount(page_tail) < 0);
-   tail_count += page_mapcount(page_tail);

[PATCHv4 06/24] mm: store mapcount for compound page separately

2015-03-04 Thread Kirill A. Shutemov
We're going to allow mapping of individual 4k pages of THP compound and
we need a cheap way to find out how many time the compound page is
mapped with PMD -- compound_mapcount() does this.

We use the same approach as with compound page destructor and compound
order: use space in first tail page, ->mapping this time.

page_mapcount() counts both: PTE and PMD mappings of the page.

Signed-off-by: Kirill A. Shutemov kirill.shute...@linux.intel.com
---
 include/linux/mm.h   | 16 ++--
 include/linux/mm_types.h |  1 +
 include/linux/rmap.h |  4 ++--
 mm/debug.c   |  5 -
 mm/huge_memory.c | 23 ++-
 mm/hugetlb.c |  4 ++--
 mm/memory.c  |  4 ++--
 mm/migrate.c |  2 +-
 mm/page_alloc.c  |  7 ++-
 mm/rmap.c| 47 +++
 10 files changed, 85 insertions(+), 28 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1aea94e837a0..b64dfe352d71 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -472,6 +472,18 @@ static inline struct page *compound_head_fast(struct page 
*page)
	return page->first_page;
return page;
 }
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+   return &page[1].compound_mapcount;
+}
+
+static inline int compound_mapcount(struct page *page)
+{
+   if (!PageCompound(page))
+   return 0;
+   page = compound_head(page);
+   return atomic_read(compound_mapcount_ptr(page)) + 1;
+}
 
 /*
  * The atomic page->_mapcount, starts from -1: so that transitions
@@ -486,7 +498,7 @@ static inline void page_mapcount_reset(struct page *page)
 static inline int page_mapcount(struct page *page)
 {
VM_BUG_ON_PAGE(PageSlab(page), page);
-   return atomic_read(&(page)->_mapcount) + 1;
+   return atomic_read(&(page)->_mapcount) + compound_mapcount(page) + 1;
 }
 
 static inline int page_count(struct page *page)
@@ -1081,7 +1093,7 @@ static inline pgoff_t page_file_index(struct page *page)
  */
 static inline int page_mapped(struct page *page)
 {
-   return atomic_read(&(page)->_mapcount) >= 0;
+   return atomic_read(&(page)->_mapcount) + compound_mapcount(page) >= 0;
 }
 
 /*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 590630eb59ba..aefbc95148c4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -56,6 +56,7 @@ struct page {
 * see PAGE_MAPPING_ANON below.
 */
void *s_mem;/* slab first object */
+   atomic_t compound_mapcount; /* first tail page */
};
 
/* Second double word */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index ebe50ceacea6..ec63d3f20ca3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -190,9 +190,9 @@ void hugepage_add_anon_rmap(struct page *, struct 
vm_area_struct *,
 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long);
 
-static inline void page_dup_rmap(struct page *page)
+static inline void page_dup_rmap(struct page *page, bool compound)
 {
-   atomic_inc(&page->_mapcount);
+   atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
 }
 
 /*
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..13d2b8146ef9 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -83,9 +83,12 @@ static void dump_flags(unsigned long flags,
 void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags)
 {
-   pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
+   pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
  page, atomic_read(&page->_count), page_mapcount(page),
  page->mapping, page->index);
+   if (PageCompound(page))
+   pr_cont(" compound_mapcount: %d", compound_mapcount(page));
+   pr_cont("\n");
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
	dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
if (reason)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 38c6b72cbe80..0c83451679f8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -890,7 +890,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct 
mm_struct *src_mm,
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
get_page(src_page);
-   page_dup_rmap(src_page);
+   page_dup_rmap(src_page, true);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 
pmdp_set_wrprotect(src_mm, addr, src_pmd);
@@ -1787,8 +1787,8 @@ static void __split_huge_page_refcount(struct page *page,
struct page *page_tail = page + i;
 
	/* tail_page->_mapcount cannot change */
-   BUG_ON(page_mapcount(page_tail) < 0);
-