[PATCHv4 04/10] mm, thp: change pmd_trans_huge_lock() to return taken lock

2013-09-27 Thread Kirill A. Shutemov
With split ptlock it's important to know which lock pmd_trans_huge_lock()
took. This patch adds one more parameter to the function to return the
lock.

In most places migration to new api is trivial.
Exception is move_huge_pmd(): we need to take two locks if pmd tables
are different.

Signed-off-by: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Tested-by: Alex Thorlton <athorl...@sgi.com>
---
 fs/proc/task_mmu.c  | 13 +++--
 include/linux/huge_mm.h | 14 +++---
 mm/huge_memory.c| 40 +++-
 mm/memcontrol.c | 10 +-
 4 files changed, 46 insertions(+), 31 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c52c597fbf..d7df94069e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -506,9 +506,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, 
unsigned long end,
pte_t *pte;
spinlock_t *ptl;
 
-   if (pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
@@ -994,13 +994,14 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long 
addr, unsigned long end,
 {
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
+   spinlock_t *ptl;
pte_t *pte;
int err = 0;
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
-   if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
 
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1018,7 +1019,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long 
addr, unsigned long end,
if (err)
break;
}
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
return err;
}
 
@@ -1320,7 +1321,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long 
addr,
 
md = walk->private;
 
-   if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
+   if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
 
@@ -1328,7 +1329,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long 
addr,
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
 HPAGE_PMD_SIZE/PAGE_SIZE);
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
return 0;
}
 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428c57..4aca0d8da1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -129,15 +129,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct 
*vma,
unsigned long start,
unsigned long end,
long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd,
-struct vm_area_struct *vma);
+extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl);
 /* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl)
 {
VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
if (pmd_trans_huge(*pmd))
-   return __pmd_trans_huge_lock(pmd, vma);
+   return __pmd_trans_huge_lock(pmd, vma, ptl);
else
return 0;
 }
@@ -215,8 +215,8 @@ static inline void vma_adjust_trans_huge(struct 
vm_area_struct *vma,
 long adjust_next)
 {
 }
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl)
 {
return 0;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bbd41a2f49..59a1340f35 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1342,9 +1342,10 @@ out_unlock:
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 pmd_t *pmd, unsigned long addr)
 {
+   spinlock_t *ptl;
int ret = 0;
 
-   if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
struct page *page;
pgtable_t pgtable;
pmd_t 

[PATCHv4 04/10] mm, thp: change pmd_trans_huge_lock() to return taken lock

2013-09-27 Thread Kirill A. Shutemov
With split ptlock it's important to know which lock pmd_trans_huge_lock()
took. This patch adds one more parameter to the function to return the
lock.

In most places migration to new api is trivial.
Exception is move_huge_pmd(): we need to take two locks if pmd tables
are different.

Signed-off-by: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Tested-by: Alex Thorlton <athorl...@sgi.com>
---
 fs/proc/task_mmu.c  | 13 +++--
 include/linux/huge_mm.h | 14 +++---
 mm/huge_memory.c| 40 +++-
 mm/memcontrol.c | 10 +-
 4 files changed, 46 insertions(+), 31 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c52c597fbf..d7df94069e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -506,9 +506,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, 
unsigned long end,
pte_t *pte;
spinlock_t *ptl;
 
-   if (pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
@@ -994,13 +994,14 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long 
addr, unsigned long end,
 {
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
+   spinlock_t *ptl;
pte_t *pte;
int err = 0;
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
-   if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
 
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1018,7 +1019,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long 
addr, unsigned long end,
if (err)
break;
}
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
return err;
}
 
@@ -1320,7 +1321,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long 
addr,
 
md = walk->private;
 
-   if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
+   if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
 
@@ -1328,7 +1329,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long 
addr,
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
 HPAGE_PMD_SIZE/PAGE_SIZE);
-   spin_unlock(&walk->mm->page_table_lock);
+   spin_unlock(ptl);
return 0;
}
 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428c57..4aca0d8da1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -129,15 +129,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct 
*vma,
unsigned long start,
unsigned long end,
long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd,
-struct vm_area_struct *vma);
+extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl);
 /* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl)
 {
VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
if (pmd_trans_huge(*pmd))
-   return __pmd_trans_huge_lock(pmd, vma);
+   return __pmd_trans_huge_lock(pmd, vma, ptl);
else
return 0;
 }
@@ -215,8 +215,8 @@ static inline void vma_adjust_trans_huge(struct 
vm_area_struct *vma,
 long adjust_next)
 {
 }
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+   spinlock_t **ptl)
 {
return 0;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bbd41a2f49..59a1340f35 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1342,9 +1342,10 @@ out_unlock:
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 pmd_t *pmd, unsigned long addr)
 {
+   spinlock_t *ptl;
int ret = 0;
 
-   if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+   if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {