From: "Aneesh Kumar K.V" <aneesh.ku...@linux.vnet.ibm.com>

In a later patch we switch pmd_lock from mm->page_table_lock to a split
pmd ptlock. To avoid compilation issues there, use the pmd_lockptr()
helper instead of referencing mm->page_table_lock directly.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
---
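Note for reviewers: the generic pmd_lockptr() helper keeps these
assertions valid whether or not split pmd ptlocks end up being used,
because it falls back to the old per-mm lock in the non-split case.
Roughly (a paraphrased sketch of the include/linux/mm.h helper, not
part of this patch):

	static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
	{
	#if USE_SPLIT_PMD_PTLOCKS
		/* split case: the lock lives in the page backing the pmd table */
		return ptlock_ptr(pmd_to_page(pmd));
	#else
		/* non-split fallback: the single per-mm page_table_lock */
		return &mm->page_table_lock;
	#endif
	}
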
 arch/powerpc/mm/pgtable-book3s64.c | 4 ++--
 arch/powerpc/mm/pgtable-hash64.c   | 8 +++++---
 arch/powerpc/mm/pgtable-radix.c    | 2 +-
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 35913b0b6d56..e1c304183172 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -37,7 +37,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        int changed;
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
-       assert_spin_locked(&vma->vm_mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
 #endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
@@ -62,7 +62,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 {
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
 #endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 199bfda5f0d9..692bfc9e372c 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -193,7 +193,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
 
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
 #endif
 
        __asm__ __volatile__(
@@ -265,7 +265,8 @@ void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                  pgtable_t pgtable)
 {
        pgtable_t *pgtable_slot;
-       assert_spin_locked(&mm->page_table_lock);
+
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
        /*
         * we store the pgtable in the second half of PMD
         */
@@ -285,7 +286,8 @@ pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;
 
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
+
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index f1891e215e39..473415750cbf 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -975,7 +975,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
 
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
 #endif
 
        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
-- 
2.14.3