This patch introduces pte_spinlock(), a helper which fetches the page
table lock pointer and takes the lock. It is needed because
handle_pte_fault() calls pte_offset_map() first and only later fetches
vmf->ptl and spin-locks it.

That lock fetch and spin_lock() sequence was previously embedded in the
call to pte_offset_map_lock().
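
For reference, pte_offset_map_lock() is roughly the following macro
(shown here for illustration; the exact definition for this kernel
version lives in include/linux/mm.h):

	#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	({							\
		spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
		pte_t *__pte = pte_offset_map(pmd, address);	\
		*(ptlp) = __ptl;				\
		spin_lock(__ptl);				\
		__pte;						\
	})

The new pte_spinlock() helper factors out only the pte_lockptr() +
spin_lock() half of this sequence, which do_numa_page() and
handle_pte_fault() so far open-coded. It always returns true for now;
returning bool and having callers fall back to VM_FAULT_RETRY
presumably leaves room for a later change where taking the lock may
legitimately fail.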

Signed-off-by: Laurent Dufour <[email protected]>
---
 mm/memory.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index bce32c9d73c2..441c0e3f3a0f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2100,6 +2100,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
+static bool pte_spinlock(struct vm_fault *vmf)
+{
+       vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+       spin_lock(vmf->ptl);
+       return true;
+}
+
 static bool pte_map_lock(struct vm_fault *vmf)
 {
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
@@ -3398,8 +3405,8 @@ static int do_numa_page(struct vm_fault *vmf)
        * page table entry is not accessible, so there would be no
        * concurrent hardware modifications to the PTE.
        */
-       vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
-       spin_lock(vmf->ptl);
+       if (!pte_spinlock(vmf))
+               return VM_FAULT_RETRY;
        if (unlikely(!pte_same(*vmf->pte, pte))) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                goto out;
@@ -3566,8 +3573,8 @@ static int handle_pte_fault(struct vm_fault *vmf)
        if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
                return do_numa_page(vmf);
 
-       vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-       spin_lock(vmf->ptl);
+       if (!pte_spinlock(vmf))
+               return VM_FAULT_RETRY;
        entry = vmf->orig_pte;
        if (unlikely(!pte_same(*vmf->pte, entry)))
                goto unlock;
-- 
2.7.4
