On 06/16/2017 11:22 PM, Laurent Dufour wrote:
> When handling a page fault without holding the mmap_sem, the fetch of the
> pte lock pointer and the locking will have to be done while ensuring
> that the VMA is not modified behind our back.

This does not change anything from what is happening right now. Where do
we check that the VMA has not changed in the meantime?
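
My guess is that the actual check only lands in a later patch of the
series. A rough sketch of what I would expect, where vma_still_valid(),
vm_sequence, vmf->sequence and FAULT_FLAG_SPECULATIVE are assumed names
from the rest of the series and not anything introduced here:

/*
 * Hypothetical sketch, not in this patch: the speculative path would
 * snapshot a per-VMA seqcount before walking the page tables and bail
 * out if the VMA was modified in the meantime.
 */
static bool vma_still_valid(struct vm_fault *vmf)
{
	/* The classic path holds mmap_sem, nothing can change. */
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE))
		return true;

	/* A non-zero retry means the VMA changed behind our back. */
	return !read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence);
}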

> 
> So move the fetch and locking operations in a dedicated function.
> 
> Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
> ---
>  mm/memory.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 40834444ea0d..f1132f7931ef 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2240,6 +2240,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
>       pte_unmap_unlock(vmf->pte, vmf->ptl);
>  }
>  
> +static bool pte_spinlock(struct vm_fault *vmf)
> +{
> +     vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
> +     spin_lock(vmf->ptl);
> +     return true;
> +}
> +

Moving them together makes sense, but again the if blocks in the callers
are redundant as long as it always returns true.
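
I assume a later patch makes the helper actually able to fail, something
along these lines (again, vm_sequence, vmf->sequence and
FAULT_FLAG_SPECULATIVE are my assumptions about the rest of the series,
not this patch):

static bool pte_spinlock(struct vm_fault *vmf)
{
	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
	spin_lock(vmf->ptl);

	/*
	 * Hypothetical: on the speculative path, recheck the VMA once the
	 * pte lock is held and report failure so that the caller returns
	 * VM_FAULT_RETRY instead of touching the page table.
	 */
	if ((vmf->flags & FAULT_FLAG_SPECULATIVE) &&
	    read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence)) {
		spin_unlock(vmf->ptl);
		return false;
	}
	return true;
}

Until something like that happens, the "if (!pte_spinlock(vmf))" checks
in the callers below are effectively dead code.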

>  static bool pte_map_lock(struct vm_fault *vmf)
>  {
>       vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
>                                      &vmf->ptl);
> @@ -3552,8 +3559,8 @@ static int do_numa_page(struct vm_fault *vmf)
>        * validation through pte_unmap_same(). It's of NUMA type but
>        * the pfn may be screwed if the read is non atomic.
>        */
> -     vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
> -     spin_lock(vmf->ptl);
> +     if (!pte_spinlock(vmf))
> +             return VM_FAULT_RETRY;
>       if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
>               pte_unmap_unlock(vmf->pte, vmf->ptl);
>               goto out;
> @@ -3745,8 +3752,8 @@ static int handle_pte_fault(struct vm_fault *vmf)
>       if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
>               return do_numa_page(vmf);
>  
> -     vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
> -     spin_lock(vmf->ptl);
> +     if (!pte_spinlock(vmf))
> +             return VM_FAULT_RETRY;
>       entry = vmf->orig_pte;
>       if (unlikely(!pte_same(*vmf->pte, entry)))
>               goto unlock;
> 
