Re: [PATCH v12 07/31] mm: make pte_unmap_same compatible with SPF

2019-04-23 Thread Laurent Dufour

On 23/04/2019 at 17:43, Matthew Wilcox wrote:
> On Tue, Apr 16, 2019 at 03:44:58PM +0200, Laurent Dufour wrote:
> > +static inline vm_fault_t pte_unmap_same(struct vm_fault *vmf)
> >  {
> > -   int same = 1;
> > +   int ret = 0;
> 
> Surely 'ret' should be of type vm_fault_t?

Nice catch!

> > +   ret = VM_FAULT_RETRY;
> 
> ... this should have thrown a sparse warning?

It should have, but I can't remember having seen it, weird...
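
For readers following along, here is a minimal standalone sketch of the
typing issue discussed above -- not the kernel source. vm_fault_t is
declared __bitwise, so sparse flags any implicit conversion between it
and a plain int; the fallback macro definitions are assumptions so the
snippet builds outside a kernel tree.

/* sketch.c - why 'int ret' defeats sparse's vm_fault_t checking */
#ifdef __CHECKER__                       /* defined when sparse runs */
#define __bitwise       __attribute__((bitwise))
#define __force         __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise vm_fault_t;
#define VM_FAULT_RETRY  ((__force vm_fault_t)0x000400)

static vm_fault_t pte_unmap_same_sketch(int vma_changed)
{
	int ret = 0;                     /* plain int: annotation lost */

	if (vma_changed)
		ret = VM_FAULT_RETRY;    /* sparse: different base types */
	return ret;                      /* sparse: implicit cast back */
}

Running "sparse sketch.c", or "make C=2 mm/memory.o" in a kernel tree,
should report "incorrect type" warnings on the two marked lines;
declaring ret as vm_fault_t silences both.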



Re: [PATCH v12 07/31] mm: make pte_unmap_same compatible with SPF

2019-04-23 Thread Matthew Wilcox
On Tue, Apr 16, 2019 at 03:44:58PM +0200, Laurent Dufour wrote:
> +static inline vm_fault_t pte_unmap_same(struct vm_fault *vmf)
>  {
> - int same = 1;
> + int ret = 0;

Surely 'ret' should be of type vm_fault_t?

> + ret = VM_FAULT_RETRY;

... this should have thrown a sparse warning?



Re: [PATCH v12 07/31] mm: make pte_unmap_same compatible with SPF

2019-04-18 Thread Jerome Glisse
On Tue, Apr 16, 2019 at 03:44:58PM +0200, Laurent Dufour wrote:
> pte_unmap_same() assumes that the page tables are still around because
> the mmap_sem is held.
> This is no longer the case when running a speculative page fault, so an
> additional check must be made to ensure that the page tables are still
> there.
> 
> This is now done by calling pte_spinlock(), which checks the VMA's
> consistency while taking the page table lock.
> 
> This requires passing a vm_fault structure to pte_unmap_same(), which
> contains all the needed parameters.
> 
> As pte_spinlock() may fail in the case of a speculative page fault, if
> the VMA has been changed behind our back, pte_unmap_same() now returns
> three cases:
>   1. the PTEs are the same (0)
>   2. the PTEs are different (VM_FAULT_PTNOTSAME)
>   3. a change to the VMA has been detected (VM_FAULT_RETRY)
> 
> Case 2 is handled by the introduction of a new VM_FAULT flag named
> VM_FAULT_PTNOTSAME, which is then trapped in cow_user_page().
> If VM_FAULT_RETRY is returned, it is passed up to the callers to retry
> the page fault while holding the mmap_sem.
> 
> Acked-by: David Rientjes 
> Signed-off-by: Laurent Dufour 

Reviewed-by: Jérôme Glisse 


> ---
>  include/linux/mm_types.h |  6 +++++-
>  mm/memory.c  | 37 +++++++++++++++++++++++++++++----------
>  2 files changed, 32 insertions(+), 11 deletions(-)
> 
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 8ec38b11b361..fd7d38ee2e33 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -652,6 +652,8 @@ typedef __bitwise unsigned int vm_fault_t;
>   * @VM_FAULT_NEEDDSYNC:  ->fault did not modify page tables and needs
>   *   fsync() to complete (for synchronous page faults
>   *   in DAX)
> + * @VM_FAULT_PTNOTSAME   Page table entries have changed during a
> + *   speculative page fault handling.
>   * @VM_FAULT_HINDEX_MASK: mask HINDEX value
>   *
>   */
> @@ -669,6 +671,7 @@ enum vm_fault_reason {
>   VM_FAULT_FALLBACK   = (__force vm_fault_t)0x000800,
>   VM_FAULT_DONE_COW   = (__force vm_fault_t)0x001000,
>   VM_FAULT_NEEDDSYNC  = (__force vm_fault_t)0x002000,
> + VM_FAULT_PTNOTSAME  = (__force vm_fault_t)0x004000,
>   VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
>  };
>  
> @@ -693,7 +696,8 @@ enum vm_fault_reason {
>   { VM_FAULT_RETRY,   "RETRY" },  \
>   { VM_FAULT_FALLBACK,"FALLBACK" },   \
>   { VM_FAULT_DONE_COW,"DONE_COW" },   \
> - { VM_FAULT_NEEDDSYNC,   "NEEDDSYNC" }
> + { VM_FAULT_NEEDDSYNC,   "NEEDDSYNC" },  \
> + { VM_FAULT_PTNOTSAME,   "PTNOTSAME" }
>  
>  struct vm_special_mapping {
>   const char *name;   /* The name, e.g. "[vdso]". */
> diff --git a/mm/memory.c b/mm/memory.c
> index 221ccdf34991..d5bebca47d98 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2094,21 +2094,29 @@ static inline bool pte_map_lock(struct vm_fault *vmf)
>   * parts, do_swap_page must check under lock before unmapping the pte and
>   * proceeding (but do_wp_page is only called after already making such a check;
>   * and do_anonymous_page can safely check later on).
> + *
> + * pte_unmap_same() returns:
> + *   0   if the PTEs are the same
> + *   VM_FAULT_PTNOTSAME  if the PTEs are different
> + *   VM_FAULT_RETRY  if the VMA has changed behind our back
> + *   during speculative page fault handling.
>   */
> -static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
> - pte_t *page_table, pte_t orig_pte)
> +static inline vm_fault_t pte_unmap_same(struct vm_fault *vmf)
>  {
> - int same = 1;
> + int ret = 0;
> +
>  #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
>   if (sizeof(pte_t) > sizeof(unsigned long)) {
> - spinlock_t *ptl = pte_lockptr(mm, pmd);
> - spin_lock(ptl);
> - same = pte_same(*page_table, orig_pte);
> - spin_unlock(ptl);
> + if (pte_spinlock(vmf)) {
> + if (!pte_same(*vmf->pte, vmf->orig_pte))
> + ret = VM_FAULT_PTNOTSAME;
> + spin_unlock(vmf->ptl);
> + } else
> + ret = VM_FAULT_RETRY;
>   }
>  #endif
> - pte_unmap(page_table);
> - return same;
> + pte_unmap(vmf->pte);
> + return ret;
>  }
>  
>  static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
> @@ -2714,8 +2722,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>   int exclusive = 0;
>   vm_fault_t ret = 0;
>  
> - if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
> + ret = pte_unmap_same(vmf);
> + if (ret) {
> + /*
> +  * If pte != orig_pte, this means another thread did the
> +  * swap operation behind our back.
> +  * So nothing else to do.
> +  */
> + if (ret == VM_FAULT_PTNOTSAME)
> +         ret = 0;
> + goto out;
> + }

[PATCH v12 07/31] mm: make pte_unmap_same compatible with SPF

2019-04-16 Thread Laurent Dufour
pte_unmap_same() assumes that the page tables are still around because
the mmap_sem is held.
This is no longer the case when running a speculative page fault, so an
additional check must be made to ensure that the page tables are still
there.

This is now done by calling pte_spinlock(), which checks the VMA's
consistency while taking the page table lock.

This requires passing a vm_fault structure to pte_unmap_same(), which
contains all the needed parameters.

As pte_spinlock() may fail in the case of a speculative page fault, if
the VMA has been changed behind our back, pte_unmap_same() now returns
three cases:
1. the PTEs are the same (0)
2. the PTEs are different (VM_FAULT_PTNOTSAME)
3. a change to the VMA has been detected (VM_FAULT_RETRY)

Case 2 is handled by the introduction of a new VM_FAULT flag named
VM_FAULT_PTNOTSAME, which is then trapped in cow_user_page().
If VM_FAULT_RETRY is returned, it is passed up to the callers to retry
the page fault while holding the mmap_sem.

Acked-by: David Rientjes 
Signed-off-by: Laurent Dufour 
---
 include/linux/mm_types.h |  6 +++++-
 mm/memory.c  | 37 +++++++++++++++++++++++++++++----------
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8ec38b11b361..fd7d38ee2e33 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -652,6 +652,8 @@ typedef __bitwise unsigned int vm_fault_t;
 * @VM_FAULT_NEEDDSYNC:	->fault did not modify page tables and needs
  * fsync() to complete (for synchronous page faults
  * in DAX)
+ * @VM_FAULT_PTNOTSAME Page table entries have changed during a
+ * speculative page fault handling.
  * @VM_FAULT_HINDEX_MASK:  mask HINDEX value
  *
  */
@@ -669,6 +671,7 @@ enum vm_fault_reason {
VM_FAULT_FALLBACK   = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW   = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC  = (__force vm_fault_t)0x002000,
+   VM_FAULT_PTNOTSAME  = (__force vm_fault_t)0x004000,
	VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
 };
 
@@ -693,7 +696,8 @@ enum vm_fault_reason {
{ VM_FAULT_RETRY,   "RETRY" },  \
{ VM_FAULT_FALLBACK,"FALLBACK" },   \
{ VM_FAULT_DONE_COW,"DONE_COW" },   \
-   { VM_FAULT_NEEDDSYNC,   "NEEDDSYNC" }
+   { VM_FAULT_NEEDDSYNC,   "NEEDDSYNC" },  \
+   { VM_FAULT_PTNOTSAME,   "PTNOTSAME" }
 
 struct vm_special_mapping {
const char *name;   /* The name, e.g. "[vdso]". */
diff --git a/mm/memory.c b/mm/memory.c
index 221ccdf34991..d5bebca47d98 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2094,21 +2094,29 @@ static inline bool pte_map_lock(struct vm_fault *vmf)
  * parts, do_swap_page must check under lock before unmapping the pte and
  * proceeding (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page can safely check later on).
+ *
+ * pte_unmap_same() returns:
+ * 0   if the PTEs are the same
+ * VM_FAULT_PTNOTSAME  if the PTEs are different
+ * VM_FAULT_RETRY  if the VMA has changed behind our back
+ * during speculative page fault handling.
  */
-static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
-   pte_t *page_table, pte_t orig_pte)
+static inline vm_fault_t pte_unmap_same(struct vm_fault *vmf)
 {
-   int same = 1;
+   int ret = 0;
+
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
if (sizeof(pte_t) > sizeof(unsigned long)) {
-   spinlock_t *ptl = pte_lockptr(mm, pmd);
-   spin_lock(ptl);
-   same = pte_same(*page_table, orig_pte);
-   spin_unlock(ptl);
+   if (pte_spinlock(vmf)) {
+   if (!pte_same(*vmf->pte, vmf->orig_pte))
+   ret = VM_FAULT_PTNOTSAME;
+   spin_unlock(vmf->ptl);
+   } else
+   ret = VM_FAULT_RETRY;
}
 #endif
-   pte_unmap(page_table);
-   return same;
+   pte_unmap(vmf->pte);
+   return ret;
 }
 
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
@@ -2714,8 +2722,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
int exclusive = 0;
vm_fault_t ret = 0;
 
-   if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
+   ret = pte_unmap_same(vmf);
+   if (ret) {
+		/*
+		 * If pte != orig_pte, this means another thread did the
+		 * swap operation behind our back.
+		 * So nothing else to do.
+		 */
+		if (ret == VM_FAULT_PTNOTSAME)
+			ret = 0;
+		goto out;
+	}
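
Since the patch relies on pte_spinlock() without showing it, here is a
hedged sketch of the kind of check that helper performs in the SPF
series: take the PTE lock without sleeping, then confirm the VMA's
sequence count has not moved since the speculative walk began. The
vm_sequence/sequence field names and the trylock policy are assumptions
drawn from the surrounding series, not a quote of its code.

static bool pte_spinlock_sketch(struct vm_fault *vmf)
{
	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);

	/* Never sleep on the PTE lock in the speculative path. */
	if (!spin_trylock(vmf->ptl))
		return false;		/* caller returns VM_FAULT_RETRY */

	/* Did the VMA change after the speculative walk sampled it? */
	if (read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence)) {
		spin_unlock(vmf->ptl);
		return false;		/* VMA changed behind our back */
	}

	return true;			/* locked and still consistent */
}

With that contract, pte_unmap_same() above can distinguish "the PTE
changed" (lock taken, pte_same() fails, VM_FAULT_PTNOTSAME) from "the
VMA changed" (lock not taken, VM_FAULT_RETRY), which is exactly the
three-way return described in the commit message.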