Re: [PATCH v9 3/3] mm: fix double page fault on arm64 if PTE_AF is cleared

2019-09-26 Thread Kirill A. Shutemov
On Wed, Sep 25, 2019 at 10:59:22AM +0800, Jia He wrote:
> When we tested the pmdk unit test [1] vmmalloc_fork TEST1 in an arm64 guest,
> we hit a double page fault in __copy_from_user_inatomic of cow_user_page.
> 
> The call trace below is from arm64 do_page_fault, captured for debugging purposes:
> [  110.016195] Call trace:
> [  110.016826]  do_page_fault+0x5a4/0x690
> [  110.017812]  do_mem_abort+0x50/0xb0
> [  110.018726]  el1_da+0x20/0xc4
> [  110.019492]  __arch_copy_from_user+0x180/0x280
> [  110.020646]  do_wp_page+0xb0/0x860
> [  110.021517]  __handle_mm_fault+0x994/0x1338
> [  110.022606]  handle_mm_fault+0xe8/0x180
> [  110.023584]  do_page_fault+0x240/0x690
> [  110.024535]  do_mem_abort+0x50/0xb0
> [  110.025423]  el0_da+0x20/0x24
> 
> The pte info before __copy_from_user_inatomic is (PTE_AF is cleared):
> [9b007000] pgd=00023d4f8003, pud=00023da9b003, 
> pmd=00023d4b3003, pte=36298607bd3
> 
> As told by Catalin: "On arm64 without hardware Access Flag, copying from
> user will fail because the pte is old and cannot be marked young. So we
> always end up with zeroed page after fork() + CoW for pfn mappings. we
> don't always have a hardware-managed access flag on arm64."
> 
> This patch fixes it by calling pte_mkyoung. Also, the parameters of
> cow_user_page() are changed so that vmf can be passed in.
> 
> Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error,
> in case there is some obscure use-case. (Suggested by Kirill)
> 
> [1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork
> 
> Signed-off-by: Jia He 
> Reported-by: Yibo Cai 

Acked-by: Kirill A. Shutemov 

-- 
 Kirill A. Shutemov
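
For context, the generic arch_faults_on_old_pte() fallback added by this
patch returns false, so only architectures that can fault on an old pte pay
for the extra work in cow_user_page(). Patch 2/3 of the series overrides it
on arm64; the following is a minimal sketch of that override, reconstructed
from the series discussion (cpu_has_hw_af() comes from patch 2/3, so treat
this as an approximation rather than the exact hunk):

/*
 * Without hardware Access Flag management, the pte stays old and a kernel
 * uaccess through it faults, so report that CoW must mark it young first.
 */
static inline bool arch_faults_on_old_pte(void)
{
	/* cow_user_page() calls this after kmap_atomic(), i.e. non-preemptible. */
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte		arch_faults_on_old_pte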


Re: [PATCH v9 3/3] mm: fix double page fault on arm64 if PTE_AF is cleared

2019-09-25 Thread Catalin Marinas
On Wed, Sep 25, 2019 at 10:59:22AM +0800, Jia He wrote:
> When we tested the pmdk unit test [1] vmmalloc_fork TEST1 in an arm64 guest,
> we hit a double page fault in __copy_from_user_inatomic of cow_user_page.
> 
> The call trace below is from arm64 do_page_fault, captured for debugging purposes:
> [  110.016195] Call trace:
> [  110.016826]  do_page_fault+0x5a4/0x690
> [  110.017812]  do_mem_abort+0x50/0xb0
> [  110.018726]  el1_da+0x20/0xc4
> [  110.019492]  __arch_copy_from_user+0x180/0x280
> [  110.020646]  do_wp_page+0xb0/0x860
> [  110.021517]  __handle_mm_fault+0x994/0x1338
> [  110.022606]  handle_mm_fault+0xe8/0x180
> [  110.023584]  do_page_fault+0x240/0x690
> [  110.024535]  do_mem_abort+0x50/0xb0
> [  110.025423]  el0_da+0x20/0x24
> 
> The pte info before __copy_from_user_inatomic is (PTE_AF is cleared):
> [9b007000] pgd=00023d4f8003, pud=00023da9b003, 
> pmd=00023d4b3003, pte=36298607bd3
> 
> As told by Catalin: "On arm64 without hardware Access Flag, copying from
> user will fail because the pte is old and cannot be marked young. So we
> always end up with zeroed page after fork() + CoW for pfn mappings. we
> don't always have a hardware-managed access flag on arm64."
> 
> This patch fixes it by calling pte_mkyoung. Also, the parameters of
> cow_user_page() are changed so that vmf can be passed in.
> 
> Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error,
> in case there is some obscure use-case. (Suggested by Kirill)
> 
> [1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork
> 
> Signed-off-by: Jia He 
> Reported-by: Yibo Cai 

Reviewed-by: Catalin Marinas 

>  mm/memory.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 84 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index e2bb51b6242e..a0a381b36ff2 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -118,6 +118,13 @@ int randomize_va_space __read_mostly =
>   2;
>  #endif
>  
> +#ifndef arch_faults_on_old_pte
> +static inline bool arch_faults_on_old_pte(void)
> +{
> + return false;
> +}
> +#endif
> +
>  static int __init disable_randmaps(char *s)
>  {
>   randomize_va_space = 0;
> @@ -2140,32 +2147,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
>   return same;
>  }
>  
> -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
> +static inline bool cow_user_page(struct page *dst, struct page *src,
> +  struct vm_fault *vmf)
>  {
> + bool ret;
> + void *kaddr;
> + void __user *uaddr;
> + bool force_mkyoung;
> + struct vm_area_struct *vma = vmf->vma;
> + struct mm_struct *mm = vma->vm_mm;
> + unsigned long addr = vmf->address;
> +
>   debug_dma_assert_idle(src);
>  
> + if (likely(src)) {
> + copy_user_highpage(dst, src, addr, vma);
> + return true;
> + }
> +
>   /*
>* If the source page was a PFN mapping, we don't have
>* a "struct page" for it. We do a best-effort copy by
>* just copying from the original user address. If that
>* fails, we just zero-fill it. Live with it.
>*/
> - if (unlikely(!src)) {
> - void *kaddr = kmap_atomic(dst);
> - void __user *uaddr = (void __user *)(va & PAGE_MASK);
> + kaddr = kmap_atomic(dst);
> + uaddr = (void __user *)(addr & PAGE_MASK);
> +
> + /*
> +  * On architectures with software "accessed" bits, we would
> +  * take a double page fault, so mark it accessed here.
> +  */
> + force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
> + if (force_mkyoung) {
> + pte_t entry;
> +
> + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
> + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
> + /*
> +  * Other thread has already handled the fault
> +  * and we don't need to do anything. If it's
> +  * not the case, the fault will be triggered
> +  * again on the same address.
> +  */
> + ret = false;
> + goto pte_unlock;
> + }
> +
> + entry = pte_mkyoung(vmf->orig_pte);
> + if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
> + update_mmu_cache(vma, addr, vmf->pte);
> + }
>  
> + /*
> +  * This really shouldn't fail, because the page is there
> +  * in the page tables. But it might just be unreadable,
> +  * in which case we just give up and fill the result with
> +  * zeroes.
> +  */
> + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
>   /*
> -  * This really shouldn't fail, because the page is there
> -  * in the page tables. But it might just be unreadable,
> -  * in which case we just give up and fill the result with
> -  * zeroes.
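
To make the failure mode concrete, here is a hypothetical minimal reproducer
in the spirit of vmmalloc_fork TEST1 (not from this thread; it assumes a
DAX-mounted filesystem at /mnt/pmem, so a private mapping of the file is a
pfn mapping with no struct page visible to vm_normal_page()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int status, fd;
	char *p;

	memset(buf, 0x5a, sizeof(buf));
	fd = open("/mnt/pmem/cowtest", O_CREAT | O_RDWR, 0600);
	(void)write(fd, buf, sizeof(buf));

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

	/* Read fault: installs a read-only pfn pte for the DAX page. */
	(void)*(volatile char *)p;

	if (fork() == 0) {
		/*
		 * fork() marked the child's pte old. This write CoWs the
		 * pfn mapping via cow_user_page(); before the fix, arm64
		 * without hardware AF ended up with a zero-filled copy.
		 */
		p[0] = 1;
		_exit(p[1] == 0x5a ? 0 : 1);
	}
	wait(&status);
	printf("child saw a %s page\n", status == 0 ? "correct" : "zeroed");
	return 0;
}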

RE: [PATCH v9 3/3] mm: fix double page fault on arm64 if PTE_AF is cleared

2019-09-24 Thread Justin He (Arm Technology China)
Hi Matthew and Kirill,
I didn't add your previous Reviewed-by and Acked-by tags since I refactored
cow_user_page() and changed the ptl range in v9 (see the condensed sketch of
the new locking pattern below). Please have another look, thanks.


--
Cheers,
Justin (Jia He)
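
For reference, the ptl-range change mentioned above: v9 takes the page-table
lock before the pte_same() check and holds it across both the pte_mkyoung()
and (in the full patch) the subsequent copy, so the young bit cannot be lost
to a concurrent fault. A condensed sketch of the pattern, with names as in
the patch quoted below:

	pte_t entry;

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
		/* Another thread handled the fault; let userspace re-fault. */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return false;
	}

	entry = pte_mkyoung(vmf->orig_pte);
	if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
		update_mmu_cache(vma, addr, vmf->pte);

	/* __copy_from_user_inatomic() runs before pte_unmap_unlock(). */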



> -----Original Message-----
> From: Jia He 
> Sent: September 25, 2019 10:59
> To: Catalin Marinas ; Will Deacon
> ; Mark Rutland ; James Morse
> ; Marc Zyngier ; Matthew
> Wilcox ; Kirill A. Shutemov
> ; linux-arm-ker...@lists.infradead.org;
> linux-kernel@vger.kernel.org; linux...@kvack.org; Suzuki Poulose
> 
> Cc: Punit Agrawal ; Anshuman Khandual
> ; Alex Van Brunt
> ; Robin Murphy ;
> Thomas Gleixner ; Andrew Morton ; Jérôme Glisse ; Ralph Campbell
> ; hejia...@gmail.com; Kaly Xin (Arm Technology
> China) ; nd ; Justin He (Arm
> Technology China) 
> Subject: [PATCH v9 3/3] mm: fix double page fault on arm64 if PTE_AF is
> cleared
> 
> When we tested the pmdk unit test [1] vmmalloc_fork TEST1 in an arm64
> guest, we hit a double page fault in __copy_from_user_inatomic of
> cow_user_page.
> 
> The call trace below is from arm64 do_page_fault, captured for debugging purposes:
> [  110.016195] Call trace:
> [  110.016826]  do_page_fault+0x5a4/0x690
> [  110.017812]  do_mem_abort+0x50/0xb0
> [  110.018726]  el1_da+0x20/0xc4
> [  110.019492]  __arch_copy_from_user+0x180/0x280
> [  110.020646]  do_wp_page+0xb0/0x860
> [  110.021517]  __handle_mm_fault+0x994/0x1338
> [  110.022606]  handle_mm_fault+0xe8/0x180
> [  110.023584]  do_page_fault+0x240/0x690
> [  110.024535]  do_mem_abort+0x50/0xb0
> [  110.025423]  el0_da+0x20/0x24
> 
> The pte info before __copy_from_user_inatomic is (PTE_AF is cleared):
> [9b007000] pgd=00023d4f8003, pud=00023da9b003,
> pmd=00023d4b3003, pte=36298607bd3
> 
> As told by Catalin: "On arm64 without hardware Access Flag, copying from
> user will fail because the pte is old and cannot be marked young. So we
> always end up with zeroed page after fork() + CoW for pfn mappings. we
> don't always have a hardware-managed access flag on arm64."
> 
> This patch fixes it by calling pte_mkyoung. Also, the parameters of
> cow_user_page() are changed so that vmf can be passed in.
> 
> Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error,
> in case there is some obscure use-case. (Suggested by Kirill)
> 
> [1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork
> 
> Signed-off-by: Jia He 
> Reported-by: Yibo Cai 
> ---
>  mm/memory.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 84 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index e2bb51b6242e..a0a381b36ff2 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -118,6 +118,13 @@ int randomize_va_space __read_mostly =
>   2;
>  #endif
> 
> +#ifndef arch_faults_on_old_pte
> +static inline bool arch_faults_on_old_pte(void)
> +{
> + return false;
> +}
> +#endif
> +
>  static int __init disable_randmaps(char *s)
>  {
>   randomize_va_space = 0;
> @@ -2140,32 +2147,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
>   return same;
>  }
> 
> -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
> +static inline bool cow_user_page(struct page *dst, struct page *src,
> +  struct vm_fault *vmf)
>  {
> + bool ret;
> + void *kaddr;
> + void __user *uaddr;
> + bool force_mkyoung;
> + struct vm_area_struct *vma = vmf->vma;
> + struct mm_struct *mm = vma->vm_mm;
> + unsigned long addr = vmf->address;
> +
>   debug_dma_assert_idle(src);
> 
> + if (likely(src)) {
> + copy_user_highpage(dst, src, addr, vma);
> + return true;
> + }
> +
>   /*
>* If the source page was a PFN mapping, we don't have
>* a "struct page" for it. We do a best-effort copy by
>* just copying from the original user address. If that
>* fails, we just zero-fill it. Live with it.
>*/
> - if (unlikely(!src)) {
> - void *kaddr = kmap_atomic(dst);
> - void __user *uaddr = (void __user *)(va & PAGE_MASK);
> + kaddr = kmap_atomic(dst);
> + uaddr = (void __user *)(addr & PAGE_MASK);
> +
> + /*
> +  * On architectures with software "accessed" bits, we would
> +  * take a double page fault, so mark it accessed here.
> +  */
> + force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
> + if (force_mkyoung) {
> + pte_t entry;
> +
> + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
> + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
> + /*
> +  * Other thread has already handled the fault
> +  * and we don't need to do anything. If it's
> +  * not the case, the fault will be triggered
> +  * again on the same address.
> +  */

[PATCH v9 3/3] mm: fix double page fault on arm64 if PTE_AF is cleared

2019-09-24 Thread Jia He
When we tested the pmdk unit test [1] vmmalloc_fork TEST1 in an arm64 guest,
we hit a double page fault in __copy_from_user_inatomic of cow_user_page.

The call trace below is from arm64 do_page_fault, captured for debugging purposes:
[  110.016195] Call trace:
[  110.016826]  do_page_fault+0x5a4/0x690
[  110.017812]  do_mem_abort+0x50/0xb0
[  110.018726]  el1_da+0x20/0xc4
[  110.019492]  __arch_copy_from_user+0x180/0x280
[  110.020646]  do_wp_page+0xb0/0x860
[  110.021517]  __handle_mm_fault+0x994/0x1338
[  110.022606]  handle_mm_fault+0xe8/0x180
[  110.023584]  do_page_fault+0x240/0x690
[  110.024535]  do_mem_abort+0x50/0xb0
[  110.025423]  el0_da+0x20/0x24

The pte info before __copy_from_user_inatomic is (PTE_AF is cleared):
[9b007000] pgd=00023d4f8003, pud=00023da9b003, 
pmd=00023d4b3003, pte=36298607bd3

As told by Catalin: "On arm64 without hardware Access Flag, copying from
user will fail because the pte is old and cannot be marked young. So we
always end up with zeroed page after fork() + CoW for pfn mappings. we
don't always have a hardware-managed access flag on arm64."

This patch fixes it by calling pte_mkyoung. Also, the parameters of
cow_user_page() are changed so that vmf can be passed in.

Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error,
in case there is some obscure use-case. (Suggested by Kirill)

[1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork

Signed-off-by: Jia He 
Reported-by: Yibo Cai 
---
 mm/memory.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 84 insertions(+), 15 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index e2bb51b6242e..a0a381b36ff2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -118,6 +118,13 @@ int randomize_va_space __read_mostly =
2;
 #endif
 
+#ifndef arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
+{
+   return false;
+}
+#endif
+
 static int __init disable_randmaps(char *s)
 {
randomize_va_space = 0;
@@ -2140,32 +2147,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
return same;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+static inline bool cow_user_page(struct page *dst, struct page *src,
+struct vm_fault *vmf)
 {
+   bool ret;
+   void *kaddr;
+   void __user *uaddr;
+   bool force_mkyoung;
+   struct vm_area_struct *vma = vmf->vma;
+   struct mm_struct *mm = vma->vm_mm;
+   unsigned long addr = vmf->address;
+
debug_dma_assert_idle(src);
 
+   if (likely(src)) {
+   copy_user_highpage(dst, src, addr, vma);
+   return true;
+   }
+
/*
 * If the source page was a PFN mapping, we don't have
 * a "struct page" for it. We do a best-effort copy by
 * just copying from the original user address. If that
 * fails, we just zero-fill it. Live with it.
 */
-   if (unlikely(!src)) {
-   void *kaddr = kmap_atomic(dst);
-   void __user *uaddr = (void __user *)(va & PAGE_MASK);
+   kaddr = kmap_atomic(dst);
+   uaddr = (void __user *)(addr & PAGE_MASK);
+
+   /*
+* On architectures with software "accessed" bits, we would
+* take a double page fault, so mark it accessed here.
+*/
+   force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
+   if (force_mkyoung) {
+   pte_t entry;
+
+   vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+   if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+   /*
+* Other thread has already handled the fault
+* and we don't need to do anything. If it's
+* not the case, the fault will be triggered
+* again on the same address.
+*/
+   ret = false;
+   goto pte_unlock;
+   }
+
+   entry = pte_mkyoung(vmf->orig_pte);
+   if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
+   update_mmu_cache(vma, addr, vmf->pte);
+   }
 
+   /*
+* This really shouldn't fail, because the page is there
+* in the page tables. But it might just be unreadable,
+* in which case we just give up and fill the result with
+* zeroes.
+*/
+   if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
/*
-* This really shouldn't fail, because the page is there
-* in the page tables. But it might just be unreadable,
-* in which case we just give up and fill the result with
-* zeroes.
+* Give a warn in case there can be some obscure
+* use-case
+*/
+   WARN_ON_ONCE(1);
+   clear_page(kaddr);
+   }
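
Beyond the hunk shown above, the patch also has to make wp_page_copy()
handle the new boolean return from cow_user_page(). A sketch of that
caller-side handling, reconstructed from this v9 discussion (treat the
exact wording as an approximation, not the literal hunk):

	if (!cow_user_page(new_page, old_page, vmf)) {
		/*
		 * CoW failed: if the fault was resolved by another
		 * thread, we are done; otherwise userspace re-faults
		 * on the same address and the second attempt handles it.
		 */
		put_page(new_page);
		if (old_page)
			put_page(old_page);
		return 0;
	}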