Re: [PATCH v6 22/24] mm: Speculative page fault handler return VMA

2018-01-16 Thread Matthew Wilcox
On Tue, Jan 16, 2018 at 03:47:51PM +0100, Laurent Dufour wrote:
> On 13/01/2018 05:23, Matthew Wilcox wrote:
> > Of course, we don't need to change them all.  Try this:
> 
> That would be a good candidate for a cleanup, but I'm not sure it should be
> part of this already-too-long series.
> 
> If you don't mind, and unless there is broad agreement to do it now, I'd
> prefer to postpone such a change until the initial series is accepted.

Actually, I think this can go in first, independently of the speculative
fault series.  It's a win in memory savings, and probably shaves a
cycle or two off the fault handler due to less argument marshalling in
the call-stack.


Re: [PATCH v6 22/24] mm: Speculative page fault handler return VMA

2018-01-16 Thread Laurent Dufour
On 13/01/2018 05:23, Matthew Wilcox wrote:
> On Fri, Jan 12, 2018 at 11:02:51AM -0800, Matthew Wilcox wrote:
>> On Fri, Jan 12, 2018 at 06:26:06PM +0100, Laurent Dufour wrote:
>>> @@ -1354,7 +1354,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
>>> unsigned int flags);
>>>  #ifdef CONFIG_SPF
>>>  extern int handle_speculative_fault(struct mm_struct *mm,
>>> +   unsigned long address, unsigned int flags,
>>> +   struct vm_area_struct **vma);
>>
>> I think this shows that we need to create 'struct vm_fault' on the stack
>> in the arch code and then pass it to handle_speculative_fault(), followed
>> by handle_mm_fault().  That should be quite a nice cleanup actually.
>> I know that's only 30+ architectures to change ;-)
> 
> Of course, we don't need to change them all.  Try this:

That would be a good candidate for a cleanup, but I'm not sure it should be
part of this already-too-long series.

If you don't mind, and unless there is broad agreement to do it now, I'd
prefer to postpone such a change until the initial series is accepted.

Cheers,
Laurent.

> Subject: [PATCH] Add vm_handle_fault
> 
> For the speculative fault handler, we want to create the struct vm_fault
> on the stack in the arch code and pass it into the generic mm code.
> To avoid changing 30+ architectures, leave handle_mm_fault with its
> current function signature and move its guts into the new vm_handle_fault
> function.  Even this saves a nice 172 bytes on the random x86-64 .config
> I happen to have around.
> 
> Signed-off-by: Matthew Wilcox 
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 5eb3d2524bdc..403934297a3d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3977,36 +3977,28 @@ static int handle_pte_fault(struct vm_fault *vmf)
>   * The mmap_sem may have been released depending on flags and our
>   * return value.  See filemap_fault() and __lock_page_or_retry().
>   */
> -static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
> - unsigned int flags)
> +static int __handle_mm_fault(struct vm_fault *vmf)
>  {
> - struct vm_fault vmf = {
> - .vma = vma,
> - .address = address & PAGE_MASK,
> - .flags = flags,
> - .pgoff = linear_page_index(vma, address),
> - .gfp_mask = __get_fault_gfp_mask(vma),
> - };
> - unsigned int dirty = flags & FAULT_FLAG_WRITE;
> - struct mm_struct *mm = vma->vm_mm;
> + unsigned int dirty = vmf->flags & FAULT_FLAG_WRITE;
> + struct mm_struct *mm = vmf->vma->vm_mm;
>   pgd_t *pgd;
>   p4d_t *p4d;
>   int ret;
> 
> - pgd = pgd_offset(mm, address);
> - p4d = p4d_alloc(mm, pgd, address);
> + pgd = pgd_offset(mm, vmf->address);
> + p4d = p4d_alloc(mm, pgd, vmf->address);
>   if (!p4d)
>   return VM_FAULT_OOM;
> 
> - vmf.pud = pud_alloc(mm, p4d, address);
> - if (!vmf.pud)
> + vmf->pud = pud_alloc(mm, p4d, vmf->address);
> + if (!vmf->pud)
>   return VM_FAULT_OOM;
> - if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
> - ret = create_huge_pud(&vmf);
> + if (pud_none(*vmf->pud) && transparent_hugepage_enabled(vmf->vma)) {
> + ret = create_huge_pud(vmf);
>   if (!(ret & VM_FAULT_FALLBACK))
>   return ret;
>   } else {
> - pud_t orig_pud = *vmf.pud;
> + pud_t orig_pud = *vmf->pud;
> 
>   barrier();
>   if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
> @@ -4014,50 +4006,51 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
>   /* NUMA case for anonymous PUDs would go here */
> 
>   if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
> - ret = wp_huge_pud(&vmf, orig_pud);
> + ret = wp_huge_pud(vmf, orig_pud);
>   if (!(ret & VM_FAULT_FALLBACK))
>   return ret;
>   } else {
> - huge_pud_set_accessed(&vmf, orig_pud);
> + huge_pud_set_accessed(vmf, orig_pud);
>   return 0;
>   }
>   }
>   }
> 
> - vmf.pmd = pmd_alloc(mm, vmf.pud, address);
> - if (!vmf.pmd)
> + vmf->pmd = pmd_alloc(mm, vmf->pud, vmf->address);
> + if (!vmf->pmd)
>   return VM_FAULT_OOM;
> - if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
> - ret = create_huge_pmd(&vmf);
> + if (pmd_none(*vmf->pmd) && transparent_hugepage_enabled(vmf->vma)) {
> + ret = create_huge_pmd(vmf);
>   if (!(ret & VM_FAULT_FALLBACK))
>   return ret;
>   } else {
> - pmd_t orig_pmd = 

Re: [PATCH v6 22/24] mm: Speculative page fault handler return VMA

2018-01-12 Thread Matthew Wilcox
On Fri, Jan 12, 2018 at 11:02:51AM -0800, Matthew Wilcox wrote:
> On Fri, Jan 12, 2018 at 06:26:06PM +0100, Laurent Dufour wrote:
> > @@ -1354,7 +1354,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
> > unsigned int flags);
> >  #ifdef CONFIG_SPF
> >  extern int handle_speculative_fault(struct mm_struct *mm,
> > +   unsigned long address, unsigned int flags,
> > +   struct vm_area_struct **vma);
> 
> I think this shows that we need to create 'struct vm_fault' on the stack
> in the arch code and then pass it to handle_speculative_fault(), followed
> by handle_mm_fault().  That should be quite a nice cleanup actually.
> I know that's only 30+ architectures to change ;-)

Of course, we don't need to change them all.  Try this:

Subject: [PATCH] Add vm_handle_fault

For the speculative fault handler, we want to create the struct vm_fault
on the stack in the arch code and pass it into the generic mm code.
To avoid changing 30+ architectures, leave handle_mm_fault with its
current function signature and move its guts into the new vm_handle_fault
function.  Even this saves a nice 172 bytes on the random x86-64 .config
I happen to have around.

Signed-off-by: Matthew Wilcox 

diff --git a/mm/memory.c b/mm/memory.c
index 5eb3d2524bdc..403934297a3d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3977,36 +3977,28 @@ static int handle_pte_fault(struct vm_fault *vmf)
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
-   unsigned int flags)
+static int __handle_mm_fault(struct vm_fault *vmf)
 {
-   struct vm_fault vmf = {
-   .vma = vma,
-   .address = address & PAGE_MASK,
-   .flags = flags,
-   .pgoff = linear_page_index(vma, address),
-   .gfp_mask = __get_fault_gfp_mask(vma),
-   };
-   unsigned int dirty = flags & FAULT_FLAG_WRITE;
-   struct mm_struct *mm = vma->vm_mm;
+   unsigned int dirty = vmf->flags & FAULT_FLAG_WRITE;
+   struct mm_struct *mm = vmf->vma->vm_mm;
pgd_t *pgd;
p4d_t *p4d;
int ret;
 
-   pgd = pgd_offset(mm, address);
-   p4d = p4d_alloc(mm, pgd, address);
+   pgd = pgd_offset(mm, vmf->address);
+   p4d = p4d_alloc(mm, pgd, vmf->address);
if (!p4d)
return VM_FAULT_OOM;
 
-   vmf.pud = pud_alloc(mm, p4d, address);
-   if (!vmf.pud)
+   vmf->pud = pud_alloc(mm, p4d, vmf->address);
+   if (!vmf->pud)
return VM_FAULT_OOM;
-   if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
-   ret = create_huge_pud(&vmf);
+   if (pud_none(*vmf->pud) && transparent_hugepage_enabled(vmf->vma)) {
+   ret = create_huge_pud(vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   pud_t orig_pud = *vmf.pud;
+   pud_t orig_pud = *vmf->pud;
 
barrier();
if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
@@ -4014,50 +4006,51 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* NUMA case for anonymous PUDs would go here */
 
if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
-   ret = wp_huge_pud(&vmf, orig_pud);
+   ret = wp_huge_pud(vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   huge_pud_set_accessed(&vmf, orig_pud);
+   huge_pud_set_accessed(vmf, orig_pud);
return 0;
}
}
}
 
-   vmf.pmd = pmd_alloc(mm, vmf.pud, address);
-   if (!vmf.pmd)
+   vmf->pmd = pmd_alloc(mm, vmf->pud, vmf->address);
+   if (!vmf->pmd)
return VM_FAULT_OOM;
-   if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
-   ret = create_huge_pmd(&vmf);
+   if (pmd_none(*vmf->pmd) && transparent_hugepage_enabled(vmf->vma)) {
+   ret = create_huge_pmd(vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   pmd_t orig_pmd = *vmf.pmd;
+   pmd_t orig_pmd = *vmf->pmd;
 
barrier();
if (unlikely(is_swap_pmd(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
  !is_pmd_migration_entry(orig_pmd));
if (is_pmd_migration_entry(orig_pmd))
-   
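
For illustration only: the archived copy of this patch is truncated above, so
here is a minimal sketch of the handle_mm_fault() wrapper the commit message
describes.  The initializers follow the ones removed from __handle_mm_fault()
in the hunk above; the per-fault accounting and memcg OOM handling that
handle_mm_fault() also performs is assumed to move into vm_handle_fault() and
is not shown.

int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags)
{
	/* Build the vm_fault once, exactly as __handle_mm_fault() used to. */
	struct vm_fault vmf = {
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
		.pgoff = linear_page_index(vma, address),
		.gfp_mask = __get_fault_gfp_mask(vma),
	};

	/* Existing callers keep this signature; only the guts moved. */
	return vm_handle_fault(&vmf);
}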

Re: [PATCH v6 22/24] mm: Speculative page fault handler return VMA

2018-01-12 Thread Matthew Wilcox
On Fri, Jan 12, 2018 at 06:26:06PM +0100, Laurent Dufour wrote:
> @@ -1354,7 +1354,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
>   unsigned int flags);
>  #ifdef CONFIG_SPF
>  extern int handle_speculative_fault(struct mm_struct *mm,
> + unsigned long address, unsigned int flags,
> + struct vm_area_struct **vma);

I think this shows that we need to create 'struct vm_fault' on the stack
in the arch code and then pass it to handle_speculative_fault(), followed
by handle_mm_fault().  That should be quite a nice cleanup actually.
I know that's only 30+ architectures to change ;-)
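
For illustration only, a rough sketch of what an arch fault handler could look
like with struct vm_fault built on the stack in arch code, as suggested above.
The handle_speculative_fault() and handle_mm_fault() variants taking a struct
vm_fault are hypothetical here: they are the proposed cleanup, not what the
series currently implements, and stack-expansion/boundary checks are omitted.

static int arch_fault_sketch(struct mm_struct *mm, unsigned long address,
			     unsigned int flags)
{
	struct vm_fault vmf = {
		.address = address & PAGE_MASK,
		.flags = flags,
		/* .pgoff and .gfp_mask can only be filled once the vma is known */
	};
	int fault;

	/* Lockless attempt first: no mmap_sem, no find_vma(). */
	fault = handle_speculative_fault(mm, &vmf);	/* hypothetical signature */
	if (!(fault & VM_FAULT_RETRY))
		return fault;

	/* Fall back to the classic path under the mmap_sem. */
	down_read(&mm->mmap_sem);
	vmf.vma = find_vma(mm, address);
	/* handle_mm_fault() taking a vm_fault is the hypothetical cleanup */
	fault = vmf.vma ? handle_mm_fault(&vmf) : VM_FAULT_SIGSEGV;
	up_read(&mm->mmap_sem);
	return fault;
}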



[PATCH v6 22/24] mm: Speculative page fault handler return VMA

2018-01-12 Thread Laurent Dufour
When the speculative page fault handler returns VM_FAULT_RETRY, there is a
chance that the VMA fetched without grabbing the mmap_sem can be reused by the
legacy page fault handler.  By reusing it, we avoid calling find_vma() again.
To achieve that, we must ensure that the VMA structure will not be freed
behind our back.  This is done by taking a reference on it (get_vma()) and by
requiring the caller to call the new helper can_reuse_spf_vma() once it has
grabbed the mmap_sem.

can_reuse_spf_vma() first checks that the VMA is still in the RB tree and that
its boundaries match the passed address, then releases the reference on the
VMA so that it can be freed if needed.

If the VMA has been freed, can_reuse_spf_vma() returns false, since the VMA is
no longer in the RB tree.
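
For illustration only, a sketch of the caller-side pattern this describes
(e.g. in an arch do_page_fault).  The surrounding function is hypothetical,
but handle_speculative_fault(), can_reuse_spf_vma() and handle_mm_fault() are
used with the signatures this patch declares in include/linux/mm.h.

static int spf_fault_example(struct mm_struct *mm, unsigned long address,
			     unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	/* Lockless attempt; on anything but VM_FAULT_RETRY we are done. */
	fault = handle_speculative_fault(mm, address, flags, &vma);
	if (!(fault & VM_FAULT_RETRY))
		return fault;

	down_read(&mm->mmap_sem);
	/*
	 * The speculative path handed back a referenced vma.  Reuse it if it
	 * is still in the RB tree and still covers 'address'; either way
	 * can_reuse_spf_vma() releases the reference taken by get_vma().
	 */
	if (!vma || !can_reuse_spf_vma(vma, address))
		vma = find_vma(mm, address);
	if (!vma) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}

	fault = handle_mm_fault(vma, address, flags);
	up_read(&mm->mmap_sem);
	return fault;
}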

Signed-off-by: Laurent Dufour 
---
 include/linux/mm.h |   5 +-
 mm/memory.c        | 136 +
 2 files changed, 88 insertions(+), 53 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4d8a7621da8a..02da17792f0d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1354,7 +1354,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
unsigned int flags);
 #ifdef CONFIG_SPF
 extern int handle_speculative_fault(struct mm_struct *mm,
-   unsigned long address, unsigned int flags);
+   unsigned long address, unsigned int flags,
+   struct vm_area_struct **vma);
+extern bool can_reuse_spf_vma(struct vm_area_struct *vma,
+ unsigned long address);
 #endif /* CONFIG_SPF */
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
diff --git a/mm/memory.c b/mm/memory.c
index 6ccb1f45473a..e1f172ac2c90 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4284,13 +4284,22 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 /* This is required by vm_normal_page() */
 #error "Speculative page fault handler requires __HAVE_ARCH_PTE_SPECIAL"
 #endif
-
 /*
  * vm_normal_page() adds some processing which should be done while
 * holding the mmap_sem.
  */
+
+/*
+ * Tries to handle the page fault in a speculative way, without grabbing the
+ * mmap_sem.
+ * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must
+ * be checked later when the mmap_sem has been grabbed by calling
+ * can_reuse_spf_vma().
+ * This is needed as the returned vma is kept in memory until the call to
+ * can_reuse_spf_vma() is made.
+ */
 int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
-unsigned int flags)
+unsigned int flags, struct vm_area_struct **vma)
 {
struct vm_fault vmf = {
.address = address,
@@ -4299,7 +4308,6 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
p4d_t *p4d, p4dval;
pud_t pudval;
int seq, ret = VM_FAULT_RETRY;
-   struct vm_area_struct *vma;
 #ifdef CONFIG_NUMA
struct mempolicy *pol;
 #endif
@@ -4308,14 +4316,16 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
flags |= FAULT_FLAG_SPECULATIVE;
 
-   vma = get_vma(mm, address);
-   if (!vma)
+   *vma = get_vma(mm, address);
+   if (!*vma)
return ret;
+   vmf.vma = *vma;
 
-   seq = raw_read_seqcount(&vma->vm_sequence); /* rmb <-> seqlock,vma_rb_erase() */
+   /* rmb <-> seqlock,vma_rb_erase() */
+   seq = raw_read_seqcount(&vmf.vma->vm_sequence);
if (seq & 1) {
-   trace_spf_vma_changed(_RET_IP_, vma, address);
-   goto out_put;
+   trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
+   return ret;
}
 
/*
@@ -4323,9 +4333,9 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 * with the VMA.
 * This include huge page from hugetlbfs.
 */
-   if (vma->vm_ops) {
-   trace_spf_vma_notsup(_RET_IP_, vma, address);
-   goto out_put;
+   if (vmf.vma->vm_ops) {
+   trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+   return ret;
}
 
/*
@@ -4333,18 +4343,18 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 * because vm_next and vm_prev must be safe. This can't be guaranteed
 * in the speculative path.
 */
-   if (unlikely(!vma->anon_vma)) {
-   trace_spf_vma_notsup(_RET_IP_, vma, address);
-   goto out_put;
+   if (unlikely(!vmf.vma->anon_vma)) {
+   trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+