Re: [PATCH v9 07/24] mm: VMA sequence count

2018-03-28 Thread Laurent Dufour


On 27/03/2018 23:30, David Rientjes wrote:
> On Tue, 13 Mar 2018, Laurent Dufour wrote:
> 
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index faf85699f1a1..5898255d0aeb 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -558,6 +558,10 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
>>  else
>>  mm->highest_vm_end = vm_end_gap(vma);
>>  
>> +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
>> +seqcount_init(&vma->vm_sequence);
>> +#endif
>> +
>>  /*
>>   * vma->vm_prev wasn't known when we followed the rbtree to find the
>>   * correct insertion point for that vma. As a result, we could not
>> @@ -692,6 +696,30 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>>  long adjust_next = 0;
>>  int remove_next = 0;
>>  
>> +/*
>> + * Why use the vm_raw_write*() functions here? To avoid lockdep's warning.
>> + *
>> + * Lockdep is complaining about a theoretical lock dependency, involving
>> + * 3 locks:
>> + *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
>> + *
>> + * Here are the major paths leading to this dependency:
>> + *  1. __vma_adjust() mmap_sem  -> vm_sequence -> i_mmap_rwsem
>> + *  2. move_vma() mmap_sem -> vm_sequence -> fs_reclaim
>> + *  3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
>> + *  4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
>> + *
>> + * There is no way to solve this easily, especially because in
>> + * unmap_mapping_range() the i_mmap_rwsem is grabbed while the impacted
>> + * VMAs are not yet known.
>> + * However, the way vm_sequence is used guarantees that we will never
>> + * block on it: we only check its value and never wait for it to change;
>> + * see vma_has_changed() and handle_speculative_fault().
>> + */
>> +vm_raw_write_begin(vma);
>> +if (next)
>> +vm_raw_write_begin(next);
>> +
>>  if (next && !insert) {
>>  struct vm_area_struct *exporter = NULL, *importer = NULL;
>>  
> 
> Eek, what about later on:
> 
>   /*
>* Easily overlooked: when mprotect shifts the boundary,
>* make sure the expanding vma has anon_vma set if the
>* shrinking vma had, to cover any anon pages imported.
>*/
>   if (exporter && exporter->anon_vma && !importer->anon_vma) {
>   int error;
> 
>   importer->anon_vma = exporter->anon_vma;
>   error = anon_vma_clone(importer, exporter);
>   if (error)
>   return error;
>   }
> 
> This needs
> 
> if (error) {
>   if (next && next != vma)
>   vm_raw_write_end(next);
>   vm_raw_write_end(vma);
>   return error;
> }

Nice catch!

Thanks,
Laurent.
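
For reference, with that unwind folded in, the anon_vma_clone() failure
path in __vma_adjust() would read roughly as follows (a sketch against
the v9 code above, not the final committed form):

	if (exporter && exporter->anon_vma && !importer->anon_vma) {
		int error;

		importer->anon_vma = exporter->anon_vma;
		error = anon_vma_clone(importer, exporter);
		if (error) {
			/* Drop the sequence counts taken at function
			 * entry before bailing out; otherwise vma and
			 * next would be left with an elevated (odd)
			 * count and speculative faults on them would
			 * fail forever.
			 */
			if (next && next != vma)
				vm_raw_write_end(next);
			vm_raw_write_end(vma);
			return error;
		}
	}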



Re: [PATCH v9 07/24] mm: VMA sequence count

2018-03-27 Thread David Rientjes
On Tue, 13 Mar 2018, Laurent Dufour wrote:

> diff --git a/mm/mmap.c b/mm/mmap.c
> index faf85699f1a1..5898255d0aeb 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -558,6 +558,10 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
>   else
>   mm->highest_vm_end = vm_end_gap(vma);
>  
> +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
> + seqcount_init(&vma->vm_sequence);
> +#endif
> +
>   /*
>* vma->vm_prev wasn't known when we followed the rbtree to find the
>* correct insertion point for that vma. As a result, we could not
> @@ -692,6 +696,30 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>   long adjust_next = 0;
>   int remove_next = 0;
>  
> + /*
> +  * Why use the vm_raw_write*() functions here? To avoid lockdep's warning.
> +  *
> +  * Lockdep is complaining about a theoretical lock dependency, involving
> +  * 3 locks:
> +  *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
> +  *
> +  * Here are the major paths leading to this dependency:
> +  *  1. __vma_adjust() mmap_sem  -> vm_sequence -> i_mmap_rwsem
> +  *  2. move_vma() mmap_sem -> vm_sequence -> fs_reclaim
> +  *  3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
> +  *  4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
> +  *
> +  * There is no way to solve this easily, especially because in
> +  * unmap_mapping_range() the i_mmap_rwsem is grabbed while the impacted
> +  * VMAs are not yet known.
> +  * However, the way vm_sequence is used guarantees that we will never
> +  * block on it: we only check its value and never wait for it to change;
> +  * see vma_has_changed() and handle_speculative_fault().
> +  */
> + vm_raw_write_begin(vma);
> + if (next)
> + vm_raw_write_begin(next);
> +
>   if (next && !insert) {
>   struct vm_area_struct *exporter = NULL, *importer = NULL;
>  

Eek, what about later on:

/*
 * Easily overlooked: when mprotect shifts the boundary,
 * make sure the expanding vma has anon_vma set if the
 * shrinking vma had, to cover any anon pages imported.
 */
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;

importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error)
return error;
}

This needs

if (error) {
if (next && next != vma)
vm_raw_write_end(next);
vm_raw_write_end(vma);
return error;
}
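
A pattern that makes such early returns harder to get wrong is a small
unwind helper mirroring the two begin calls taken at the top of
__vma_adjust(). This is purely illustrative and not part of the series:

	/*
	 * Illustrative only: release the sequence counts taken at the
	 * top of __vma_adjust() before any early return.
	 */
	static inline void vm_raw_write_end_pair(struct vm_area_struct *vma,
						 struct vm_area_struct *next)
	{
		if (next && next != vma)
			vm_raw_write_end(next);
		vm_raw_write_end(vma);
	}

Each failure path then collapses to "vm_raw_write_end_pair(vma, next);
return error;".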


[PATCH v9 07/24] mm: VMA sequence count

2018-03-13 Thread Laurent Dufour
From: Peter Zijlstra 

Wrap the VMA modifications (vma_adjust/unmap_page_range) with sequence
counts such that we can easily test whether a VMA has changed.

The unmap_page_range() one allows us to make assumptions about
page-tables; when we find the seqcount hasn't changed we can assume
page-tables are still valid.

The flip side is that we cannot distinguish between a vma_adjust() and
an unmap_page_range(); with the former we could have re-checked the vma
bounds against the address.

Signed-off-by: Peter Zijlstra (Intel) 

[Port to 4.12 kernel]
[Build depends on CONFIG_SPECULATIVE_PAGE_FAULT]
[Introduce vm_write_* inline functions depending on
 CONFIG_SPECULATIVE_PAGE_FAULT]
[Fix lock dependency between mapping->i_mmap_rwsem and vma->vm_sequence by
 using vm_raw_write* functions]
Signed-off-by: Laurent Dufour 
---
 include/linux/mm.h       | 41 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mm_types.h |  3 +++
 mm/memory.c              |  2 ++
 mm/mmap.c                | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 81 insertions(+)
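
As a note on how these counts are meant to be consumed (a sketch only:
vma_has_changed() and handle_speculative_fault() are introduced later in
the series, so the exact shape is assumed here):

	/*
	 * Speculative read side: snapshot the count, do the work, then
	 * verify that no writer (vma_adjust/unmap_page_range) ran in
	 * between.
	 */
	unsigned int seq = read_seqcount_begin(&vma->vm_sequence);

	/* ... speculatively walk the page tables for this VMA ... */

	if (read_seqcount_retry(&vma->vm_sequence, seq))
		goto fallback;	/* VMA changed: retry under mmap_sem */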

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b6432a261e63..88042d843668 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1372,6 +1372,47 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+	write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+	write_seqcount_begin_nested(&vma->vm_sequence, subclass);
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+	write_seqcount_end(&vma->vm_sequence);
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_end(&vma->vm_sequence);
+}
+#else
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fd1af6b9591d..34fde7111e88 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -333,6 +333,9 @@ struct vm_area_struct {
struct mempolicy *vm_policy;/* NUMA policy for the VMA */
 #endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+   seqcount_t vm_sequence;
+#endif
 } __randomize_layout;
 
 struct core_thread {
diff --git a/mm/memory.c b/mm/memory.c
index 4bc7b0bdcb40..d57749966fb8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1503,6 +1503,7 @@ void unmap_page_range(struct mmu_gather *tlb,
unsigned long next;
 
BUG_ON(addr >= end);
+   vm_write_begin(vma);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -1512,6 +1513,7 @@ void unmap_page_range(struct mmu_gather *tlb,
next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
+   vm_write_end(vma);
 }
 
 
diff --git a/mm/mmap.c b/mm/mmap.c
index faf85699f1a1..5898255d0aeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -558,6 +558,10 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
else
mm->highest_vm_end = vm_end_gap(vma);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqcount_init(&vma->vm_sequence);
+#endif
+
/*
 * vma->vm_prev wasn't known when we followed the rbtree to find the
 * correct insertion point for that vma. As a result, we could not
@@ -692,6 +696,30 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
long adjust_next = 0;
int remove_next = 0;
 
+	/*
+	 * Why use the vm_raw_write*() functions here? To avoid lockdep's warning.
+	 *
+	 * Lockdep is complaining about a theoretical lock dependency, involving
+	 * 3 locks:
+	 *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
+	 *
+	 * Here are the major paths leading to this dependency:
+	 *  1. __vma_adjust() mmap_sem  -> vm_sequence -> i_mmap_rwsem
+	 *  2. move_vma() mmap_sem -> vm_sequence -> fs_reclaim
+	 *  3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
+	 *  4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
+	 *
+	 * There is no way to solve this easily, especially because in
+	 * unmap_mapping_range() the i_mmap_rwsem is grabbed while the impacted
+	 * VMAs are not yet known.
+	 * However, the way vm_sequence is used guarantees that we will never
+	 * block on it: we only check its value and never wait for it to change;
+	 * see vma_has_changed() and handle_speculative_fault().
+	 */
+	vm_raw_write_begin(vma);
+	if (next)
+		vm_raw_write_begin(next);
+