Re: [PATCH 40/41] mm: separate vma->lock from vm_area_struct

2023-01-17 Thread Suren Baghdasaryan
On Tue, Jan 17, 2023 at 10:34 AM Jann Horn  wrote:
>
> On Mon, Jan 9, 2023 at 9:55 PM Suren Baghdasaryan  wrote:
> > vma->lock being part of the vm_area_struct causes a performance regression
> > during page faults: under contention its count and owner fields are
> > constantly updated, and having other parts of vm_area_struct that are used
> > during page fault handling next to them causes constant cache line
> > bouncing. Fix that by moving the lock outside of the vm_area_struct.
> > All attempts to keep vma->lock inside vm_area_struct in a separate
> > cache line still produced a performance regression, especially on NUMA
> > machines. The smallest regression was achieved when the lock was placed in
> > the fourth cache line, but that bloats vm_area_struct to 256 bytes.
>
> Just checking: When you tested putting the lock in different cache
> lines, did you force the slab allocator to actually store the
> vm_area_struct with cacheline alignment (by setting SLAB_HWCACHE_ALIGN
> on the slab or with a ____cacheline_aligned_in_smp annotation on the struct
> definition)?

Yep, I tried all these combinations and still saw a noticeable regression.
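
For reference, the two alignment approaches being discussed look roughly like
the sketch below. This is only an illustration of the technique; the struct,
cache, and function names (vma_example, vma_example_cachep,
vma_example_cache_init) are made up for the example and are not the real
vm_area_struct or its slab cache setup.

#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

/* (1) Pad the lock onto its own cache line inside the struct itself. */
struct vma_example {
	unsigned long vm_start;
	unsigned long vm_end;
	struct rw_semaphore lock ____cacheline_aligned_in_smp;
};

static struct kmem_cache *vma_example_cachep;

/* (2) Ask the slab allocator to start every object on a cache line
 * boundary, so the per-field alignment above is not defeated by how
 * objects are packed within the slab.
 */
static int __init vma_example_cache_init(void)
{
	vma_example_cachep = kmem_cache_create("vma_example",
					       sizeof(struct vma_example), 0,
					       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
					       NULL);
	return vma_example_cachep ? 0 : -ENOMEM;
}
core_initcall(vma_example_cache_init);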


Re: [PATCH 40/41] mm: separate vma->lock from vm_area_struct

2023-01-17 Thread Jann Horn
On Mon, Jan 9, 2023 at 9:55 PM Suren Baghdasaryan  wrote:
> vma->lock being part of the vm_area_struct causes a performance regression
> during page faults: under contention its count and owner fields are
> constantly updated, and having other parts of vm_area_struct that are used
> during page fault handling next to them causes constant cache line
> bouncing. Fix that by moving the lock outside of the vm_area_struct.
> All attempts to keep vma->lock inside vm_area_struct in a separate
> cache line still produced a performance regression, especially on NUMA
> machines. The smallest regression was achieved when the lock was placed in
> the fourth cache line, but that bloats vm_area_struct to 256 bytes.

Just checking: When you tested putting the lock in different cache
lines, did you force the slab allocator to actually store the
vm_area_struct with cacheline alignment (by setting SLAB_HWCACHE_ALIGN
on the slab or with a ____cacheline_aligned_in_smp annotation on the struct
definition)?


[PATCH 40/41] mm: separate vma->lock from vm_area_struct

2023-01-09 Thread Suren Baghdasaryan
vma->lock being part of the vm_area_struct causes a performance regression
during page faults: under contention its count and owner fields are
constantly updated, and having other parts of vm_area_struct that are used
during page fault handling next to them causes constant cache line
bouncing. Fix that by moving the lock outside of the vm_area_struct.
All attempts to keep vma->lock inside vm_area_struct in a separate cache
line still produced a performance regression, especially on NUMA machines.
The smallest regression was achieved when the lock was placed in the fourth
cache line, but that bloats vm_area_struct to 256 bytes.
Considering performance and memory impact, a separate lock looks like the
best option. It increases the memory footprint of each VMA, but that will
be addressed in the next patch.
Note that after this change vma_init() no longer allocates or initializes
vma->lock. A number of drivers allocate a pseudo VMA on the stack, but they
never use the VMA's lock, so it does not need to be allocated. Drivers that
might need the VMA lock in the future should use vm_area_alloc()/vm_area_free()
to allocate it.

Signed-off-by: Suren Baghdasaryan 
---
 include/linux/mm.h       | 25 ++--
 include/linux/mm_types.h |  6 ++-
 kernel/fork.c            | 82 
 3 files changed, 74 insertions(+), 39 deletions(-)
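
For orientation before the diff: a hedged sketch of the calling convention
implied by the accessors changed below. vma_write_lock(), vma_read_trylock()
and vma_read_unlock() come from this patch; the wrapper functions
example_modify_vma() and example_fault_fast_path() are hypothetical, and the
real call sites live in other patches of the series.

#include <linux/mm.h>

/* Writer side: with mmap_lock held for write, mark the VMA write-locked
 * so speculative per-VMA readers back off until mmap_lock is released.
 */
static void example_modify_vma(struct vm_area_struct *vma)
{
	vma_write_lock(vma);
	/* ... modify the VMA while per-VMA readers are excluded ... */
}

/* Reader side: try the per-VMA lock; if that fails, the caller falls
 * back to taking mmap_lock for read as before.
 */
static bool example_fault_fast_path(struct vm_area_struct *vma)
{
	if (!vma_read_trylock(vma))
		return false;	/* retry under mmap_lock */

	/* ... handle the page fault under the per-VMA read lock ... */

	vma_read_unlock(vma);
	return true;
}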

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 50c7a6dd9c7a..d40bf8a5e19e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -615,11 +615,6 @@ struct vm_operations_struct {
 };
 
 #ifdef CONFIG_PER_VMA_LOCK
-static inline void vma_init_lock(struct vm_area_struct *vma)
-{
-   init_rwsem(&vma->lock);
-   vma->vm_lock_seq = -1;
-}
 
 static inline void vma_write_lock(struct vm_area_struct *vma)
 {
@@ -635,9 +630,9 @@ static inline void vma_write_lock(struct vm_area_struct *vma)
if (vma->vm_lock_seq == mm_lock_seq)
return;
 
-   down_write(&vma->lock);
+   down_write(&vma->vm_lock->lock);
vma->vm_lock_seq = mm_lock_seq;
-   up_write(&vma->lock);
+   up_write(&vma->vm_lock->lock);
 }
 
 /*
@@ -651,17 +646,17 @@ static inline bool vma_read_trylock(struct vm_area_struct *vma)
if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
return false;
 
-   if (unlikely(down_read_trylock(&vma->lock) == 0))
+   if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
return false;
 
/*
 * Overflow might produce false locked result.
 * False unlocked result is impossible because we modify and check
-* vma->vm_lock_seq under vma->lock protection and mm->mm_lock_seq
+* vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
 * modification invalidates all existing locks.
 */
if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
-   up_read(&vma->lock);
+   up_read(&vma->vm_lock->lock);
return false;
}
return true;
@@ -669,7 +664,7 @@ static inline bool vma_read_trylock(struct vm_area_struct *vma)
 
 static inline void vma_read_unlock(struct vm_area_struct *vma)
 {
-   up_read(&vma->lock);
+   up_read(&vma->vm_lock->lock);
 }
 
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
@@ -684,7 +679,7 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 
 static inline void vma_assert_no_reader(struct vm_area_struct *vma)
 {
-   VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock) &&
+   VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock) &&
  vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq),
  vma);
 }
@@ -694,7 +689,6 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 
 #else /* CONFIG_PER_VMA_LOCK */
 
-static inline void vma_init_lock(struct vm_area_struct *vma) {}
 static inline void vma_write_lock(struct vm_area_struct *vma) {}
 static inline bool vma_read_trylock(struct vm_area_struct *vma)
{ return false; }
@@ -704,6 +698,10 @@ static inline void vma_assert_no_reader(struct vm_area_struct *vma) {}
 
 #endif /* CONFIG_PER_VMA_LOCK */
 
+/*
+ * WARNING: vma_init does not initialize vma->vm_lock.
+ * Use vm_area_alloc()/vm_area_free() if vma needs locking.
+ */
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
static const struct vm_operations_struct dummy_vm_ops = {};
@@ -712,7 +710,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
-   vma_init_lock(vma);
 }
 
 /* Use when VMA is not part of the VMA tree and needs no locking */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c0e6c8e4700b..faa61b400f9b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -526,6 +526,10 @@ struct anon_vma_name {
char name[];
 };
 
+struct
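
The diff is truncated here in this archive. Judging from the changelog and the
vma->vm_lock->lock accesses in the hunks above, the separated lock presumably
looks something like the sketch below; the struct name and exact layout are
inferred, not copied from the missing hunk.

#include <linux/rwsem.h>

/*
 * Inferred sketch: vm_area_struct keeps only an int vm_lock_seq and a
 * pointer to this separately allocated structure, so the rwsem's heavily
 * written count/owner words no longer share cache lines with the fields
 * read in the page fault path.
 */
struct vma_lock {
	struct rw_semaphore lock;
};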