Re: [PATCH v4 hmm 12/12] mm/hmm: Fix error flows in hmm_invalidate_range_start

2019-06-27 Thread Jason Gunthorpe
On Wed, Jun 26, 2019 at 11:18:23AM -0700, Ralph Campbell wrote:
> > diff --git a/mm/hmm.c b/mm/hmm.c
> > index b224ea635a7716..89549eac03d506 100644
> > +++ b/mm/hmm.c
> > @@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
> > init_rwsem(&hmm->mirrors_sem);
> > hmm->mmu_notifier.ops = NULL;
> > INIT_LIST_HEAD(&hmm->ranges);
> > -   mutex_init(&hmm->lock);
> > +   spin_lock_init(&hmm->ranges_lock);
> > kref_init(&hmm->kref);
> > hmm->notifiers = 0;
> > hmm->mm = mm;
> > @@ -144,6 +144,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
> > hmm_put(hmm);
> >   }
> > +static void notifiers_decrement(struct hmm *hmm)
> > +{
> > +   lockdep_assert_held(&hmm->ranges_lock);
> > +
> 
> Why not acquire the lock here and release it at the end instead
> of asserting that the lock is held?
> It looks like every caller of notifiers_decrement() does that.

Yes, this is just some left over mistake, thanks

From aa371c720a9e3c632dcd9a6a2c73e325b9b2b98c Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe 
Date: Fri, 7 Jun 2019 12:10:33 -0300
Subject: [PATCH] mm/hmm: Fix error flows in hmm_invalidate_range_start

If the trylock on the hmm->mirrors_sem fails, the function will return
without decrementing the notifiers count that was previously incremented.
Since the caller will not call invalidate_range_end() on EAGAIN, this will
result in the notifiers count becoming permanently incremented and
deadlock.

If sync_cpu_device_pagetables() required blocking, the function will not
return EAGAIN even though the device continues to touch the pages. This is
a violation of the mmu notifier contract.

Switch, and rename, the ranges_lock to a spin lock so we can reliably
obtain it without blocking during error unwind.

The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables() as we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while doing sync_cpu_device_pagetables().

Signed-off-by: Jason Gunthorpe 
Reviewed-by: Ralph Campbell 
Reviewed-by: Christoph Hellwig 
Tested-by: Philip Yang 
---
v4
 - Move lock into notifiers_decrement() (Ralph)
---
 include/linux/hmm.h |  2 +-
 mm/hmm.c            | 69 ++---
 2 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
 struct hmm {
struct mm_struct*mm;
struct kref kref;
-   struct mutexlock;
+   spinlock_t  ranges_lock;
struct list_headranges;
struct list_headmirrors;
struct mmu_notifier mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index b224ea635a7716..de35289df20d43 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
init_rwsem(&hmm->mirrors_sem);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
-   mutex_init(&hmm->lock);
+   spin_lock_init(&hmm->ranges_lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
hmm->mm = mm;
@@ -144,6 +144,25 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
hmm_put(hmm);
 }
 
+static void notifiers_decrement(struct hmm *hmm)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
+   hmm->notifiers--;
+   if (!hmm->notifiers) {
+   struct hmm_range *range;
+
+   list_for_each_entry(range, &hmm->ranges, list) {
+   if (range->valid)
+   continue;
+   range->valid = true;
+   }
+   wake_up_all(&hmm->wq);
+   }
+   spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+}
+
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *nrange)
 {
@@ -151,6 +170,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
struct hmm_mirror *mirror;
struct hmm_update update;
struct hmm_range *range;
+   unsigned long flags;
int ret = 0;
 
if (!kref_get_unless_zero(&hmm->kref))
@@ -161,12 +181,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
update.event = HMM_UPDATE_INVALIDATE;
update.blockable = mmu_notifier_range_blockable(nrange);
 
-   if (mmu_notifier_range_blockable(nrange))
-   mutex_lock(&hmm->lock);
-   else if (!mutex_trylock(&hmm->lock)) {
-   ret = -EAGAIN;
-   goto out;
-   }
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
hmm->notifiers++;
list_for_each_entry(range, &hmm->ranges, list) {
if (update.end < range->start || update.start >= range->end)
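The archive truncates the updated patch here. Assuming the remainder matches the
v4 posting quoted later in this thread, the tail of hmm_invalidate_range_start()
would now end up looking roughly like the sketch below (not the verbatim diff):
with notifiers_decrement() taking ranges_lock itself, the error unwind reduces to
a single call.

	up_read(&hmm->mirrors_sem);

out:
	/* On -EAGAIN, drop the notifier count taken above before returning. */
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}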

Re: [PATCH v4 hmm 12/12] mm/hmm: Fix error flows in hmm_invalidate_range_start

2019-06-27 Thread Ralph Campbell


On 6/24/19 2:01 PM, Jason Gunthorpe wrote:

From: Jason Gunthorpe 

If the trylock on the hmm->mirrors_sem fails, the function will return
without decrementing the notifiers count that was previously incremented.
Since the caller will not call invalidate_range_end() on EAGAIN, this will
result in the notifiers count becoming permanently incremented and
deadlock.

If sync_cpu_device_pagetables() required blocking, the function will not
return EAGAIN even though the device continues to touch the pages. This is
a violation of the mmu notifier contract.

Switch, and rename, the ranges_lock to a spin lock so we can reliably
obtain it without blocking during error unwind.

The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables() as we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while doing sync_cpu_device_pagetables().

Signed-off-by: Jason Gunthorpe 
Reviewed-by: Ralph Campbell 
Reviewed-by: Christoph Hellwig 
Tested-by: Philip Yang 
---
  include/linux/hmm.h |  2 +-
  mm/hmm.c            | 72 +++--
  2 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
  struct hmm {
struct mm_struct*mm;
struct kref kref;
-   struct mutexlock;
+   spinlock_t  ranges_lock;
struct list_headranges;
struct list_headmirrors;
struct mmu_notifier mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index b224ea635a7716..89549eac03d506 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
init_rwsem(&hmm->mirrors_sem);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
-   mutex_init(&hmm->lock);
+   spin_lock_init(&hmm->ranges_lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
hmm->mm = mm;
@@ -144,6 +144,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
hmm_put(hmm);
  }
  
+static void notifiers_decrement(struct hmm *hmm)
+{
+   lockdep_assert_held(&hmm->ranges_lock);
+


Why not acquire the lock here and release it at the end instead
of asserting that the lock is held?
It looks like every caller of notifiers_decrement() does that.


+   hmm->notifiers--;
+   if (!hmm->notifiers) {
+   struct hmm_range *range;
+
+   list_for_each_entry(range, &hmm->ranges, list) {
+   if (range->valid)
+   continue;
+   range->valid = true;
+   }
+   wake_up_all(&hmm->wq);
+   }
+}
+
  static int hmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *nrange)
  {
@@ -151,6 +168,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
struct hmm_mirror *mirror;
struct hmm_update update;
struct hmm_range *range;
+   unsigned long flags;
int ret = 0;
  
  	if (!kref_get_unless_zero(&hmm->kref))
@@ -161,12 +179,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
update.event = HMM_UPDATE_INVALIDATE;
update.blockable = mmu_notifier_range_blockable(nrange);
  
-	if (mmu_notifier_range_blockable(nrange))
-   mutex_lock(&hmm->lock);
-   else if (!mutex_trylock(&hmm->lock)) {
-   ret = -EAGAIN;
-   goto out;
-   }
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
hmm->notifiers++;
list_for_each_entry(range, &hmm->ranges, list) {
if (update.end < range->start || update.start >= range->end)
@@ -174,7 +187,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
  
  		range->valid = false;
}
-   mutex_unlock(&hmm->lock);
+   spin_unlock_irqrestore(&hmm->ranges_lock, flags);
  
  	if (mmu_notifier_range_blockable(nrange))
down_read(&hmm->mirrors_sem);
@@ -182,16 +195,26 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
ret = -EAGAIN;
goto out;
}
+
list_for_each_entry(mirror, &hmm->mirrors, list) {
-   int ret;
+   int rc;
  
-		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-   if (!update.blockable && ret == -EAGAIN)
+   rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+   if (rc) {
+   if (WARN_ON(update.blockable || rc != -EAGAIN))
+   continue;
+   ret = -EAGAIN;
break;
+   }
}
up_read(&hmm->mirrors_sem);
  
  out:
+   if (ret) {
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
+   notifiers_decrement(hmm);
+   spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+   }

[PATCH v4 hmm 12/12] mm/hmm: Fix error flows in hmm_invalidate_range_start

2019-06-25 Thread Jason Gunthorpe
From: Jason Gunthorpe 

If the trylock on the hmm->mirrors_sem fails, the function will return
without decrementing the notifiers count that was previously incremented.
Since the caller will not call invalidate_range_end() on EAGAIN, this will
result in the notifiers count becoming permanently incremented and
deadlock.

If sync_cpu_device_pagetables() required blocking, the function will not
return EAGAIN even though the device continues to touch the pages. This is
a violation of the mmu notifier contract.

Switch, and rename, the ranges_lock to a spin lock so we can reliably
obtain it without blocking during error unwind.

The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables() as we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while doing sync_cpu_device_pagetables().

Signed-off-by: Jason Gunthorpe 
Reviewed-by: Ralph Campbell 
Reviewed-by: Christoph Hellwig 
Tested-by: Philip Yang 
---
 include/linux/hmm.h |  2 +-
 mm/hmm.c            | 72 +++--
 2 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
 struct hmm {
struct mm_struct*mm;
struct kref kref;
-   struct mutexlock;
+   spinlock_t  ranges_lock;
struct list_headranges;
struct list_headmirrors;
struct mmu_notifier mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index b224ea635a7716..89549eac03d506 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
init_rwsem(&hmm->mirrors_sem);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
-   mutex_init(&hmm->lock);
+   spin_lock_init(&hmm->ranges_lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
hmm->mm = mm;
@@ -144,6 +144,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
hmm_put(hmm);
 }
 
+static void notifiers_decrement(struct hmm *hmm)
+{
+   lockdep_assert_held(&hmm->ranges_lock);
+
+   hmm->notifiers--;
+   if (!hmm->notifiers) {
+   struct hmm_range *range;
+
+   list_for_each_entry(range, &hmm->ranges, list) {
+   if (range->valid)
+   continue;
+   range->valid = true;
+   }
+   wake_up_all(&hmm->wq);
+   }
+}
+
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *nrange)
 {
@@ -151,6 +168,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
struct hmm_mirror *mirror;
struct hmm_update update;
struct hmm_range *range;
+   unsigned long flags;
int ret = 0;
 
if (!kref_get_unless_zero(&hmm->kref))
@@ -161,12 +179,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
update.event = HMM_UPDATE_INVALIDATE;
update.blockable = mmu_notifier_range_blockable(nrange);
 
-   if (mmu_notifier_range_blockable(nrange))
-   mutex_lock(&hmm->lock);
-   else if (!mutex_trylock(&hmm->lock)) {
-   ret = -EAGAIN;
-   goto out;
-   }
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
hmm->notifiers++;
list_for_each_entry(range, &hmm->ranges, list) {
if (update.end < range->start || update.start >= range->end)
@@ -174,7 +187,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 
range->valid = false;
}
-   mutex_unlock(&hmm->lock);
+   spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
if (mmu_notifier_range_blockable(nrange))
down_read(&hmm->mirrors_sem);
@@ -182,16 +195,26 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
ret = -EAGAIN;
goto out;
}
+
list_for_each_entry(mirror, &hmm->mirrors, list) {
-   int ret;
+   int rc;
 
-   ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-   if (!update.blockable && ret == -EAGAIN)
+   rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+   if (rc) {
+   if (WARN_ON(update.blockable || rc != -EAGAIN))
+   continue;
+   ret = -EAGAIN;
break;
+   }
}
up_read(&hmm->mirrors_sem);
 
 out:
+   if (ret) {
+   spin_lock_irqsave(&hmm->ranges_lock, flags);
+   notifiers_decrement(hmm);
+   spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+   }
hmm_put(hmm);
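
For reference on the mmu notifier contract discussed above, here is a minimal
sketch of a driver-side mirror callback honoring the blockable/-EAGAIN rule,
assuming the 5.2-era hmm_mirror_ops / hmm_update interface this patch is written
against; the my_device_* structure and helpers are hypothetical, not an existing
driver.

static int my_device_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
				const struct hmm_update *update)
{
	/* Hypothetical driver object embedding its struct hmm_mirror. */
	struct my_device *mdev = container_of(mirror, struct my_device, mirror);

	if (!update->blockable) {
		/*
		 * Not allowed to sleep: either invalidate the device's
		 * mappings without blocking or return -EAGAIN so that
		 * hmm_invalidate_range_start() can unwind its notifiers
		 * count and propagate EAGAIN to the core mm.
		 */
		if (!my_device_try_invalidate(mdev, update->start, update->end))
			return -EAGAIN;
		return 0;
	}

	/* Blocking allowed: wait until the device stops touching the pages. */
	my_device_invalidate_and_wait(mdev, update->start, update->end);
	return 0;
}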