From: Jason Gunthorpe <j...@mellanox.com>

As coded, this function can false-fail in various racy situations. Make it
reliable and simpler by running under the write side of the mmap_sem and
avoiding the false-failing compare/exchange pattern. Since the mmap_sem is
held, this no longer has to avoid racing with a 2nd parallel
hmm_get_or_create().
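
To make the failure concrete, here is a condensed sketch of the pattern
being removed (simplified from the old code in the diff below, not a
verbatim copy). If the existing hmm is mid-teardown, kref_get_unless_zero()
fails while mm->hmm is still non-NULL, so the caller false-fails even
though it could simply have created a new hmm:

        struct hmm *hmm = READ_ONCE(mm->hmm);

        /* Teardown already dropped the last ref, so the lookup fails */
        if (hmm && kref_get_unless_zero(&hmm->kref))
                return hmm;

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        ...
        spin_lock(&mm->page_table_lock);
        if (!mm->hmm)                   /* still points at the dying hmm, */
                mm->hmm = hmm;          /* so the install is skipped ...  */
        else
                cleanup = true;         /* ... and we fail spuriously     */
        spin_unlock(&mm->page_table_lock);
        if (cleanup)
                goto error;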

Unfortunately, this still has to use the page_table_lock as the
non-sleeping lock protecting mm->hmm, since the contexts where we free the
hmm are incompatible with the mmap_sem.
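
To illustrate the resulting split (a summary sketch, not part of the patch
itself): the write side of the mmap_sem serializes creators, while the SRCU
free callback, which cannot take the mmap_sem, gets by with the spinlock
alone:

        /* Creation: caller holds mmap_sem for write (enforced by the
         * lockdep assertion below); page_table_lock publishes mm->hmm.
         */
        spin_lock(&mm->page_table_lock);
        mm->hmm = hmm;
        spin_unlock(&mm->page_table_lock);

        /* Teardown: SRCU callback context, mmap_sem unavailable; the
         * same spinlock is enough to clear the pointer.
         */
        spin_lock(&hmm->mm->page_table_lock);
        if (hmm->mm->hmm == hmm)
                hmm->mm->hmm = NULL;
        spin_unlock(&hmm->mm->page_table_lock);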

Signed-off-by: Jason Gunthorpe <j...@mellanox.com>
Reviewed-by: John Hubbard <jhubb...@nvidia.com>
Reviewed-by: Ralph Campbell <rcampb...@nvidia.com>
Reviewed-by: Ira Weiny <ira.we...@intel.com>
Tested-by: Philip Yang <philip.y...@amd.com>
---
v2:
- Fix error unwind of mmgrab (Jerome)
- Use hmm local instead of 2nd container_of (Jerome)
v3:
- Can't use mmap_sem in the SRCU callback, keep using the
  page_table_lock (Philip)
---
 mm/hmm.c | 84 ++++++++++++++++++++++++--------------------------------
 1 file changed, 36 insertions(+), 48 deletions(-)
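
For context, callers are expected to follow this pattern (a hypothetical
caller sketch matching the lockdep assertion added below; the error code is
illustrative only):

        down_write(&mm->mmap_sem);
        hmm = hmm_get_or_create(mm);    /* NULL on allocation failure or
                                         * if __mmu_notifier_register()
                                         * fails */
        up_write(&mm->mmap_sem);
        if (!hmm)
                return -ENOMEM;         /* illustrative error choice */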

diff --git a/mm/hmm.c b/mm/hmm.c
index 080b17a2e87e2d..4c64d4c32f4825 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -31,16 +31,6 @@
 #if IS_ENABLED(CONFIG_HMM_MIRROR)
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
 
-static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
-{
-       struct hmm *hmm = READ_ONCE(mm->hmm);
-
-       if (hmm && kref_get_unless_zero(&hmm->kref))
-               return hmm;
-
-       return NULL;
-}
-
 /**
  * hmm_get_or_create - register HMM against an mm (HMM internal)
  *
@@ -55,11 +45,19 @@ static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
  */
 static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 {
-       struct hmm *hmm = mm_get_hmm(mm);
-       bool cleanup = false;
+       struct hmm *hmm;
 
-       if (hmm)
-               return hmm;
+       lockdep_assert_held_exclusive(&mm->mmap_sem);
+
+       /* Abuse the page_table_lock to also protect mm->hmm. */
+       spin_lock(&mm->page_table_lock);
+       if (mm->hmm) {
+               if (kref_get_unless_zero(&mm->hmm->kref)) {
+                       spin_unlock(&mm->page_table_lock);
+                       return mm->hmm;
+               }
+       }
+       spin_unlock(&mm->page_table_lock);
 
        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
@@ -74,57 +72,47 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
        hmm->notifiers = 0;
        hmm->dead = false;
        hmm->mm = mm;
-       mmgrab(hmm->mm);
 
-       spin_lock(&mm->page_table_lock);
-       if (!mm->hmm)
-               mm->hmm = hmm;
-       else
-               cleanup = true;
-       spin_unlock(&mm->page_table_lock);
+       hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
+       if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
+               kfree(hmm);
+               return NULL;
+       }
 
-       if (cleanup)
-               goto error;
+       mmgrab(hmm->mm);
 
        /*
-        * We should only get here if hold the mmap_sem in write mode ie on
-        * registration of first mirror through hmm_mirror_register()
+        * We hold the exclusive mmap_sem here so we know that mm->hmm is
+        * still NULL or 0 kref, and is safe to update.
         */
-       hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
-       if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
-               goto error_mm;
-
-       return hmm;
-
-error_mm:
        spin_lock(&mm->page_table_lock);
-       if (mm->hmm == hmm)
-               mm->hmm = NULL;
+       mm->hmm = hmm;
        spin_unlock(&mm->page_table_lock);
-error:
-       mmdrop(hmm->mm);
-       kfree(hmm);
-       return NULL;
+       return hmm;
 }
 
 static void hmm_free_rcu(struct rcu_head *rcu)
 {
-       kfree(container_of(rcu, struct hmm, rcu));
+       struct hmm *hmm = container_of(rcu, struct hmm, rcu);
+
+       /*
+        * The mm->hmm pointer is kept valid while notifier ops can be running
+        * so they don't have to deal with a NULL mm->hmm value
+        */
+       spin_lock(&hmm->mm->page_table_lock);
+       if (hmm->mm->hmm == hmm)
+               hmm->mm->hmm = NULL;
+       spin_unlock(&hmm->mm->page_table_lock);
+       mmdrop(hmm->mm);
+
+       kfree(hmm);
 }
 
 static void hmm_free(struct kref *kref)
 {
        struct hmm *hmm = container_of(kref, struct hmm, kref);
-       struct mm_struct *mm = hmm->mm;
-
-       mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
 
-       spin_lock(&mm->page_table_lock);
-       if (mm->hmm == hmm)
-               mm->hmm = NULL;
-       spin_unlock(&mm->page_table_lock);
-
-       mmdrop(hmm->mm);
+       mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
        mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
 }
 
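One ordering subtlety, spelled out (my reading of the code above): the
notifier must be unregistered before the SRCU callback is queued, so by the
time hmm_free_rcu() clears mm->hmm no notifier op can still be running and
relying on it:

        /* mmu_notifier_call_srcu() defers hmm_free_rcu() until after an
         * SRCU grace period, so no notifier op is still executing when
         * it runs.
         */
        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
        mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
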
-- 
2.21.0
