Use the new MMU_NOTIFY_MIGRATE event to skip MMU invalidations of device
private memory, and instead handle the invalidation in the driver as part
of migrating device private memory.
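
For reference, the driver-side pattern this enables looks roughly like the
sketch below. The names drv_interval_invalidate, struct drv and
drv->pgmap_owner are illustrative only (the actual change for the test
driver is in the diff that follows); MMU_NOTIFY_MIGRATE,
migrate_pgmap_owner and mmu_interval_set_seq() are the real kernel
interfaces involved:

    static bool drv_interval_invalidate(struct mmu_interval_notifier *mni,
                                        const struct mmu_notifier_range *range,
                                        unsigned long cur_seq)
    {
            struct drv *drv = container_of(mni, struct drv, notifier);

            /*
             * Migrations of this driver's own device private pages already
             * invalidate the device page table in the migration path, so
             * the notifier can ignore those events.
             */
            if (range->event == MMU_NOTIFY_MIGRATE &&
                range->migrate_pgmap_owner == drv->pgmap_owner)
                    return true;

            /* ... take the driver lock and invalidate its page table ... */
            mmu_interval_set_seq(mni, cur_seq);
            return true;
    }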

Signed-off-by: Ralph Campbell <rcampb...@nvidia.com>
---
 lib/test_hmm.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 1bd60cfb5a25..77875fc4e7c1 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
 {
        struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
 
+       /*
+        * Ignore invalidation callbacks for device private pages since
+        * the invalidation is handled as part of the migration process.
+        */
+       if (range->event == MMU_NOTIFY_MIGRATE &&
+           range->migrate_pgmap_owner == dmirror->mdevice)
+               return true;
+
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&dmirror->mutex);
        else if (!mutex_trylock(&dmirror->mutex))
@@ -702,7 +710,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
                args.dst = dst_pfns;
                args.start = addr;
                args.end = next;
-               args.src_owner = NULL;
+               args.src_owner = dmirror->mdevice;
                args.dir = MIGRATE_VMA_FROM_SYSTEM;
                ret = migrate_vma_setup(&args);
                if (ret)
@@ -992,7 +1000,7 @@ static void dmirror_devmem_free(struct page *page)
 }
 
 static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
-                                               struct dmirror_device *mdevice)
+                                                     struct dmirror *dmirror)
 {
        const unsigned long *src = args->src;
        unsigned long *dst = args->dst;
@@ -1014,6 +1022,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
                        continue;
 
                lock_page(dpage);
+               xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
                copy_highpage(dpage, spage);
                *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
                if (*src & MIGRATE_PFN_WRITE)
@@ -1022,15 +1031,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
        return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-                                                 struct dmirror *dmirror)
-{
-       /* Invalidate the device's page table mapping. */
-       mutex_lock(&dmirror->mutex);
-       dmirror_do_update(dmirror, args->start, args->end);
-       mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
        struct migrate_vma args;
@@ -1060,11 +1060,16 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
        if (migrate_vma_setup(&args))
                return VM_FAULT_SIGBUS;
 
-       ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+       ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
        if (ret)
                return ret;
        migrate_vma_pages(&args);
-       dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+       /*
+        * No device finalize step is needed since
+        * dmirror_devmem_fault_alloc_and_copy() will have already
+        * invalidated the device page table. We could reinstate device MMU
+        * entries for pages that didn't migrate but that should be rare.
+        */
        migrate_vma_finalize(&args);
        return 0;
 }
-- 
2.20.1
