When migrating pages to or from device private memory, the device's
page tables will be invalidated as part of migrate_vma_setup() locking
and isolating the pages. The HMM self test driver therefore doesn't need
to invalidate the page table a second time after migrating pages to
system memory, so remove that redundant code.

Signed-off-by: Ralph Campbell <rcampb...@nvidia.com>
---
 lib/test_hmm.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 28528285942c..f7c2b51a7a9d 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1018,15 +1018,6 @@ static vm_fault_t 
dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
        return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-                                                 struct dmirror *dmirror)
-{
-       /* Invalidate the device's page table mapping. */
-       mutex_lock(&dmirror->mutex);
-       dmirror_do_update(dmirror, args->start, args->end);
-       mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
        struct migrate_vma args;
@@ -1059,7 +1050,10 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault 
*vmf)
        if (ret)
                return ret;
        migrate_vma_pages(&args);
-       dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+       /*
+        * No device finalize step is needed since migrate_vma_setup() will
+        * have already invalidated the device page table.
+        */
        migrate_vma_finalize(&args);
        return 0;
 }
-- 
2.20.1

Reply via email to