Add a test to check that migrating a range of addresses containing a mix of
device private pages and normal anonymous pages migrates all of them.

Signed-off-by: Ralph Campbell <rcampb...@nvidia.com>
---
 lib/test_hmm.c                         | 22 +++++++++++++++++-----
 tools/testing/selftests/vm/hmm-tests.c | 18 ++++++++++++++----
 2 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index f7c2b51a7a9d..50bdf041770a 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -588,12 +588,24 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 
                /*
                 * Don't migrate device private pages from our own driver or
-                * others. For our own we would do a device private memory copy
-                * not a migration and for others, we would need to fault the
-                * other device's page into system memory first.
+                * others. Other device's private pages are skipped because
+                * the src_owner field won't match. The migrate_vma_setup()
+                * will have invalidated our page tables for our own device
+                * private pages as part of isolating and locking the pages.
+                * In this case, repopulate our page table.
                 */
-               if (spage && is_zone_device_page(spage))
+               if (spage && is_zone_device_page(spage)) {
+                       unsigned long pfn = addr >> PAGE_SHIFT;
+                       void *entry;
+
+                       mutex_lock(&dmirror->mutex);
+                       entry = spage->zone_device_data;
+                       if (*src & MIGRATE_PFN_WRITE)
+                               entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+                       xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+                       mutex_unlock(&dmirror->mutex);
                        continue;
+               }
 
                dpage = dmirror_devmem_alloc_page(mdevice);
                if (!dpage)
@@ -703,7 +715,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
                args.dst = dst_pfns;
                args.start = addr;
                args.end = next;
-               args.src_owner = NULL;
+               args.src_owner = dmirror->mdevice;
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out;
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index bdfa95ac9a7d..e2a36783e99d 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -881,8 +881,9 @@ TEST_F(hmm, migrate)
 }
 
 /*
- * Migrate anonymous memory to device private memory and fault it back to system
- * memory.
+ * Migrate anonymous memory to device private memory and fault some of it back
+ * to system memory, then try migrating the resulting mix of system and device
+ * private memory to the device.
  */
 TEST_F(hmm, migrate_fault)
 {
@@ -924,8 +925,17 @@ TEST_F(hmm, migrate_fault)
        for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
                ASSERT_EQ(ptr[i], i);
 
-       /* Fault pages back to system memory and check them. */
-       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+       /* Fault half the pages back to system memory and check them. */
+       for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       /* Migrate memory to the device again. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
                ASSERT_EQ(ptr[i], i);
 
        hmm_buffer_free(buffer);
-- 
2.20.1

Reply via email to