Add some basic standalone self-tests for migrating system memory to device
private memory and back, including migration of transparent huge pages.

Signed-off-by: Ralph Campbell <rcampb...@nvidia.com>
---
 lib/test_hmm.c                         | 323 ++++++++++++++++++++-----
 tools/testing/selftests/vm/hmm-tests.c | 292 ++++++++++++++++++++++
 2 files changed, 560 insertions(+), 55 deletions(-)
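
To run the new tests, something like the following should work (a minimal
sketch, assuming a kernel built with CONFIG_TEST_HMM=m and
CONFIG_TRANSPARENT_HUGEPAGE=y, and that the existing test_hmm.sh wrapper
is used to load the test_hmm module before invoking hmm-tests):

    make -C tools/testing/selftests TARGETS=vm
    sudo ./tools/testing/selftests/vm/test_hmm.sh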

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index db5d2e8d7420..f4e2e8731366 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -92,6 +92,7 @@ struct dmirror_device {
        unsigned long           calloc;
        unsigned long           cfree;
        struct page             *free_pages;
+       struct page             *free_huge_pages;
        spinlock_t              lock;           /* protects the above */
 };
 
@@ -443,6 +444,7 @@ static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
 }
 
 static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+                                  bool is_huge,
                                   struct page **ppage)
 {
        struct dmirror_chunk *devmem;
@@ -502,16 +504,39 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
                pfn_first, pfn_last);
 
        spin_lock(&mdevice->lock);
-       for (pfn = pfn_first; pfn < pfn_last; pfn++) {
+       for (pfn = pfn_first; pfn < pfn_last; ) {
                struct page *page = pfn_to_page(pfn);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               /*
+                * Create a compound (huge) page if the PFN is PMD aligned.
+                * The "pfn + HPAGE_PMD_NR < pfn_last - 1" check leaves the
+                * last two huge pages' worth of PFNs for normal pages.
+                */
+               if ((pfn & (HPAGE_PMD_NR - 1)) == 0 &&
+                   pfn + HPAGE_PMD_NR < pfn_last - 1) {
+                       prep_compound_page(page, HPAGE_PMD_ORDER);
+                       page->zone_device_data = mdevice->free_huge_pages;
+                       mdevice->free_huge_pages = page;
+                       pfn += HPAGE_PMD_NR;
+                       percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+                       continue;
+               }
+#endif
                page->zone_device_data = mdevice->free_pages;
                mdevice->free_pages = page;
+               pfn++;
        }
        if (ppage) {
-               *ppage = mdevice->free_pages;
-               mdevice->free_pages = (*ppage)->zone_device_data;
-               mdevice->calloc++;
+               if (is_huge) {
+                       *ppage = mdevice->free_huge_pages;
+                       mdevice->free_huge_pages = (*ppage)->zone_device_data;
+                       mdevice->calloc += 1UL << compound_order(*ppage);
+               } else {
+                       *ppage = mdevice->free_pages;
+                       mdevice->free_pages = (*ppage)->zone_device_data;
+                       mdevice->calloc++;
+               }
        }
        spin_unlock(&mdevice->lock);
 
@@ -527,7 +552,8 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
        return false;
 }
 
-static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
+static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice,
+                                             bool is_huge)
 {
        struct page *dpage = NULL;
        struct page *rpage;
@@ -542,17 +568,40 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
 
        spin_lock(&mdevice->lock);
 
-       if (mdevice->free_pages) {
+       if (is_huge && mdevice->free_huge_pages) {
+               dpage = mdevice->free_huge_pages;
+               mdevice->free_huge_pages = dpage->zone_device_data;
+               mdevice->calloc += 1UL << compound_order(dpage);
+               spin_unlock(&mdevice->lock);
+       } else if (!is_huge && mdevice->free_pages) {
                dpage = mdevice->free_pages;
                mdevice->free_pages = dpage->zone_device_data;
                mdevice->calloc++;
                spin_unlock(&mdevice->lock);
        } else {
                spin_unlock(&mdevice->lock);
-               if (!dmirror_allocate_chunk(mdevice, &dpage))
+               if (!dmirror_allocate_chunk(mdevice, is_huge, &dpage))
                        goto error;
        }
 
+       if (is_huge) {
+               unsigned int nr_pages = 1U << compound_order(dpage);
+               unsigned int i;
+               struct page **tpage;
+
+               tpage = kmap(rpage);
+               for (i = 0; i < nr_pages; i++, tpage++) {
+                       *tpage = alloc_page(GFP_HIGHUSER);
+                       if (!*tpage) {
+                               while (i--)
+                                       __free_page(*--tpage);
+                               kunmap(rpage);
+                               goto error;
+                       }
+               }
+               kunmap(rpage);
+       }
+
        dpage->zone_device_data = rpage;
        get_page(dpage);
        lock_page(dpage);
@@ -569,16 +618,17 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
        struct dmirror_device *mdevice = dmirror->mdevice;
        const unsigned long *src = args->src;
        unsigned long *dst = args->dst;
-       unsigned long addr;
+       unsigned long end_pfn = args->end >> PAGE_SHIFT;
+       unsigned long pfn;
 
-       for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
-                                                  src++, dst++) {
+       for (pfn = args->start >> PAGE_SHIFT; pfn < end_pfn; ) {
                struct page *spage;
                struct page *dpage;
                struct page *rpage;
+               bool is_huge;
 
                if (!(*src & MIGRATE_PFN_MIGRATE))
-                       continue;
+                       goto next;
 
                /*
                 * Note that spage might be NULL which is OK since it is an
@@ -595,7 +645,6 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                 * In this case, repopulate our page table.
                 */
                if (spage && is_zone_device_page(spage)) {
-                       unsigned long pfn = addr >> PAGE_SHIFT;
                        void *entry;
 
                        mutex_lock(&dmirror->mutex);
@@ -604,18 +653,14 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                                entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
                        xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
                        mutex_unlock(&dmirror->mutex);
-                       continue;
+                       goto next;
                }
 
-               dpage = dmirror_devmem_alloc_page(mdevice);
+               /* This flag is only set if a whole huge page is migrated. */
+               is_huge = *src & MIGRATE_PFN_COMPOUND;
+               dpage = dmirror_devmem_alloc_page(mdevice, is_huge);
                if (!dpage)
-                       continue;
-
-               rpage = dpage->zone_device_data;
-               if (spage)
-                       copy_highpage(rpage, spage);
-               else
-                       clear_highpage(rpage);
+                       goto next;
 
                /*
                 * Normally, a device would use the page->zone_device_data to
@@ -623,6 +668,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                 * the simulated device memory and that page holds the pointer
                 * to the mirror.
                 */
+               rpage = dpage->zone_device_data;
                rpage->zone_device_data = dmirror;
 
                *dst = migrate_pfn(page_to_pfn(dpage)) |
@@ -630,6 +676,37 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                if ((*src & MIGRATE_PFN_WRITE) ||
                    (!spage && args->vma->vm_flags & VM_WRITE))
                        *dst |= MIGRATE_PFN_WRITE;
+
+               if (is_huge) {
+                       struct page **tpage;
+                       unsigned int order = compound_order(dpage);
+                       unsigned long endp = pfn + (1UL << order);
+
+                       *dst |= MIGRATE_PFN_COMPOUND;
+                       tpage = kmap(rpage);
+                       while (pfn < endp) {
+                               if (spage) {
+                                       copy_highpage(*tpage, spage);
+                                       spage++;
+                               } else
+                                       clear_highpage(*tpage);
+                               tpage++;
+                               pfn++;
+                               src++;
+                               dst++;
+                       }
+                       kunmap(rpage);
+                       continue;
+               }
+
+               if (spage)
+                       copy_highpage(rpage, spage);
+               else
+                       clear_highpage(rpage);
+next:
+               pfn++;
+               src++;
+               dst++;
        }
 }
 
@@ -641,38 +718,76 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
        const unsigned long *src = args->src;
        const unsigned long *dst = args->dst;
        unsigned long pfn;
+       int ret = 0;
 
        /* Map the migrated pages into the device's page tables. */
        mutex_lock(&dmirror->mutex);
 
-       for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
-                                                               src++, dst++) {
+       for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); ) {
+               unsigned long mpfn;
                struct page *dpage;
+               struct page *rpage;
                void *entry;
 
                if (!(*src & MIGRATE_PFN_MIGRATE))
-                       continue;
+                       goto next;
 
-               dpage = migrate_pfn_to_page(*dst);
+               mpfn = *dst;
+               dpage = migrate_pfn_to_page(mpfn);
                if (!dpage)
-                       continue;
+                       goto next;
 
                /*
                 * Store the page that holds the data so the page table
                 * doesn't have to deal with ZONE_DEVICE private pages.
                 */
-               entry = dpage->zone_device_data;
-               if (*dst & MIGRATE_PFN_WRITE)
+               rpage = dpage->zone_device_data;
+               if (mpfn & MIGRATE_PFN_COMPOUND) {
+                       struct page **tpage;
+                       unsigned int order = compound_order(dpage);
+                       unsigned long end_pfn = pfn + (1UL << order);
+
+                       ret = 0;
+                       tpage = kmap(rpage);
+                       while (pfn < end_pfn) {
+                               entry = *tpage;
+                               if (mpfn & MIGRATE_PFN_WRITE)
+                                       entry = xa_tag_pointer(entry,
+                                                       DPT_XA_TAG_WRITE);
+                               entry = xa_store(&dmirror->pt, pfn, entry,
+                                                GFP_KERNEL);
+                               if (xa_is_err(entry)) {
+                                       ret = xa_err(entry);
+                                       break;
+                               }
+                               tpage++;
+                               pfn++;
+                               src++;
+                               dst++;
+                       }
+                       kunmap(rpage);
+                       if (ret)
+                               goto err;
+                       continue;
+               }
+
+               entry = rpage;
+               if (mpfn & MIGRATE_PFN_WRITE)
                        entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
                entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
                if (xa_is_err(entry)) {
                        mutex_unlock(&dmirror->mutex);
                        return xa_err(entry);
                }
+next:
+               pfn++;
+               src++;
+               dst++;
        }
 
+err:
        mutex_unlock(&dmirror->mutex);
-       return 0;
+       return ret;
 }
 
 static int dmirror_migrate(struct dmirror *dmirror,
@@ -682,8 +797,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
        unsigned long size = cmd->npages << PAGE_SHIFT;
        struct mm_struct *mm = dmirror->notifier.mm;
        struct vm_area_struct *vma;
-       unsigned long src_pfns[64];
-       unsigned long dst_pfns[64];
+       unsigned long *src_pfns;
+       unsigned long *dst_pfns;
        struct dmirror_bounce bounce;
        struct migrate_vma args;
        unsigned long next;
@@ -698,6 +813,17 @@ static int dmirror_migrate(struct dmirror *dmirror,
        if (!mmget_not_zero(mm))
                return -EINVAL;
 
+       src_pfns = kmalloc_array(PTRS_PER_PTE, sizeof(*src_pfns), GFP_KERNEL);
+       if (!src_pfns) {
+               ret = -ENOMEM;
+               goto out_put;
+       }
+       dst_pfns = kmalloc_array(PTRS_PER_PTE, sizeof(*dst_pfns), GFP_KERNEL);
+       if (!dst_pfns) {
+               ret = -ENOMEM;
+               goto out_free_src;
+       }
+
        mmap_read_lock(mm);
        for (addr = start; addr < end; addr = next) {
                vma = find_vma(mm, addr);
@@ -706,7 +832,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
                        ret = -EINVAL;
                        goto out;
                }
-               next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+               next = min(end, addr + (PTRS_PER_PTE << PAGE_SHIFT));
                if (next > vma->vm_end)
                        next = vma->vm_end;
 
@@ -725,6 +851,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
                dmirror_migrate_finalize_and_map(&args, dmirror);
                migrate_vma_finalize(&args);
        }
+       kfree(dst_pfns);
+       kfree(src_pfns);
        mmap_read_unlock(mm);
        mmput(mm);
 
@@ -746,6 +874,10 @@ static int dmirror_migrate(struct dmirror *dmirror,
 
 out:
        mmap_read_unlock(mm);
+       kfree(dst_pfns);
+out_free_src:
+       kfree(src_pfns);
+out_put:
        mmput(mm);
        return ret;
 }
@@ -986,18 +1118,37 @@ static const struct file_operations dmirror_fops = {
 
 static void dmirror_devmem_free(struct page *page)
 {
-       struct page *rpage = page->zone_device_data;
+       struct page *rpage = compound_head(page)->zone_device_data;
+       unsigned int order = compound_order(page);
+       unsigned int nr_pages = 1U << order;
        struct dmirror_device *mdevice;
 
-       if (rpage)
+       if (rpage) {
+               if (order) {
+                       unsigned int i;
+                       struct page **tpage;
+                       void *kaddr;
+
+                       kaddr = kmap_atomic(rpage);
+                       tpage = kaddr;
+                       for (i = 0; i < nr_pages; i++, tpage++)
+                               __free_page(*tpage);
+                       kunmap_atomic(kaddr);
+               }
                __free_page(rpage);
+       }
 
        mdevice = dmirror_page_to_device(page);
 
        spin_lock(&mdevice->lock);
-       mdevice->cfree++;
-       page->zone_device_data = mdevice->free_pages;
-       mdevice->free_pages = page;
+       if (order) {
+               page->zone_device_data = mdevice->free_huge_pages;
+               mdevice->free_huge_pages = page;
+       } else {
+               page->zone_device_data = mdevice->free_pages;
+               mdevice->free_pages = page;
+       }
+       mdevice->cfree += nr_pages;
        spin_unlock(&mdevice->lock);
 }
 
@@ -1010,24 +1161,51 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
        unsigned long end = args->end;
        unsigned long addr;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE,
-                                      src++, dst++) {
-               struct page *dpage, *spage;
+       for (addr = start; addr < end; ) {
+               struct page *spage, *dpage;
+               unsigned int order = 0;
+               unsigned int nr_pages = 1;
+               unsigned int i;
 
                spage = migrate_pfn_to_page(*src);
                if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
-                       continue;
+                       goto next;
+               order = compound_order(spage);
+               nr_pages = 1U << order;
+               /* The source page is the ZONE_DEVICE private page. */
                spage = spage->zone_device_data;
 
-               dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-               if (!dpage)
-                       continue;
+               if (order)
+                       dpage = alloc_transhugepage(args->vma, addr);
+               else
+                       dpage = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0,
+                                               args->vma, addr,
+                                               numa_node_id(), false);
+
+               if (!dpage || compound_order(dpage) != order)
+                       return VM_FAULT_OOM;
 
                lock_page(dpage);
-               copy_highpage(dpage, spage);
                *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
                if (*src & MIGRATE_PFN_WRITE)
                        *dst |= MIGRATE_PFN_WRITE;
+               if (order) {
+                       struct page **tpage;
+
+                       *dst |= MIGRATE_PFN_COMPOUND;
+                       tpage = kmap(spage);
+                       for (i = 0; i < nr_pages; i++) {
+                               copy_highpage(dpage, *tpage);
+                               tpage++;
+                               dpage++;
+                       }
+                       kunmap(spage);
+               } else
+                       copy_highpage(dpage, spage);
+next:
+               addr += PAGE_SIZE << order;
+               src += nr_pages;
+               dst += nr_pages;
        }
        return 0;
 }
@@ -1037,39 +1215,74 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
        struct migrate_vma args;
        unsigned long src_pfns;
        unsigned long dst_pfns;
+       struct page *page;
        struct page *rpage;
+       unsigned int order;
        struct dmirror *dmirror;
        vm_fault_t ret;
 
+       page = compound_head(vmf->page);
+       order = compound_order(page);
+
        /*
         * Normally, a device would use the page->zone_device_data to point to
         * the mirror but here we use it to hold the page for the simulated
         * device memory and that page holds the pointer to the mirror.
         */
-       rpage = vmf->page->zone_device_data;
+       rpage = page->zone_device_data;
        dmirror = rpage->zone_device_data;
 
-       /* FIXME demonstrate how we can adjust migrate range */
+       if (order) {
+               args.start = vmf->address & (PAGE_MASK << order);
+               args.end = args.start + (PAGE_SIZE << order);
+               args.src = kcalloc(PTRS_PER_PTE, sizeof(*args.src),
+                                  GFP_KERNEL);
+               if (!args.src)
+                       return VM_FAULT_OOM;
+               args.dst = kcalloc(PTRS_PER_PTE, sizeof(*args.dst),
+                                  GFP_KERNEL);
+               if (!args.dst) {
+                       ret = VM_FAULT_OOM;
+                       goto error_src;
+               }
+       } else {
+               args.start = vmf->address;
+               args.end = args.start + PAGE_SIZE;
+               args.src = &src_pfns;
+               args.dst = &dst_pfns;
+       }
        args.vma = vmf->vma;
-       args.start = vmf->address;
-       args.end = args.start + PAGE_SIZE;
-       args.src = &src_pfns;
-       args.dst = &dst_pfns;
        args.src_owner = dmirror->mdevice;
 
-       if (migrate_vma_setup(&args))
-               return VM_FAULT_SIGBUS;
+       if (migrate_vma_setup(&args)) {
+               ret = VM_FAULT_SIGBUS;
+               goto error_dst;
+       }
 
        ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
        if (ret)
-               return ret;
+               goto error_fin;
        migrate_vma_pages(&args);
        /*
         * No device finalize step is needed since migrate_vma_setup() will
         * have already invalidated the device page table.
         */
        migrate_vma_finalize(&args);
+       if (order) {
+               kfree(args.dst);
+               kfree(args.src);
+       }
        return 0;
+
+error_fin:
+       migrate_vma_finalize(&args);
+error_dst:
+       if (args.dst != &dst_pfns)
+               kfree(args.dst);
+error_src:
+       if (args.src != &src_pfns)
+               kfree(args.src);
+       return ret;
 }
 
 static const struct dev_pagemap_ops dmirror_devmem_ops = {
@@ -1093,7 +1306,7 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
                return ret;
 
        /* Build a list of free ZONE_DEVICE private struct pages */
-       dmirror_allocate_chunk(mdevice, NULL);
+       dmirror_allocate_chunk(mdevice, false, NULL);
 
        return 0;
 }
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index e0fa864d03fa..d58a6f5280b7 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -1442,4 +1442,296 @@ TEST_F(hmm2, double_map)
        hmm_buffer_free(buffer);
 }
 
+/*
+ * Migrate an empty private anonymous huge page.
+ */
+TEST_F(hmm, migrate_anon_huge_empty)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge zero page.
+ */
+TEST_F(hmm, migrate_anon_huge_zero)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+       int val;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize a read-only zero huge page. */
+       val = *(int *)buffer->ptr;
+       ASSERT_EQ(val, 0);
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], 0);
+
+       /* Fault pages back to system memory and check them. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) {
+               ASSERT_EQ(ptr[i], 0);
+               /* If it asserts once, it probably will 500,000 times */
+               if (ptr[i] != 0)
+                       break;
+       }
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate a private anonymous huge page.
+ */
+TEST_F(hmm, migrate_anon_huge)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate a private anonymous huge page and free it.
+ */
+TEST_F(hmm, migrate_anon_huge_free)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       /* Try freeing it. */
+       ret = madvise(map, size, MADV_FREE);
+       ASSERT_EQ(ret, 0);
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate a private anonymous huge page and fault it back to system memory.
+ */
+TEST_F(hmm, migrate_anon_huge_fault)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       unsigned long i;
+       void *old_ptr;
+       void *map;
+       int *ptr;
+       int ret;
+
+       size = TWOMEG;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = 2 * size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+       memset(buffer->mirror, 0xFF, size);
+
+       buffer->ptr = mmap(NULL, 2 * size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       npages = size >> self->page_shift;
+       map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+       ret = madvise(map, size, MADV_HUGEPAGE);
+       ASSERT_EQ(ret, 0);
+       old_ptr = buffer->ptr;
+       buffer->ptr = map;
+
+       /* Initialize buffer in system memory. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+               ptr[i] = i;
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, 0);
+       ASSERT_EQ(buffer->cpages, npages);
+
+       /* Check what the device read. */
+       for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+               ASSERT_EQ(ptr[i], i);
+
+       /* Fault pages back to system memory and check them. */
+       for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) {
+               ASSERT_EQ(ptr[i], i);
+               /* If it asserts once, it probably will 500,000 times */
+               if (ptr[i] != i)
+                       break;
+       }
+
+       buffer->ptr = old_ptr;
+       hmm_buffer_free(buffer);
+}
+
 TEST_HARNESS_MAIN
-- 
2.20.1