When the CPU is connected through XGMI, it has coherent access to
VRAM. In that case the device pagemap resource is taken from the
device's GMC aperture (aper_base/aper_size) instead of a dynamically
requested iomem region. That resource is used together with the
pagemap type, which can be MEMORY_DEVICE_PRIVATE or
MEMORY_DEVICE_COHERENT, to create the device pagemap region.
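
For reference, the pagemap setup in svm_migrate_init() ends up looking
roughly like this (an illustrative sketch that only restates the
svm_migrate_init() hunk of the diff below):

        if (adev->gmc.xgmi.connected_to_cpu) {
                /* Coherent CPU access over XGMI: expose VRAM directly
                 * through the GMC aperture as device-coherent memory.
                 */
                pgmap->range.start = adev->gmc.aper_base;
                pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
                pgmap->type = MEMORY_DEVICE_COHERENT;
        } else {
                /* No coherent link: reserve a free iomem region and keep
                 * using device-private pages.
                 */
                res = devm_request_free_mem_region(adev->dev,
                                                   &iomem_resource, size);
                if (IS_ERR(res))
                        return -ENOMEM;
                pgmap->range.start = res->start;
                pgmap->range.end = res->end;
                pgmap->type = MEMORY_DEVICE_PRIVATE;
        }
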
Also, MIGRATE_VMA_SELECT_DEVICE_COHERENT is selected in
svm_migrate_vma_to_ram() for the coherent case, so that device-coherent
pages can be chosen as migration sources when migrating back to system
memory.
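
A minimal sketch of that flag selection (mirroring the first hunk of
the diff below):

        if (adev->gmc.xgmi.connected_to_cpu)
                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
        else
                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;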

Signed-off-by: Alex Sierra <alex.sie...@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 33 +++++++++++++++---------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 7e3a7fcb9fe6..25c9f7a4325d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -673,9 +673,12 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
        migrate.vma = vma;
        migrate.start = start;
        migrate.end = end;
-       migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
        migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
 
+       if (adev->gmc.xgmi.connected_to_cpu)
+               migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+       else
+               migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
        size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
        size *= npages;
        buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
@@ -948,7 +951,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
 {
        struct kfd_dev *kfddev = adev->kfd.dev;
        struct dev_pagemap *pgmap;
-       struct resource *res;
+       struct resource *res = NULL;
        unsigned long size;
        void *r;
 
@@ -963,28 +966,34 @@ int svm_migrate_init(struct amdgpu_device *adev)
         * should remove reserved size
         */
        size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
-       res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
-       if (IS_ERR(res))
-               return -ENOMEM;
+       if (adev->gmc.xgmi.connected_to_cpu) {
+               pgmap->range.start = adev->gmc.aper_base;
+               pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+               pgmap->type = MEMORY_DEVICE_COHERENT;
+       } else {
+               res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+               if (IS_ERR(res))
+                       return -ENOMEM;
+               pgmap->range.start = res->start;
+               pgmap->range.end = res->end;
+               pgmap->type = MEMORY_DEVICE_PRIVATE;
+       }
 
-       pgmap->type = MEMORY_DEVICE_PRIVATE;
        pgmap->nr_range = 1;
-       pgmap->range.start = res->start;
-       pgmap->range.end = res->end;
        pgmap->ops = &svm_migrate_pgmap_ops;
        pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
-       pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
-
+       pgmap->flags = 0;
        /* Device manager releases device-specific resources, memory region and
         * pgmap when driver disconnects from device.
         */
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
-
+               if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+                       devm_release_mem_region(adev->dev, res->start,
+                                               resource_size(res));
                /* Disable SVM support capability */
                pgmap->type = 0;
-               devm_release_mem_region(adev->dev, res->start, resource_size(res));
                return PTR_ERR(r);
        }
 
-- 
2.32.0
