From: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>

All memory domains apart from AMDGPU_GEM_DOMAIN_GTT map 1:1 to TTM
placements, while the latter can be either AMDGPU_PL_PREEMPT or
TTM_PL_TT, depending on AMDGPU_GEM_CREATE_PREEMPTIBLE.

Simplify a few places in the code which convert the TTM placement into
a domain by checking against the current placement directly.

In the conversion AMDGPU_PL_PREEMPT does not have to be handled because
amdgpu_mem_type_to_domain() cannot return that value anyway.
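
For illustration, the conversion being dropped looks roughly like the
sketch below (a simplified example, not the exact in-tree helper, and
ignoring the GDS/GWS/OA placements). AMDGPU_PL_PREEMPT has no case of
its own and falls through to the default, which is why the direct
mem_type checks in this patch need no special handling for it:

	/* Simplified sketch of a mem_type -> domain conversion. */
	static uint32_t example_mem_type_to_domain(uint32_t mem_type)
	{
		switch (mem_type) {
		case TTM_PL_VRAM:
			return AMDGPU_GEM_DOMAIN_VRAM;
		case TTM_PL_TT:
			return AMDGPU_GEM_DOMAIN_GTT;
		case TTM_PL_SYSTEM:
			return AMDGPU_GEM_DOMAIN_CPU;
		default:
			/* AMDGPU_PL_PREEMPT and the rest end up here. */
			return 0;
		}
	}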

v2:
 * Remove AMDGPU_PL_PREEMPT handling.

v3:
 * Rebase.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Reviewed-by: Christian König <christian.koe...@amd.com> # v1
Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com> # v2
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  | 29 +++++++++------------
 2 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c12..0b3b10d21952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -165,8 +165,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
                if (r)
                        return ERR_PTR(r);
 
-       } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
-                    AMDGPU_GEM_DOMAIN_GTT)) {
+       } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
                return ERR_PTR(-EBUSY);
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b2a83c802bbd..c581e4952cbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -983,12 +983,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
        ttm_bo_pin(&bo->tbo);
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+       if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
-       } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+       } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
 
@@ -1289,7 +1288,6 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
        struct ttm_resource *res = bo->tbo.resource;
        uint64_t size = amdgpu_bo_size(bo);
        struct drm_gem_object *obj;
-       unsigned int domain;
        bool shared;
 
        /* Abort if the BO doesn't currently have a backing store */
@@ -1299,21 +1297,20 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
        obj = &bo->tbo.base;
        shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-       domain = amdgpu_mem_type_to_domain(res->mem_type);
-       switch (domain) {
-       case AMDGPU_GEM_DOMAIN_VRAM:
+       switch (res->mem_type) {
+       case TTM_PL_VRAM:
                stats->vram += size;
-               if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+               if (amdgpu_res_cpu_visible(adev, res))
                        stats->visible_vram += size;
                if (shared)
                        stats->vram_shared += size;
                break;
-       case AMDGPU_GEM_DOMAIN_GTT:
+       case TTM_PL_TT:
                stats->gtt += size;
                if (shared)
                        stats->gtt_shared += size;
                break;
-       case AMDGPU_GEM_DOMAIN_CPU:
+       case TTM_PL_SYSTEM:
        default:
                stats->cpu += size;
                if (shared)
@@ -1326,7 +1323,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        stats->requested_visible_vram += size;
 
-               if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+               if (res->mem_type != TTM_PL_VRAM) {
                        stats->evicted_vram += size;
                        if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                                stats->evicted_visible_vram += size;
@@ -1600,20 +1597,18 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
        u64 size;
 
        if (dma_resv_trylock(bo->tbo.base.resv)) {
-               unsigned int domain;
 
-               domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-               switch (domain) {
-               case AMDGPU_GEM_DOMAIN_VRAM:
+               switch (bo->tbo.resource->mem_type) {
+               case TTM_PL_VRAM:
                        if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                                placement = "VRAM VISIBLE";
                        else
                                placement = "VRAM";
                        break;
-               case AMDGPU_GEM_DOMAIN_GTT:
+               case TTM_PL_TT:
                        placement = "GTT";
                        break;
-               case AMDGPU_GEM_DOMAIN_CPU:
+               case TTM_PL_SYSTEM:
                default:
                        placement = "CPU";
                        break;
-- 
2.44.0
