Use the drm_*_get() and drm_*_put() helpers instead of the drm_*_reference()
and drm_*_unreference() helpers.

Generated by: scripts/coccinelle/api/drm-get-put.cocci

CC: annwang <annie.w...@amd.com>
Signed-off-by: Julia Lawall <julia.law...@lip6.fr>
Signed-off-by: Fengguang Wu <fengguang...@intel.com>
---
tree:   git://people.freedesktop.org/~agd5f/linux.git
amd-mainline-hybrid-4.11
head:   7ccf5ab3da7a87288cc0fd11910b212e4ac154a6
commit: 67207f0941969278dd47e2549fae4fe5502183c1 [1119/1800]
drm/amd/amdkcl: [4.7] fix dev->struct_mutex

Please take the patch only if it's a true positive. Thanks!
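
For reference, the Coccinelle script rewrites the GEM calls according to the
upstream rename (a summary for reviewers, not part of the patch itself):

    drm_gem_object_reference(obj)            -> drm_gem_object_get(obj)
    drm_gem_object_unreference(obj)          -> drm_gem_object_put(obj)
    drm_gem_object_unreference_unlocked(obj) -> drm_gem_object_put_unlocked(obj)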

 amdgpu_gem.c |   24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -108,9 +108,9 @@ void amdgpu_gem_force_release(struct amd
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
-                       drm_gem_object_unreference(gobj);
+                       drm_gem_object_put(gobj);
 #else
-                       drm_gem_object_unreference_unlocked(gobj);
+                       drm_gem_object_put_unlocked(gobj);
 #endif
                }
                idr_destroy(&file->object_idr);
@@ -287,7 +287,7 @@ int amdgpu_gem_create_ioctl(struct drm_d

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        if (r)
                return r;

@@ -365,7 +365,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        if (r)
                return r;

@@ -379,7 +379,7 @@ unlock_mmap_sem:
        up_read(&current->mm->mmap_sem);

 release_object:
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);

        return r;
 }
@@ -398,11 +398,11 @@ int amdgpu_mode_dumb_mmap(struct drm_fil
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-               drm_gem_object_unreference_unlocked(gobj);
+               drm_gem_object_put_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        return 0;
 }

@@ -472,7 +472,7 @@ int amdgpu_gem_wait_idle_ioctl(struct dr
        } else
                r = ret;

-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        return r;
 }

@@ -515,7 +515,7 @@ int amdgpu_gem_metadata_ioctl(struct drm
 unreserve:
        amdgpu_bo_unreserve(robj);
 out:
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        return r;
 }

@@ -686,7 +686,7 @@ error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

 error_unref:
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        return r;
 }

@@ -748,7 +748,7 @@ int amdgpu_gem_op_ioctl(struct drm_devic
        }

 out:
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        return r;
 }

@@ -776,7 +776,7 @@ int amdgpu_mode_dumb_create(struct drm_f

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(gobj);
+       drm_gem_object_put_unlocked(gobj);
        if (r) {
                return r;
        }
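
Note that drm_gem_object_put() and drm_gem_object_put_unlocked() were only
added upstream in v4.12, so on the older kernels targeted by the
KERNEL_VERSION() guards above this patch builds only if the kcl compat layer
provides fallbacks. A minimal sketch of such a shim, assuming none exists in
the tree (the version cutoff and placement here are illustrative):

    /* Hypothetical compat shim (e.g. in a kcl header): map the new put()
     * helpers back to the old unreference() calls on kernels that predate
     * them.
     */
    #include <linux/version.h>
    #include <drm/drm_gem.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
    #define drm_gem_object_put(obj)          drm_gem_object_unreference(obj)
    #define drm_gem_object_put_unlocked(obj) drm_gem_object_unreference_unlocked(obj)
    #endif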