Now that we have the core drm_gem_lock_reservations()/drm_gem_unlock_reservations()
helpers, this gets rid of a lot of boilerplate in v3d's BO reservation locking.

Signed-off-by: Eric Anholt <e...@anholt.net>
---
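For reference, a minimal sketch of the caller pattern the core helpers enable
(not the actual v3d submit path; bos/bo_count stand in for the driver's BO
array and the surrounding job-submission code is omitted):

	/* Hypothetical submit-path fragment: cast the driver BO array to the
	 * base GEM objects and let the core helper handle the ww-mutex
	 * acquire/backoff dance that the removed code open-coded.
	 */
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					bo_count, &acquire_ctx);
	if (ret)
		return ret;

	/* ... reserve fence slots, queue the job, attach fences ... */

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    bo_count, &acquire_ctx);
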
 drivers/gpu/drm/v3d/v3d_gem.c | 56 ++++-------------------------------
 1 file changed, 6 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 0d1e5e0b8042..a4338d56ff9e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -199,12 +199,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
                           int bo_count,
                           struct ww_acquire_ctx *acquire_ctx)
 {
-       int i;
-
-       for (i = 0; i < bo_count; i++)
-               ww_mutex_unlock(&bos[i]->base.resv->lock);
-
-       ww_acquire_fini(acquire_ctx);
+       drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
+                                   acquire_ctx);
 }
 
 /* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,52 +215,12 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
                         int bo_count,
                         struct ww_acquire_ctx *acquire_ctx)
 {
-       int contended_lock = -1;
        int i, ret;
 
-       ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
-       if (contended_lock != -1) {
-               struct v3d_bo *bo = bos[contended_lock];
-
-               ret = ww_mutex_lock_slow_interruptible(&bo->base.resv->lock,
-                                                      acquire_ctx);
-               if (ret) {
-                       ww_acquire_done(acquire_ctx);
-                       return ret;
-               }
-       }
-
-       for (i = 0; i < bo_count; i++) {
-               if (i == contended_lock)
-                       continue;
-
-               ret = ww_mutex_lock_interruptible(&bos[i]->base.resv->lock,
-                                                 acquire_ctx);
-               if (ret) {
-                       int j;
-
-                       for (j = 0; j < i; j++)
-                               ww_mutex_unlock(&bos[j]->base.resv->lock);
-
-                       if (contended_lock != -1 && contended_lock >= i) {
-                               struct v3d_bo *bo = bos[contended_lock];
-
-                               ww_mutex_unlock(&bo->base.resv->lock);
-                       }
-
-                       if (ret == -EDEADLK) {
-                               contended_lock = i;
-                               goto retry;
-                       }
-
-                       ww_acquire_done(acquire_ctx);
-                       return ret;
-               }
-       }
-
-       ww_acquire_done(acquire_ctx);
+       ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+                                       bo_count, acquire_ctx);
+       if (ret)
+               return ret;
 
        /* Reserve space for our shared (read-only) fence references,
         * before we commit the CL to the hardware.
-- 
2.20.1
