Tidy up the for loops that handle waiting for read/write vs read-only
access.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 163 +++++++++++++++++++---------------------
 1 file changed, 78 insertions(+), 85 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 610378bd1be4..ad3330adfa41 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1105,6 +1105,23 @@ put_rpm:
        return ret;
 }
 
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+                              struct drm_i915_gem_request *req)
+{
+       int ring = req->engine->id;
+
+       if (i915_gem_active_peek(&obj->last_read[ring],
+                                &obj->base.dev->struct_mutex) == req)
+               i915_gem_object_retire__read(obj, ring);
+       else if (i915_gem_active_peek(&obj->last_write,
+                                     &obj->base.dev->struct_mutex) == req)
+               i915_gem_object_retire__write(obj);
+
+       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+               i915_gem_request_retire_upto(req);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1113,61 +1130,40 @@ int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
-       struct drm_i915_gem_request *request;
-       int ret, i;
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
 
-       if (!obj->active)
-               return 0;
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       if (readonly) {
-               request = i915_gem_active_peek(&obj->last_write,
-                                              &obj->base.dev->struct_mutex);
-               if (request) {
-                       ret = i915_wait_request(request);
-                       if (ret)
-                               return ret;
+       active_mask = obj->active;
+       if (!active_mask)
+               return 0;
 
-                       i = request->engine->id;
-                       if (i915_gem_active_peek(&obj->last_read[i],
-                                                &obj->base.dev->struct_mutex) == request)
-                               i915_gem_object_retire__read(obj, i);
-                       else
-                               i915_gem_object_retire__write(obj);
-               }
+       if (!readonly) {
+               active = obj->last_read;
        } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       request = i915_gem_active_peek(&obj->last_read[i],
-                                                      &obj->base.dev->struct_mutex);
-                       if (!request)
-                               continue;
-
-                       ret = i915_wait_request(request);
-                       if (ret)
-                               return ret;
-
-                       i915_gem_object_retire__read(obj, i);
-               }
-               GEM_BUG_ON(obj->active);
+               active_mask = 1;
+               active = &obj->last_write;
        }
 
-       return 0;
-}
+       for_each_active(active_mask, idx) {
+               struct drm_i915_gem_request *request;
+               int ret;
 
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-                              struct drm_i915_gem_request *req)
-{
-       int ring = req->engine->id;
+               request = i915_gem_active_peek(&active[idx],
+                                              &obj->base.dev->struct_mutex);
+               if (!request)
+                       continue;
 
-       if (i915_gem_active_peek(&obj->last_read[ring],
-                                &obj->base.dev->struct_mutex) == req)
-               i915_gem_object_retire__read(obj, ring);
-       else if (i915_gem_active_peek(&obj->last_write,
-                                     &obj->base.dev->struct_mutex) == req)
-               i915_gem_object_retire__write(obj);
+               ret = i915_wait_request(request);
+               if (ret)
+                       return ret;
 
-       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
-               i915_gem_request_retire_upto(req);
+               i915_gem_object_retire_request(obj, request);
+       }
+
+       return 0;
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1181,34 +1177,31 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+       struct i915_gem_active *active;
+       unsigned long active_mask;
        int ret, i, n = 0;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
 
-       if (!obj->active)
+       active_mask = obj->active;
+       if (!active_mask)
                return 0;
 
-       if (readonly) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_active_peek(&obj->last_write,
-                                          &obj->base.dev->struct_mutex);
-               if (req == NULL)
-                       return 0;
-
-               requests[n++] = req;
+       if (!readonly) {
+               active = obj->last_read;
        } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       struct drm_i915_gem_request *req;
+               active_mask = 1;
+               active = &obj->last_write;
+       }
 
-                       req = i915_gem_active_peek(&obj->last_read[i],
-                                                  &obj->base.dev->struct_mutex);
-                       if (req == NULL)
-                               continue;
+       for_each_active(active_mask, i) {
+               struct drm_i915_gem_request *req;
 
+               req = i915_gem_active_get(&active[i],
+                                         &obj->base.dev->struct_mutex);
+               if (req)
                        requests[n++] = req;
-               }
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -2631,33 +2624,33 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_request *to)
 {
-       const bool readonly = obj->base.pending_write_domain == 0;
-       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-       int ret, i, n;
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
 
-       if (!obj->active)
-               return 0;
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       n = 0;
-       if (readonly) {
-               struct drm_i915_gem_request *req;
+       active_mask = obj->active;
+       if (!active_mask)
+               return 0;
 
-               req = i915_gem_active_peek(&obj->last_write,
-                                          &obj->base.dev->struct_mutex);
-               if (req)
-                       requests[n++] = req;
+       if (obj->base.pending_write_domain) {
+               active = obj->last_read;
        } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       struct drm_i915_gem_request *req;
-
-                       req = i915_gem_active_peek(&obj->last_read[i],
-                                                  &obj->base.dev->struct_mutex);
-                       if (req)
-                               requests[n++] = req;
-               }
+               active_mask = 1;
+               active = &obj->last_write;
        }
-       for (i = 0; i < n; i++) {
-               ret = __i915_gem_object_sync(obj, to, requests[i]);
+
+       for_each_active(active_mask, idx) {
+               struct drm_i915_gem_request *request;
+               int ret;
+
+               request = i915_gem_active_peek(&active[idx],
+                                              &obj->base.dev->struct_mutex);
+               if (!request)
+                       continue;
+
+               ret = __i915_gem_object_sync(obj, to, request);
                if (ret)
                        return ret;
        }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to