Now that we subclass our request from struct fence, we can use the common
fence primitives more freely and avoid hand-rolling routines the helpers
already provide. Storing the request's fence in the plane state lets
drm_atomic_helper_wait_for_fences() replace our open-coded waits on wait_req.
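
For reference, a minimal sketch (illustrative only, not part of the patch) of
roughly what drm_atomic_helper_wait_for_fences() now does on our behalf, given
that intel_prepare_plane_fb() stores the request's struct fence in
plane_state->fence. The function name sketch_wait_for_plane_fences is made up
for illustration:

static void sketch_wait_for_plane_fences(struct drm_device *dev,
					 struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i;

	for_each_plane_in_state(state, plane, plane_state, i) {
		if (!plane_state->fence)
			continue;

		/* Block until the request backing this plane signals. */
		fence_wait(plane_state->fence, false);

		/* Drop the reference taken in ->prepare_fb(). */
		fence_put(plane_state->fence);
		plane_state->fence = NULL;
	}
}

With the fence kept in the common plane state, the blocking wait in
intel_atomic_prepare_commit(), the wait in the commit worker and the wait_req
cleanup in intel_cleanup_plane_fb() all collapse into that one helper call.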

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_atomic_plane.c |  3 --
 drivers/gpu/drm/i915/intel_display.c      | 52 +++----------------------------
 drivers/gpu/drm/i915/intel_drv.h          |  1 -
 3 files changed, 5 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de3072d4f..b41bf380f2ab 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -77,14 +77,12 @@ intel_plane_duplicate_state(struct drm_plane *plane)
        struct intel_plane_state *intel_state;

        intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
-
        if (!intel_state)
                return NULL;

        state = &intel_state->base;

        __drm_atomic_helper_plane_duplicate_state(plane, state);
-       intel_state->wait_req = NULL;

        return state;
 }
@@ -101,7 +99,6 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
-       WARN_ON(state && to_intel_plane_state(state)->wait_req);
        drm_atomic_helper_plane_destroy_state(plane, state);
 }

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 123112c240e0..1b5f653d595b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13944,9 +13944,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       bool nonblock)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_plane_state *plane_state;
        struct drm_crtc_state *crtc_state;
-       struct drm_plane *plane;
        struct drm_crtc *crtc;
        int i, ret;

@@ -13969,27 +13967,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        ret = drm_atomic_helper_prepare_planes(dev, state);
        mutex_unlock(&dev->struct_mutex);

-       if (!ret && !nonblock) {
-               for_each_plane_in_state(state, plane, plane_state, i) {
-                       struct intel_plane_state *intel_plane_state =
-                               to_intel_plane_state(plane_state);
-
-                       if (!intel_plane_state->wait_req)
-                               continue;
-
-                       ret = i915_wait_request(intel_plane_state->wait_req,
-                                               true, NULL, NULL);
-                       if (ret) {
-                               /* Any hang should be swallowed by the wait */
-                               WARN_ON(ret == -EIO);
-                               mutex_lock(&dev->struct_mutex);
-                               drm_atomic_helper_cleanup_planes(dev, state);
-                               mutex_unlock(&dev->struct_mutex);
-                               break;
-                       }
-               }
-       }
-
        return ret;
 }

@@ -14076,27 +14053,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
        bool hw_check = intel_state->modeset;
        unsigned long put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
-       int i, ret;
-
-       for_each_plane_in_state(state, plane, plane_state, i) {
-               struct intel_plane_state *intel_plane_state =
-                       to_intel_plane_state(plane_state);
-
-               if (!intel_plane_state->wait_req)
-                       continue;
-
-               ret = i915_wait_request(intel_plane_state->wait_req,
-                                       true, NULL, NULL);
-               /* EIO should be eaten, and we can't get interrupted in the
-                * worker, and blocking commits have waited already. */
-               WARN_ON(ret);
-       }
+       int i;

+       drm_atomic_helper_wait_for_fences(dev, state);
        drm_atomic_helper_wait_for_dependencies(state);

        if (intel_state->modeset) {
@@ -14506,9 +14468,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
        }

        if (ret == 0) {
-               to_intel_plane_state(new_state)->wait_req =
-                       i915_gem_active_get(&obj->last_write,
-                                           &obj->base.dev->struct_mutex);
+               new_state->fence =
+                       &i915_gem_active_get(&obj->last_write,
+                                            &obj->base.dev->struct_mutex)->fence;
        }

        return ret;
@@ -14529,7 +14491,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 {
        struct drm_device *dev = plane->dev;
        struct intel_plane_state *old_intel_state;
-       struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
        struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

@@ -14541,9 +14502,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
        if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
            !INTEL_INFO(dev)->cursor_needs_physical))
                intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
-       i915_gem_request_assign(&intel_state->wait_req, NULL);
-       i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }

 int
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 774aab342f40..acb42d66fb08 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -383,7 +383,6 @@ struct intel_plane_state {
        struct drm_intel_sprite_colorkey ckey;

        /* async flip related structures */
-       struct drm_i915_gem_request *wait_req;
 };

 struct intel_initial_plane_config {
-- 
2.9.3
