[Intel-gfx] [PATCH 27/41] drm/i915: Restore nonblocking awaits for modesetting

2016-10-20 Chris Wilson
After combining the dma-buf reservation object and the GEM reservation
object, we lost the ability to do a nonblocking wait on the i915 request
(as we blocked upon the reservation object during prepare_fb). We can
instead convert the reservation object into a fence upon which we can
asynchronously wait (including a forced timeout in case the DMA fence is
never signaled).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 82 +++-
 drivers/gpu/drm/i915/intel_drv.h |  2 +
 2 files changed, 55 insertions(+), 29 deletions(-)
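
A note on the flow, since the relevant hunks below are spread apart: the
commit is now tied to a struct i915_sw_fence (intel_state->commit_ready).
prepare_fb registers asynchronous waits on that fence instead of blocking,
and the FENCE_COMPLETE notification either queues the commit work
(nonblocking commit) or lets the caller run the tail inline (blocking
commit). A condensed sketch of the lifecycle, with error paths and plane
bookkeeping trimmed (illustrative only, not compilable on its own):

/* Condensed from the intel_atomic_commit() hunks below. */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int ret;

	/* One reference is owned by the fence; dropped at FENCE_FREE. */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/* prepare_fb hooks add their asynchronous waits to commit_ready. */
	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		/* Push the fence through FENCE_COMPLETE/FENCE_FREE anyway. */
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	/* Reference consumed by intel_atomic_commit_tail(). */
	drm_atomic_state_get(state);

	/* A NULL work func tells intel_atomic_commit_ready() not to queue
	 * anything; the blocking path below runs the tail inline instead.
	 */
	INIT_WORK(&state->commit_work,
		  nonblock ? intel_atomic_commit_work : NULL);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (!nonblock) {
		i915_sw_fence_wait(&intel_state->commit_ready);
		intel_atomic_commit_tail(state);
	}

	return 0;
}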

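The prepare_fb side is what feeds the fence, and the quoted diff below is
truncated by the archive right where the blocking wait is removed. As a
rough sketch of the idea (not verbatim from the patch: the helper is
i915_sw_fence_await_reservation() from i915_sw_fence.c, its argument list
here is approximate, and timeout_jiffies is a placeholder for whatever
bound the patch picks), the old i915_gem_object_wait() on the outgoing
framebuffer becomes an asynchronous await on its reservation object:

	/* In intel_prepare_plane_fb(): instead of blocking on old_obj,
	 * queue an asynchronous wait on its reservation object so the
	 * commit work only runs once the outgoing scanout is idle.
	 */
	if (needs_modeset(crtc_state)) {
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      old_obj->resv, NULL,
						      false, timeout_jiffies,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

The timeout argument is what gives the "forced timeout" mentioned in the
commit message: a foreign DMA fence that is never signalled can only stall
the commit for a bounded time rather than forever.
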
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 13522903ec4e..c7c973b24eba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14509,12 +14509,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 static void intel_atomic_commit_work(struct work_struct *work)
 {
-   struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
+   struct drm_atomic_state *state =
+   container_of(work, struct drm_atomic_state, commit_work);
+
intel_atomic_commit_tail(state);
 }
 
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+   struct intel_atomic_state *state =
+   container_of(fence, struct intel_atomic_state, commit_ready);
+
+   switch (notify) {
+   case FENCE_COMPLETE:
+   if (state->base.commit_work.func)
+   queue_work(system_unbound_wq, &state->base.commit_work);
+   break;
+
+   case FENCE_FREE:
+   drm_atomic_state_put(&state->base);
+   break;
+   }
+
+   return NOTIFY_DONE;
+}
+
 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
struct drm_plane_state *old_plane_state;
@@ -14560,11 +14581,14 @@ static int intel_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
 
-   INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+   drm_atomic_state_get(state);
+   i915_sw_fence_init(&intel_state->commit_ready,
+  intel_atomic_commit_ready);
 
ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+   i915_sw_fence_commit(&intel_state->commit_ready);
return ret;
}
 
@@ -14575,10 +14599,14 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_track_fbs(state);
 
drm_atomic_state_get(state);
-   if (nonblock)
-   queue_work(system_unbound_wq, &state->commit_work);
-   else
+   INIT_WORK(&state->commit_work,
+ nonblock ? intel_atomic_commit_work : NULL);
+
+   i915_sw_fence_commit(&intel_state->commit_ready);
+   if (!nonblock) {
+   i915_sw_fence_wait(&intel_state->commit_ready);
intel_atomic_commit_tail(state);
+   }
 
return 0;
 }
@@ -14690,20 +14718,22 @@ int
 intel_prepare_plane_fb(struct drm_plane *plane,
   struct drm_plane_state *new_state)
 {
+   struct intel_atomic_state *intel_state =
+   to_intel_atomic_state(new_state->state);
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
-   long lret;
-   int ret = 0;
+   int ret;
 
if (!obj && !old_obj)
return 0;
 
if (old_obj) {
struct drm_crtc_state *crtc_state =
-   drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+   drm_atomic_get_existing_crtc_state(new_state->state,
+  plane->state->crtc);
 
/* Big Hammer, we also need to ensure that any pending
 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14716,31 +14746,25 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 * This should only fail upon a hung GPU, in which case we
 * can safely continue.
 */
-   if (needs_modeset(crtc_state))
-   ret = i915_gem_object_wait(old_obj,
-  I915_WAIT_INTERRUPTIBLE |
-  I915_WAIT_LOCKED,
-  MAX_SCHEDULE_TIMEOUT,
-  NULL);
