From: John Harrison <[email protected]>

The scheduler now supports sync framework fences being associated with
batch buffers. The execbuff IOCTL allows such fences to be passed in
from userspace. This patch wires the two together so that the IOCTL no
longer needs to stall on the fence immediately. Instead the stall is
now swallowed by the scheduler's scheduling algorithm.

For: VIZ-1587
Signed-off-by: John Harrison <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 ++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_scheduler.c      |  3 +++
 drivers/gpu/drm/i915/i915_scheduler.h      |  5 +++++
 3 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1642701..1325b19 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1612,7 +1612,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        /*
         * Without a GPU scheduler, any fence waits must be done up front.
         */
-       if (args->flags & I915_EXEC_WAIT_FENCE) {
+       if ((args->flags & I915_EXEC_WAIT_FENCE) &&
+           (i915.scheduler_override & i915_so_direct_submit))
+       {
                ret = i915_early_fence_wait(ring, fd_fence_wait);
                if (ret < 0)
                        return ret;
@@ -1799,6 +1801,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->ctx = ctx;
 
 #ifdef CONFIG_SYNC
+       if (args->flags & I915_EXEC_WAIT_FENCE) {
+               if (fd_fence_wait < 0) {
+                       DRM_ERROR("Wait fence for ring %d has invalid id %d\n",
+                                 (int) ring->id, fd_fence_wait);
+               } else {
+                       params->fence_wait = sync_fence_fdget(fd_fence_wait);
+                       if (params->fence_wait == NULL)
+                               DRM_ERROR("Invalid wait fence %d\n",
+                                         fd_fence_wait);
+               }
+       }
+
        if (args->flags & I915_EXEC_CREATE_FENCE) {
                /*
                 * Caller has requested a sync fence.
@@ -1865,6 +1879,11 @@ err:
                        i915_gem_context_unreference(params->ctx);
        }
 
+#ifdef CONFIG_SYNC
+       if (params->fence_wait)
+               sync_fence_put(params->fence_wait);
+#endif
+
        /*
         * If the request was created but not successfully submitted then it
         * must be freed again. If it was submitted then it is being tracked
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 19577c9..66dbc20 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -977,6 +977,9 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
                        signalled = atomic_read(&node->params.fence_wait->status) != 0;
                else
                        signalled = true;
+
+               if (!signalled)
+                       signalled = i915_safe_to_ignore_fence(ring, 
node->params.fence_wait);
 #endif // CONFIG_SYNC
 
                has_local  = false;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 8ca4b4b..3f94512 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -110,6 +110,11 @@ int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *q
 int         i915_scheduler_handle_irq(struct intel_engine_cs *ring);
 void        i915_scheduler_kill_all(struct drm_device *dev);
 void        i915_gem_scheduler_work_handler(struct work_struct *work);
+#ifdef CONFIG_SYNC
+struct drm_i915_gem_request *i915_scheduler_find_by_sync_value(struct intel_engine_cs *ring,
+                                                              struct intel_context *ctx,
+                                                              uint32_t sync_value);
+#endif
 int         i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked);
 int         i915_scheduler_flush_request(struct drm_i915_gem_request *req,
                                         bool is_locked);
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to