On 11/01/2016 09:16, Chris Wilson wrote:
Migrate the request operations out of the main body of i915_gem.c and
into their own C file for easier expansion.

v2: Move __i915_add_request() across as well

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---

Don't we lose the history in git blame when this code is moved to a new file? Is that acceptable, especially for a file like i915_gem.c?
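
A plain git blame on the new file will only point back to this patch; as far as I can tell, recovering the older history takes extra flags. A rough sketch of what a reader would need (assuming the new path from this patch):

  # ask blame to look for lines moved or copied from other files;
  # each extra -C widens the search (same commit / creating commit / any commit)
  git blame -C -C -C drivers/gpu/drm/i915/i915_gem_request.c

  # or trace the history of a single function into the new file
  git log -L :__i915_wait_request:drivers/gpu/drm/i915/i915_gem_request.c

So the history is still reachable, just not with a bare git blame.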

regards
Arun

  drivers/gpu/drm/i915/Makefile           |   1 +
  drivers/gpu/drm/i915/i915_drv.h         | 205 +---------
  drivers/gpu/drm/i915/i915_gem.c         | 652 +------------------------------
  drivers/gpu/drm/i915/i915_gem_request.c | 659 ++++++++++++++++++++++++++++++++
  drivers/gpu/drm/i915/i915_gem_request.h | 223 +++++++++++
  5 files changed, 895 insertions(+), 845 deletions(-)
  create mode 100644 drivers/gpu/drm/i915/i915_gem_request.c
  create mode 100644 drivers/gpu/drm/i915/i915_gem_request.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 99ce591c8574..b0a83215db80 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_gtt.o \
          i915_gem.o \
          i915_gem_render_state.o \
+         i915_gem_request.o \
          i915_gem_shrinker.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57e450e25ad6..ee146ce02412 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
  #include "intel_lrc.h"
  #include "i915_gem_gtt.h"
  #include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
  #include <linux/io-mapping.h>
  #include <linux/i2c.h>
  #include <linux/i2c-algo-bit.h>
@@ -2162,179 +2163,15 @@ struct drm_i915_gem_object {
  };
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-                      struct drm_i915_gem_object *new,
-                      unsigned frontbuffer_bits);
-
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
-       struct kref ref;
-
-       /** On Which ring this request was generated */
-       struct drm_i915_private *i915;
-       struct intel_engine_cs *ring;
-       unsigned reset_counter;
-
-        /** GEM sequence number associated with the previous request,
-         * when the HWS breadcrumb is equal to this the GPU is processing
-         * this request.
-         */
-       u32 previous_seqno;
-
-        /** GEM sequence number associated with this request,
-         * when the HWS breadcrumb is equal or greater than this the GPU
-         * has finished processing this request.
-         */
-       u32 seqno;
-
-       /** Position in the ringbuffer of the start of the request */
-       u32 head;
-
-       /**
-        * Position in the ringbuffer of the start of the postfix.
-        * This is required to calculate the maximum available ringbuffer
-        * space without overwriting the postfix.
-        */
-        u32 postfix;
-
-       /** Position in the ringbuffer of the end of the whole request */
-       u32 tail;
-
-       /**
-        * Context and ring buffer related to this request
-        * Contexts are refcounted, so when this request is associated with a
-        * context, we must increment the context's refcount, to guarantee that
-        * it persists while any request is linked to it. Requests themselves
-        * are also refcounted, so the request will only be freed when the last
-        * reference to it is dismissed, and the code in
-        * i915_gem_request_free() will then decrement the refcount on the
-        * context.
-        */
-       struct intel_context *ctx;
-       struct intel_ringbuffer *ringbuf;
-
-       /** Batch buffer related to this request if any (used for
-           error state dump only) */
-       struct drm_i915_gem_object *batch_obj;
-
-       /** Time at which this request was emitted, in jiffies. */
-       unsigned long emitted_jiffies;
-
-       /** global list entry for this request */
-       struct list_head list;
-
-       struct drm_i915_file_private *file_priv;
-       /** file_priv list entry for this request */
-       struct list_head client_list;
-
-       /** process identifier submitting this request */
-       struct pid *pid;
-
-       /**
-        * The ELSP only accepts two elements at a time, so we queue
-        * context/tail pairs on a given queue (ring->execlist_queue) until the
-        * hardware is available. The queue serves a double purpose: we also use
-        * it to keep track of the up to 2 contexts currently in the hardware
-        * (usually one in execution and the other queued up by the GPU): We
-        * only remove elements from the head of the queue when the hardware
-        * informs us that an element has been completed.
-        *
-        * All accesses to the queue are mediated by a spinlock
-        * (ring->execlist_lock).
-        */
-
-       /** Execlist link in the submission queue.*/
-       struct list_head execlist_link;
-
-       /** Execlists no. of times this request has been sent to the ELSP */
-       int elsp_submitted;
-
-};
-
  #ifdef CONFIG_DRM_I915_DEBUG_GEM
  #define GEM_BUG_ON(expr) BUG_ON(expr)
  #else
  #define GEM_BUG_ON(expr)
  #endif

-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-       return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
-{
-       return req ? req->ring : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
-       if (req)
-               kref_get(&req->ref);
-       return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
-       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-       kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-       struct drm_device *dev;
-
-       if (!req)
-               return;
-
-       dev = req->ring->dev;
-       if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-               mutex_unlock(&dev->struct_mutex);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-                                          struct drm_i915_gem_request *src)
-{
-       if (src)
-               i915_gem_request_reference(src);
-
-       if (*pdst)
-               i915_gem_request_unreference(*pdst);
-
-       *pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits);

  /*
   * A command that requires special handling by the command parser.
@@ -2956,28 +2793,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_mode_create_dumb *args);
  int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-       return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
-{
-       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
-                                req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
-{
-       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
-                                req->seqno);
-}
-
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
  int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);

  struct drm_i915_gem_request *
@@ -3036,18 +2851,6 @@ void i915_gem_init_swizzling(struct drm_device *dev);
  void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
  int __must_check i915_gpu_idle(struct drm_device *dev);
  int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
-                       struct drm_i915_gem_object *batch_obj,
-                       bool flush_caches);
-#define i915_add_request(req) \
-       __i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
-       __i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
  int __must_check
  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ea9344503bf6..68a25617ca7a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1103,365 +1103,6 @@ put_rpm:
        return ret;
  }

-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
-       if (__i915_terminally_wedged(reset_counter))
-               return -EIO;
-
-       if (__i915_reset_in_progress(reset_counter)) {
-               /* Non-interruptible callers can't handle -EAGAIN, hence return
-                * -EIO unconditionally for these. */
-               if (!interruptible)
-                       return -EIO;
-
-               return -EAGAIN;
-       }
-
-       return 0;
-}
-
-static unsigned long local_clock_us(unsigned *cpu)
-{
-       unsigned long t;
-
-       /* Cheaply and approximately convert from nanoseconds to microseconds.
-        * The result and subsequent calculations are also defined in the same
-        * approximate microseconds units. The principal source of timing
-        * error here is from the simple truncation.
-        *
-        * Note that local_clock() is only defined wrt to the current CPU;
-        * the comparisons are no longer valid if we switch CPUs. Instead of
-        * blocking preemption for the entire busywait, we can detect the CPU
-        * switch and use that as indicator of system load and a reason to
-        * stop busywaiting, see busywait_stop().
-        */
-       *cpu = get_cpu();
-       t = local_clock() >> 10;
-       put_cpu();
-
-       return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
-       unsigned this_cpu;
-
-       if (time_after(local_clock_us(&this_cpu), timeout))
-               return true;
-
-       return this_cpu != cpu;
-}
-
-static bool __i915_spin_request(struct drm_i915_gem_request *req,
-                               struct intel_wait *wait,
-                               int state)
-{
-       unsigned long timeout;
-       unsigned cpu;
-
-       /* When waiting for high frequency requests, e.g. during synchronous
-        * rendering split between the CPU and GPU, the finite amount of time
-        * required to set up the irq and wait upon it limits the response
-        * rate. By busywaiting on the request completion for a short while we
-        * can service the high frequency waits as quick as possible. However,
-        * if it is a slow request, we want to sleep as quickly as possible.
-        * The tradeoff between waiting and sleeping is roughly the time it
-        * takes to sleep on a request, on the order of a microsecond.
-        */
-
-       /* Only spin if we know the GPU is processing this request */
-       if (!i915_gem_request_started(req))
-               return false;
-
-       timeout = local_clock_us(&cpu) + 5;
-       do {
-               if (i915_gem_request_completed(req))
-                       return true;
-
-               if (signal_pending_state(state, wait->task))
-                       break;
-
-               if (busywait_stop(timeout, cpu))
-                       break;
-
-               cpu_relax_lowlatency();
-
-               /* Break the loop if we have consumed the timeslice (or been
-                * preempted) or when either the background thread has
-                * enabled the interrupt, or the IRQ itself has fired.
-                */
-       } while (!need_resched() && wait->task->state == state);
-
-       return false;
-}
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps)
-{
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       struct intel_wait wait;
-       unsigned long timeout_remain;
-       int ret = 0;
-
-       might_sleep();
-
-       if (list_empty(&req->list))
-               return 0;
-
-       if (i915_gem_request_completed(req))
-               return 0;
-
-       timeout_remain = MAX_SCHEDULE_TIMEOUT;
-       if (timeout) {
-               if (WARN_ON(*timeout < 0))
-                       return -EINVAL;
-
-               if (*timeout == 0)
-                       return -ETIME;
-
-               /* Record current time in case interrupted, or wedged */
-               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-               *timeout += ktime_get_raw_ns();
-       }
-
-       trace_i915_gem_request_wait_begin(req);
-
-       /* This client is about to stall waiting for the GPU. In many cases
-        * this is undesirable and limits the throughput of the system, as
-        * many clients cannot continue processing user input/output whilst
-        * blocked. RPS autotuning may take tens of milliseconds to respond
-        * to the GPU load and thus incurs additional latency for the client.
-        * We can circumvent that by promoting the GPU frequency to maximum
-        * before we wait. This makes the GPU throttle up much more quickly
-        * (good for benchmarks and user experience, e.g. window animations),
-        * but at a cost of spending more power processing the workload
-        * (bad for battery). Not all clients even want their results
-        * immediately and for them we should just let the GPU select its own
-        * frequency to maximise efficiency. To prevent a single client from
-        * forcing the clocks too high for the whole system, we only allow
-        * each client to waitboost once in a busy period.
-        */
-       if (INTEL_INFO(req->i915)->gen >= 6)
-               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
-
-       intel_wait_init(&wait, req->seqno);
-       set_task_state(wait.task, state);
-
-       /* Optimistic spin for the next ~jiffie before touching IRQs */
-       if (intel_engine_add_wait(req->ring, &wait)) {
-               if (__i915_spin_request(req, &wait, state))
-                       goto complete;
-
-               /* In order to check that we haven't missed the interrupt
-                * as we enabled it, we need to kick ourselves to do a
-                * coherent check on the seqno before we sleep.
-                */
-               if (intel_engine_enable_wait_irq(req->ring, &wait))
-                       goto wakeup;
-       }
-
-       for (;;) {
-               if (signal_pending_state(state, wait.task)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-
-               /* Ensure that even if the GPU hangs, we get woken up. */
-               i915_queue_hangcheck(req->i915);
-
-               timeout_remain = io_schedule_timeout(timeout_remain);
-               if (timeout_remain == 0) {
-                       ret = -ETIME;
-                       break;
-               }
-
-               if (intel_wait_complete(&wait))
-                       break;
-
-wakeup:
-               set_task_state(wait.task, state);
-
-               /* Carefully check if the request is complete, giving time
-                * for the seqno to be visible following the interrupt.
-                * We also have to check in case we are kicked by the GPU
-                * reset in order to drop the struct_mutex.
-                */
-               if (__i915_request_irq_complete(req))
-                       break;
-       }
-
-complete:
-       intel_engine_remove_wait(req->ring, &wait);
-       __set_task_state(wait.task, TASK_RUNNING);
-       trace_i915_gem_request_wait_end(req);
-
-       if (timeout) {
-               *timeout -= ktime_get_raw_ns();
-               if (*timeout < 0)
-                       *timeout = 0;
-
-               /*
-                * Apparently ktime isn't accurate enough and occasionally has a
-                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-                * things up to make the test happy. We allow up to 1 jiffy.
-                *
-                * This is a regrssion from the timespec->ktime conversion.
-                */
-               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-                       *timeout = 0;
-       }
-
-       if (ret == 0 && rps && req->seqno == req->ring->last_submitted_seqno) {
-               /* The GPU is now idle and this client has stalled.
-                * Since no other client has submitted a request in the
-                * meantime, assume that this client is the only one
-                * supplying work to the GPU but is unable to keep that
-                * work supplied because it is waiting. Since the GPU is
-                * then never kept fully busy, RPS autoclocking will
-                * keep the clocks relatively low, causing further delays.
-                * Compensate by giving the synchronous client credit for
-                * a waitboost next time.
-                */
-               spin_lock(&req->i915->rps.client_lock);
-               list_del_init(&rps->link);
-               spin_unlock(&req->i915->rps.client_lock);
-       }
-
-       return ret;
-}
-
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file)
-{
-       struct drm_i915_private *dev_private;
-       struct drm_i915_file_private *file_priv;
-
-       WARN_ON(!req || !file || req->file_priv);
-
-       if (!req || !file)
-               return -EINVAL;
-
-       if (req->file_priv)
-               return -EINVAL;
-
-       dev_private = req->ring->dev->dev_private;
-       file_priv = file->driver_priv;
-
-       spin_lock(&file_priv->mm.lock);
-       req->file_priv = file_priv;
-       list_add_tail(&req->client_list, &file_priv->mm.request_list);
-       spin_unlock(&file_priv->mm.lock);
-
-       req->pid = get_pid(task_pid(current));
-
-       return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_file_private *file_priv = request->file_priv;
-
-       if (!file_priv)
-               return;
-
-       spin_lock(&file_priv->mm.lock);
-       list_del(&request->client_list);
-       request->file_priv = NULL;
-       spin_unlock(&file_priv->mm.lock);
-
-       put_pid(request->pid);
-       request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
-       trace_i915_gem_request_retire(request);
-
-       /* We know the GPU must have read the request to have
-        * sent us the seqno + interrupt, so use the position
-        * of tail of the request to update the last known position
-        * of the GPU head.
-        *
-        * Note this requires that we are always called in request
-        * completion order.
-        */
-       request->ringbuf->last_retired_head = request->postfix;
-
-       list_del_init(&request->list);
-       i915_gem_request_remove_from_client(request);
-
-       i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->ring;
-       struct drm_i915_gem_request *tmp;
-
-       lockdep_assert_held(&engine->dev->struct_mutex);
-
-       if (list_empty(&req->list))
-               return;
-
-       do {
-               tmp = list_first_entry(&engine->request_list,
-                                      typeof(*tmp), list);
-
-               i915_gem_request_retire(tmp);
-       } while (tmp != req);
-
-       WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
-       bool interruptible;
-       int ret;
-
-       BUG_ON(req == NULL);
-
-       dev = req->ring->dev;
-       dev_priv = dev->dev_private;
-       interruptible = dev_priv->mm.interruptible;
-
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       ret = __i915_wait_request(req, interruptible, NULL, NULL);
-       if (ret)
-               return ret;
-
-       __i915_gem_request_retire__upto(req);
-       return 0;
-}
-
  /**
   * Ensures that all rendering to the object has completed and the object is
   * safe to unbind from the GTT or access from the CPU.
@@ -1515,7 +1156,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);

-       __i915_gem_request_retire__upto(req);
+       i915_gem_request_retire_upto(req);
  }

  /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -2441,94 +2082,6 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
        drm_gem_object_unreference(&obj->base);
  }

-static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int ret, i, j;
-
-       /* Carefully retire all requests without writing to the rings */
-       for_each_ring(ring, dev_priv, i) {
-               ret = intel_ring_idle(ring);
-               if (ret)
-                       return ret;
-       }
-       i915_gem_retire_requests(dev);
-
-       /* Finally reset hw state */
-       for_each_ring(ring, dev_priv, i) {
-               intel_ring_init_seqno(ring, seqno);
-
-               for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-                       ring->semaphore.sync_seqno[j] = 0;
-       }
-
-       return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
-       if (ret)
-               return ret;
-
-       /* Carefully set the last_seqno value so that wrap
-        * detection still works
-        */
-       dev_priv->next_seqno = seqno;
-       dev_priv->last_seqno = seqno - 1;
-       if (dev_priv->last_seqno == 0)
-               dev_priv->last_seqno--;
-
-       return 0;
-}
-
-int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* reserve 0 for non-seqno */
-       if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
-               if (ret)
-                       return ret;
-
-               dev_priv->next_seqno = 1;
-       }
-
-       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
-       return 0;
-}
-
-static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
-{
-       if (dev_priv->mm.busy)
-               return;
-
-       intel_runtime_pm_get_noresume(dev_priv);
-
-       i915_update_gfx_val(dev_priv);
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_busy(dev_priv);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->mm.retire_work,
-                          round_jiffies_up_relative(HZ));
-
-       dev_priv->mm.busy = true;
-}
-
  static void i915_gem_mark_idle(struct drm_i915_private *dev_priv)
  {
        dev_priv->mm.busy = false;
@@ -2542,92 +2095,6 @@ static void i915_gem_mark_idle(struct drm_i915_private *dev_priv)
        intel_runtime_pm_put(dev_priv);
  }

-/*
- * NB: This function is not allowed to fail. Doing so would mean the the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
-                       struct drm_i915_gem_object *obj,
-                       bool flush_caches)
-{
-       struct intel_engine_cs *ring;
-       struct drm_i915_private *dev_priv;
-       struct intel_ringbuffer *ringbuf;
-       u32 request_start;
-       int ret;
-
-       if (WARN_ON(request == NULL))
-               return;
-
-       ring = request->ring;
-       dev_priv = ring->dev->dev_private;
-       ringbuf = request->ringbuf;
-
-       /*
-        * To ensure that this call will not fail, space for its emissions
-        * should already have been reserved in the ring buffer. Let the ring
-        * know that it is time to use that space up.
-        */
-       intel_ring_reserved_space_use(ringbuf);
-
-       request_start = intel_ring_get_tail(ringbuf);
-       /*
-        * Emit any outstanding flushes - execbuf can fail to emit the flush
-        * after having emitted the batchbuffer command. Hence we need to fix
-        * things up similar to emitting the lazy request. The difference here
-        * is that the flush _must_ happen before the next request, no matter
-        * what.
-        */
-       if (flush_caches) {
-               if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(request);
-               else
-                       ret = intel_ring_flush_all_caches(request);
-               /* Not allowed to fail! */
-               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
-       }
-
-       /* Record the position of the start of the request so that
-        * should we detect the updated seqno part-way through the
-        * GPU processing the request, we never over-estimate the
-        * position of the head.
-        */
-       request->postfix = intel_ring_get_tail(ringbuf);
-
-       if (i915.enable_execlists)
-               ret = ring->emit_request(request);
-       else {
-               ret = ring->add_request(request);
-
-               request->tail = intel_ring_get_tail(ringbuf);
-       }
-       /* Not allowed to fail! */
-       WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-       request->head = request_start;
-
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
-
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = ring->last_submitted_seqno;
-       ring->last_submitted_seqno = request->seqno;
-       list_add_tail(&request->list, &ring->request_list);
-
-       trace_i915_gem_request_add(request);
-
-       i915_gem_mark_busy(dev_priv);
-
-       /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
-}
-
  static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
                                   const struct intel_context *ctx)
  {
@@ -2666,109 +2133,6 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
        }
  }

-void i915_gem_request_free(struct kref *req_ref)
-{
-       struct drm_i915_gem_request *req = container_of(req_ref,
-                                                typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists) {
-                       if (ctx != req->ring->default_context)
-                               intel_lr_context_unpin(req);
-               }
-
-               i915_gem_context_unreference(ctx);
-       }
-
-       kmem_cache_free(req->i915->requests, req);
-}
-
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out)
-{
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       struct drm_i915_gem_request *req;
-       int ret;
-
-       if (!req_out)
-               return -EINVAL;
-
-       *req_out = NULL;
-
-       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-        * and restart.
-        */
-       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
-       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-       if (req == NULL)
-               return -ENOMEM;
-
-       ret = i915_gem_get_seqno(ring->dev, &req->seqno);
-       if (ret)
-               goto err;
-
-       kref_init(&req->ref);
-       req->i915 = dev_priv;
-       req->ring = ring;
-       req->reset_counter = reset_counter;
-       req->ctx  = ctx;
-       i915_gem_context_reference(req->ctx);
-
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
-       /*
-        * Reserve space in the ring buffer for all the commands required to
-        * eventually emit this request. This is to guarantee that the
-        * i915_add_request() call can't fail. Note that the reserve may need
-        * to be redone if the request is not actually submitted straight
-        * away, e.g. because a GPU scheduler has deferred it.
-        */
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
-       else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               i915_gem_request_cancel(req);
-               return ret;
-       }
-
-       *req_out = req;
-       return 0;
-
-err:
-       kmem_cache_free(dev_priv->requests, req);
-       return ret;
-}
-
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-       intel_ring_reserved_space_cancel(req->ringbuf);
-
-       i915_gem_request_unreference(req);
-}
-
  struct drm_i915_gem_request *
  i915_gem_find_active_request(struct intel_engine_cs *ring)
  {
@@ -2850,14 +2214,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
-       while (!list_empty(&ring->request_list)) {
+       if (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;

-               request = list_first_entry(&ring->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
+               request = list_last_entry(&ring->request_list,
+                                         struct drm_i915_gem_request,
+                                         list);

-               i915_gem_request_retire(request);
+               i915_gem_request_retire_upto(request);
        }

        /* Having flushed all requests from all queues, we know that all
@@ -2922,7 +2286,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                if (!i915_gem_request_completed(request))
                        break;

-               i915_gem_request_retire(request);
+               i915_gem_request_retire_upto(request);
        }

        /* Move any buffers on the active list that are no longer referenced
@@ -3053,7 +2417,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                        goto retire;

                if (i915_gem_request_completed(req)) {
-                       __i915_gem_request_retire__upto(req);
+                       i915_gem_request_retire_upto(req);
  retire:
                        i915_gem_object_retire__read(obj, i);
                }
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
new file mode 100644
index 000000000000..b4ede6dd7b20
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
+{
+       if (__i915_terminally_wedged(reset_counter))
+               return -EIO;
+
+       if (__i915_reset_in_progress(reset_counter)) {
+               /* Non-interruptible callers can't handle -EAGAIN, hence return
+                * -EIO unconditionally for these. */
+               if (!interruptible)
+                       return -EIO;
+
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static int
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+{
+       struct intel_engine_cs *ring;
+       int ret, i, j;
+
+       /* Carefully retire all requests without writing to the rings */
+       for_each_ring(ring, dev_priv, i) {
+               ret = intel_ring_idle(ring);
+               if (ret)
+                       return ret;
+       }
+       i915_gem_retire_requests(dev_priv->dev);
+
+       /* Finally reset hw state */
+       for_each_ring(ring, dev_priv, i) {
+               intel_ring_init_seqno(ring, seqno);
+
+               for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+                       ring->semaphore.sync_seqno[j] = 0;
+       }
+
+       return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (seqno == 0)
+               return -EINVAL;
+
+       /* HWS page needs to be set less than what we
+        * will inject to ring
+        */
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+       if (ret)
+               return ret;
+
+       /* Carefully set the last_seqno value so that wrap
+        * detection still works
+        */
+       dev_priv->next_seqno = seqno;
+       dev_priv->last_seqno = seqno - 1;
+       if (dev_priv->last_seqno == 0)
+               dev_priv->last_seqno--;
+
+       return 0;
+}
+
+static int
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+{
+       /* reserve 0 for non-seqno */
+       if (unlikely(dev_priv->next_seqno == 0)) {
+               int ret = i915_gem_init_seqno(dev_priv, 0);
+               if (ret)
+                       return ret;
+
+               dev_priv->next_seqno = 1;
+       }
+
+       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+       return 0;
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+                          struct intel_context *ctx,
+                          struct drm_i915_gem_request **req_out)
+{
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+       struct drm_i915_gem_request *req;
+       int ret;
+
+       if (!req_out)
+               return -EINVAL;
+
+       *req_out = NULL;
+
+       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+        * and restart.
+        */
+       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+       if (ret)
+               return ret;
+
+       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       if (req == NULL)
+               return -ENOMEM;
+
+       ret = i915_gem_get_seqno(dev_priv, &req->seqno);
+       if (ret)
+               goto err;
+
+       kref_init(&req->ref);
+       req->i915 = dev_priv;
+       req->ring = ring;
+       req->reset_counter = reset_counter;
+       req->ctx  = ctx;
+       i915_gem_context_reference(req->ctx);
+
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_alloc_request_extras(req);
+       else
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret) {
+               i915_gem_context_unreference(req->ctx);
+               goto err;
+       }
+
+       /*
+        * Reserve space in the ring buffer for all the commands required to
+        * eventually emit this request. This is to guarantee that the
+        * i915_add_request() call can't fail. Note that the reserve may need
+        * to be redone if the request is not actually submitted straight
+        * away, e.g. because a GPU scheduler has deferred it.
+        */
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_reserve_space(req);
+       else
+               ret = intel_ring_reserve_space(req);
+       if (ret) {
+               /*
+                * At this point, the request is fully allocated even if not
+                * fully prepared. Thus it can be cleaned up using the proper
+                * free code.
+                */
+               i915_gem_request_cancel(req);
+               return ret;
+       }
+
+       *req_out = req;
+       return 0;
+
+err:
+       kmem_cache_free(dev_priv->requests, req);
+       return ret;
+}
+
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+       intel_ring_reserved_space_cancel(req->ringbuf);
+
+       i915_gem_request_unreference(req);
+}
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file)
+{
+       struct drm_i915_private *dev_private;
+       struct drm_i915_file_private *file_priv;
+
+       WARN_ON(!req || !file || req->file_priv);
+
+       if (!req || !file)
+               return -EINVAL;
+
+       if (req->file_priv)
+               return -EINVAL;
+
+       dev_private = req->ring->dev->dev_private;
+       file_priv = file->driver_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       req->file_priv = file_priv;
+       list_add_tail(&req->client_list, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
+
+       req->pid = get_pid(task_pid(current));
+
+       return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_file_private *file_priv = request->file_priv;
+
+       if (!file_priv)
+               return;
+
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+
+       put_pid(request->pid);
+       request->pid = NULL;
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+       trace_i915_gem_request_retire(request);
+
+       /* We know the GPU must have read the request to have
+        * sent us the seqno + interrupt, so use the position
+        * of tail of the request to update the last known position
+        * of the GPU head.
+        *
+        * Note this requires that we are always called in request
+        * completion order.
+        */
+       request->ringbuf->last_retired_head = request->postfix;
+
+       list_del_init(&request->list);
+       i915_gem_request_remove_from_client(request);
+
+       i915_gem_request_unreference(request);
+}
+
+void
+i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->ring;
+       struct drm_i915_gem_request *tmp;
+
+       lockdep_assert_held(&engine->dev->struct_mutex);
+
+       if (list_empty(&req->list))
+               return;
+
+       do {
+               tmp = list_first_entry(&engine->request_list,
+                                      typeof(*tmp), list);
+
+               i915_gem_request_retire(tmp);
+       } while (tmp != req);
+
+       WARN_ON(i915_verify_lists(engine->dev));
+}
+
+static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
+{
+       if (dev_priv->mm.busy)
+               return;
+
+       intel_runtime_pm_get_noresume(dev_priv);
+
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_INFO(dev_priv)->gen >= 6)
+               gen6_rps_busy(dev_priv);
+
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->mm.retire_work,
+                          round_jiffies_up_relative(HZ));
+
+       dev_priv->mm.busy = true;
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+                       struct drm_i915_gem_object *obj,
+                       bool flush_caches)
+{
+       struct intel_engine_cs *ring;
+       struct drm_i915_private *dev_priv;
+       struct intel_ringbuffer *ringbuf;
+       u32 request_start;
+       int ret;
+
+       if (WARN_ON(request == NULL))
+               return;
+
+       ring = request->ring;
+       dev_priv = ring->dev->dev_private;
+       ringbuf = request->ringbuf;
+
+       /*
+        * To ensure that this call will not fail, space for its emissions
+        * should already have been reserved in the ring buffer. Let the ring
+        * know that it is time to use that space up.
+        */
+       intel_ring_reserved_space_use(ringbuf);
+
+       request_start = intel_ring_get_tail(ringbuf);
+       /*
+        * Emit any outstanding flushes - execbuf can fail to emit the flush
+        * after having emitted the batchbuffer command. Hence we need to fix
+        * things up similar to emitting the lazy request. The difference here
+        * is that the flush _must_ happen before the next request, no matter
+        * what.
+        */
+       if (flush_caches) {
+               if (i915.enable_execlists)
+                       ret = logical_ring_flush_all_caches(request);
+               else
+                       ret = intel_ring_flush_all_caches(request);
+               /* Not allowed to fail! */
+               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+       }
+
+       /* Record the position of the start of the request so that
+        * should we detect the updated seqno part-way through the
+        * GPU processing the request, we never over-estimate the
+        * position of the head.
+        */
+       request->postfix = intel_ring_get_tail(ringbuf);
+
+       if (i915.enable_execlists)
+               ret = ring->emit_request(request);
+       else {
+               ret = ring->add_request(request);
+
+               request->tail = intel_ring_get_tail(ringbuf);
+       }
+       /* Not allowed to fail! */
+       WARN(ret, "emit|add_request failed: %d!\n", ret);
+
+       request->head = request_start;
+
+       /* Whilst this request exists, batch_obj will be on the
+        * active_list, and so will hold the active reference. Only when this
+        * request is retired will the the batch_obj be moved onto the
+        * inactive_list and lose its active reference. Hence we do not need
+        * to explicitly hold another reference here.
+        */
+       request->batch_obj = obj;
+
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = ring->last_submitted_seqno;
+       ring->last_submitted_seqno = request->seqno;
+       list_add_tail(&request->list, &ring->request_list);
+
+       trace_i915_gem_request_add(request);
+
+       i915_gem_mark_busy(dev_priv);
+
+       /* Sanity check that the reserved size was large enough. */
+       intel_ring_reserved_space_end(ringbuf);
+}
+
+
+static unsigned long local_clock_us(unsigned *cpu)
+{
+       unsigned long t;
+
+       /* Cheaply and approximately convert from nanoseconds to microseconds.
+        * The result and subsequent calculations are also defined in the same
+        * approximate microseconds units. The principal source of timing
+        * error here is from the simple truncation.
+        *
+        * Note that local_clock() is only defined wrt to the current CPU;
+        * the comparisons are no longer valid if we switch CPUs. Instead of
+        * blocking preemption for the entire busywait, we can detect the CPU
+        * switch and use that as indicator of system load and a reason to
+        * stop busywaiting, see busywait_stop().
+        */
+       *cpu = get_cpu();
+       t = local_clock() >> 10;
+       put_cpu();
+
+       return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+       unsigned this_cpu;
+
+       if (time_after(local_clock_us(&this_cpu), timeout))
+               return true;
+
+       return this_cpu != cpu;
+}
+
+static bool __i915_spin_request(struct drm_i915_gem_request *req,
+                               struct intel_wait *wait,
+                               int state)
+{
+       unsigned long timeout;
+       unsigned cpu;
+
+       /* When waiting for high frequency requests, e.g. during synchronous
+        * rendering split between the CPU and GPU, the finite amount of time
+        * required to set up the irq and wait upon it limits the response
+        * rate. By busywaiting on the request completion for a short while we
+        * can service the high frequency waits as quick as possible. However,
+        * if it is a slow request, we want to sleep as quickly as possible.
+        * The tradeoff between waiting and sleeping is roughly the time it
+        * takes to sleep on a request, on the order of a microsecond.
+        */
+
+       /* Only spin if we know the GPU is processing this request */
+       if (!i915_gem_request_started(req))
+               return false;
+
+       timeout = local_clock_us(&cpu) + 5;
+       do {
+               if (i915_gem_request_completed(req))
+                       return true;
+
+               if (signal_pending_state(state, wait->task))
+                       break;
+
+               if (busywait_stop(timeout, cpu))
+                       break;
+
+               cpu_relax_lowlatency();
+
+               /* Break the loop if we have consumed the timeslice (or been
+                * preempted) or when either the background thread has
+                * enabled the interrupt, or the IRQ itself has fired.
+                */
+       } while (!need_resched() && wait->task->state == state);
+
+       return false;
+}
+
+/**
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: duh!
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request was found within the alloted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+int __i915_wait_request(struct drm_i915_gem_request *req,
+                       bool interruptible,
+                       s64 *timeout,
+                       struct intel_rps_client *rps)
+{
+       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       struct intel_wait wait;
+       unsigned long timeout_remain;
+       int ret = 0;
+
+       might_sleep();
+
+       if (list_empty(&req->list))
+               return 0;
+
+       if (i915_gem_request_completed(req))
+               return 0;
+
+       timeout_remain = MAX_SCHEDULE_TIMEOUT;
+       if (timeout) {
+               if (WARN_ON(*timeout < 0))
+                       return -EINVAL;
+
+               if (*timeout == 0)
+                       return -ETIME;
+
+               /* Record current time in case interrupted, or wedged */
+               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
+               *timeout += ktime_get_raw_ns();
+       }
+
+       trace_i915_gem_request_wait_begin(req);
+
+       /* This client is about to stall waiting for the GPU. In many cases
+        * this is undesirable and limits the throughput of the system, as
+        * many clients cannot continue processing user input/output whilst
+        * blocked. RPS autotuning may take tens of milliseconds to respond
+        * to the GPU load and thus incurs additional latency for the client.
+        * We can circumvent that by promoting the GPU frequency to maximum
+        * before we wait. This makes the GPU throttle up much more quickly
+        * (good for benchmarks and user experience, e.g. window animations),
+        * but at a cost of spending more power processing the workload
+        * (bad for battery). Not all clients even want their results
+        * immediately and for them we should just let the GPU select its own
+        * frequency to maximise efficiency. To prevent a single client from
+        * forcing the clocks too high for the whole system, we only allow
+        * each client to waitboost once in a busy period.
+        */
+       if (INTEL_INFO(req->i915)->gen >= 6)
+               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+
+       intel_wait_init(&wait, req->seqno);
+       set_task_state(wait.task, state);
+
+       /* Optimistic spin for the next ~jiffie before touching IRQs */
+       if (intel_engine_add_wait(req->ring, &wait)) {
+               if (__i915_spin_request(req, &wait, state))
+                       goto complete;
+
+               /* In order to check that we haven't missed the interrupt
+                * as we enabled it, we need to kick ourselves to do a
+                * coherent check on the seqno before we sleep.
+                */
+               if (intel_engine_enable_wait_irq(req->ring, &wait))
+                       goto wakeup;
+       }
+
+       for (;;) {
+               if (signal_pending_state(state, wait.task)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               /* Ensure that even if the GPU hangs, we get woken up. */
+               i915_queue_hangcheck(req->i915);
+
+               timeout_remain = io_schedule_timeout(timeout_remain);
+               if (timeout_remain == 0) {
+                       ret = -ETIME;
+                       break;
+               }
+
+               if (intel_wait_complete(&wait))
+                       break;
+
+wakeup:
+               set_task_state(wait.task, state);
+
+               /* Carefully check if the request is complete, giving time
+                * for the seqno to be visible following the interrupt.
+                * We also have to check in case we are kicked by the GPU
+                * reset in order to drop the struct_mutex.
+                */
+               if (__i915_request_irq_complete(req))
+                       break;
+       }
+
+complete:
+       intel_engine_remove_wait(req->ring, &wait);
+       __set_task_state(wait.task, TASK_RUNNING);
+       trace_i915_gem_request_wait_end(req);
+
+       if (timeout) {
+               *timeout -= ktime_get_raw_ns();
+               if (*timeout < 0)
+                       *timeout = 0;
+
+               /*
+                * Apparently ktime isn't accurate enough and occasionally has a
+                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+                * things up to make the test happy. We allow up to 1 jiffy.
+                *
+                * This is a regrssion from the timespec->ktime conversion.
+                */
+               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+                       *timeout = 0;
+       }
+
+       if (ret == 0 && rps && req->seqno == req->ring->last_submitted_seqno) {
+               /* The GPU is now idle and this client has stalled.
+                * Since no other client has submitted a request in the
+                * meantime, assume that this client is the only one
+                * supplying work to the GPU but is unable to keep that
+                * work supplied because it is waiting. Since the GPU is
+                * then never kept fully busy, RPS autoclocking will
+                * keep the clocks relatively low, causing further delays.
+                * Compensate by giving the synchronous client credit for
+                * a waitboost next time.
+                */
+               spin_lock(&req->i915->rps.client_lock);
+               list_del_init(&rps->link);
+               spin_unlock(&req->i915->rps.client_lock);
+       }
+
+       return ret;
+}
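
As an aside for anyone new to this path: the shape of the function above is "spin optimistically for a short bounded period, then arm a wakeup, then sleep in a loop that rechecks the condition". Below is a minimal userspace sketch of that shape only; every name in it (example_waiter, example_wait, example_complete, now_ns) is invented for illustration and nothing in it is driver code.

/* Illustrative only: bounded busy-wait, then a condvar sleep that
 * rechecks the condition under the lock, mirroring the "spin for
 * ~a jiffie before touching IRQs" structure above. */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct example_waiter {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;                      /* analogue of "seqno has passed" */
};

static unsigned long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void example_complete(struct example_waiter *w)
{
        pthread_mutex_lock(&w->lock);
        __atomic_store_n(&w->done, true, __ATOMIC_RELEASE);
        pthread_cond_broadcast(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

static void example_wait(struct example_waiter *w, unsigned long long spin_ns)
{
        unsigned long long deadline = now_ns() + spin_ns;

        /* Optimistic spin: completions often arrive within microseconds,
         * so avoid the wakeup machinery entirely on the fast path. */
        while (now_ns() < deadline) {
                if (__atomic_load_n(&w->done, __ATOMIC_ACQUIRE))
                        return;
        }

        /* Slow path: recheck under the lock before sleeping so that a
         * completion racing with us cannot be missed. */
        pthread_mutex_lock(&w->lock);
        while (!w->done)
                pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
}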
+
+/**
+ * Waits for a request to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_i915_gem_request *req)
+{
+       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
+       bool interruptible;
+       int ret;
+
+       BUG_ON(req == NULL);
+
+       dev = req->ring->dev;
+       dev_priv = dev->dev_private;
+       interruptible = dev_priv->mm.interruptible;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       ret = __i915_wait_request(req, interruptible, NULL, NULL);
+       if (ret)
+               return ret;
+
+       i915_gem_request_retire_upto(req);
+       return 0;
+}
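
Worth noting when reviewing callers: __i915_wait_request() does not itself take or require struct_mutex and does not retire anything, whereas i915_wait_request() insists the mutex is held and retires up to the request on success. A hedged caller sketch (the wrapper name example_sync_to() is mine, not from the patch):

/* Illustrative only; example_sync_to() is not part of this patch. */
static int example_sync_to(struct drm_i915_gem_request *req)
{
        int ret;

        lockdep_assert_held(&req->ring->dev->struct_mutex);

        /* Blocks until the seqno passes, then retires req and every
         * older request on the same ring. */
        ret = i915_wait_request(req);
        if (ret)
                return ret;

        /* On success the objects tracked by req have been moved off
         * the active lists. */
        return 0;
}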
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+       struct drm_i915_gem_request *req = container_of(req_ref,
+                                                typeof(*req), ref);
+       struct intel_context *ctx = req->ctx;
+
+       if (req->file_priv)
+               i915_gem_request_remove_from_client(req);
+
+       if (ctx) {
+               if (i915.enable_execlists) {
+                       if (ctx != req->ring->default_context)
+                               intel_lr_context_unpin(req);
+               }
+
+               i915_gem_context_unreference(ctx);
+       }
+
+       kmem_cache_free(req->i915->requests, req);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
new file mode 100644
index 000000000000..d46f22f30b0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_REQUEST_H
+#define I915_GEM_REQUEST_H
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
+ */
+struct drm_i915_gem_request {
+       struct kref ref;
+
+       /** On which ring this request was generated */
+       struct drm_i915_private *i915;
+       struct intel_engine_cs *ring;
+       unsigned reset_counter;
+
+        /** GEM sequence number associated with the previous request,
+         * when the HWS breadcrumb is equal to this the GPU is processing
+         * this request.
+         */
+       u32 previous_seqno;
+
+        /** GEM sequence number associated with this request,
+         * when the HWS breadcrumb is equal or greater than this the GPU
+         * has finished processing this request.
+         */
+       u32 seqno;
+
+       /** Position in the ringbuffer of the start of the request */
+       u32 head;
+
+       /**
+        * Position in the ringbuffer of the start of the postfix.
+        * This is required to calculate the maximum available ringbuffer
+        * space without overwriting the postfix.
+        */
+        u32 postfix;
+
+       /** Position in the ringbuffer of the end of the whole request */
+       u32 tail;
+
+       /**
+        * Context and ring buffer related to this request
+        * Contexts are refcounted, so when this request is associated with a
+        * context, we must increment the context's refcount, to guarantee that
+        * it persists while any request is linked to it. Requests themselves
+        * are also refcounted, so the request will only be freed when the last
+        * reference to it is dismissed, and the code in
+        * i915_gem_request_free() will then decrement the refcount on the
+        * context.
+        */
+       struct intel_context *ctx;
+       struct intel_ringbuffer *ringbuf;
+
+       /** Batch buffer related to this request if any (used for
+           error state dump only) */
+       struct drm_i915_gem_object *batch_obj;
+
+       /** Time at which this request was emitted, in jiffies. */
+       unsigned long emitted_jiffies;
+
+       /** global list entry for this request */
+       struct list_head list;
+
+       struct drm_i915_file_private *file_priv;
+       /** file_priv list entry for this request */
+       struct list_head client_list;
+
+       /** process identifier submitting this request */
+       struct pid *pid;
+
+       /**
+        * The ELSP only accepts two elements at a time, so we queue
+        * context/tail pairs on a given queue (ring->execlist_queue) until the
+        * hardware is available. The queue serves a double purpose: we also use
+        * it to keep track of the up to 2 contexts currently in the hardware
+        * (usually one in execution and the other queued up by the GPU): We
+        * only remove elements from the head of the queue when the hardware
+        * informs us that an element has been completed.
+        *
+        * All accesses to the queue are mediated by a spinlock
+        * (ring->execlist_lock).
+        */
+
+       /** Execlist link in the submission queue.*/
+       struct list_head execlist_link;
+
+       /** Execlists no. of times this request has been sent to the ELSP */
+       int elsp_submitted;
+};
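
The execlist comment above is the densest part of this structure, so here is a deliberately simplified, driver-independent model of what it describes: a two-slot submission port refilled from a software FIFO, where the head of the queue is only removed on a completion event. All names (elsp_model, elsp_submit, elsp_complete, PORT_DEPTH) are invented for illustration; in the driver every access is of course serialised by ring->execlist_lock.

/* Toy model only, not driver code. */
#include <stddef.h>

#define PORT_DEPTH 2                    /* the ELSP takes two elements */
#define QUEUE_MAX  16

struct elsp_model {
        int queue[QUEUE_MAX];           /* pending "contexts", FIFO order */
        size_t head, count;             /* FIFO bookkeeping */
        size_t inflight;                /* leading entries held by "hardware" */
};

/* Queue new work and, if a port slot is free, hand it to the hardware. */
static void elsp_submit(struct elsp_model *m, int ctx)
{
        if (m->count < QUEUE_MAX)
                m->queue[(m->head + m->count++) % QUEUE_MAX] = ctx;

        while (m->inflight < PORT_DEPTH && m->inflight < m->count)
                m->inflight++;          /* "write" the next element to the port */
}

/* Completion event: only now is the head of the queue removed, and the
 * freed slot refilled from whatever is still pending. */
static void elsp_complete(struct elsp_model *m)
{
        if (!m->inflight)
                return;

        m->head = (m->head + 1) % QUEUE_MAX;
        m->count--;
        m->inflight--;

        if (m->inflight < PORT_DEPTH && m->inflight < m->count)
                m->inflight++;
}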
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+                          struct intel_context *ctx,
+                          struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
+void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file);
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+       return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+       return req ? req->ring : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+       if (req)
+               kref_get(&req->ref);
+       return req;
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+       kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void
+i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+{
+       struct drm_device *dev;
+
+       if (!req)
+               return;
+
+       dev = req->ring->dev;
+       if (kref_put_mutex(&req->ref, i915_gem_request_free,
+                          &dev->struct_mutex))
+               mutex_unlock(&dev->struct_mutex);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+                                          struct drm_i915_gem_request *src)
+{
+       if (src)
+               i915_gem_request_reference(src);
+
+       if (*pdst)
+               i915_gem_request_unreference(*pdst);
+
+       *pdst = src;
+}
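
i915_gem_request_assign() is the helper to reach for whenever a long-lived pointer tracks "the most recent request": it takes the new reference before dropping the old one, so it is safe even when both pointers alias or either is NULL, and callers are expected to hold struct_mutex since the unreference path asserts it. A usage sketch with an invented tracker struct, purely for illustration:

/* Illustrative only: example_tracker is not a driver structure. */
struct example_tracker {
        struct drm_i915_gem_request *last_req;
};

static void example_track(struct example_tracker *t,
                          struct drm_i915_gem_request *req)
{
        /* The new reference is taken before the old one is dropped. */
        i915_gem_request_assign(&t->last_req, req);
}

static void example_untrack(struct example_tracker *t)
{
        i915_gem_request_assign(&t->last_req, NULL);
}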
+
+void __i915_add_request(struct drm_i915_gem_request *req,
+                       struct drm_i915_gem_object *batch_obj,
+                       bool flush_caches);
+#define i915_add_request(req) \
+       __i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+       __i915_add_request(req, NULL, false)
+
+struct intel_rps_client;
+
+int __i915_wait_request(struct drm_i915_gem_request *req,
+                       bool interruptible,
+                       s64 *timeout,
+                       struct intel_rps_client *rps);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+       return (int32_t)(seq1 - seq2) >= 0;
+}
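
The signed subtraction above is what makes 32-bit seqno wraparound harmless: as long as the two values are within 2^31 of each other, the comparison stays correct across the wrap. A standalone check of a few cases (the helper is simply a copy of i915_seqno_passed() so it can be compiled outside the driver):

#include <assert.h>
#include <stdint.h>

static inline int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 5));             /* equal counts as passed */
        assert(seqno_passed(6, 5));             /* ordinary ordering */
        assert(!seqno_passed(5, 6));
        assert(seqno_passed(3, 0xfffffff0u));   /* across the wrap: 3 - 0xfffffff0 == 0x13 */
        assert(!seqno_passed(0xfffffff0u, 3));
        return 0;
}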
+
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
+{
+       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+                                req->previous_seqno);
+}
+
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
+{
+       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+                                req->seqno);
+}
+
+#endif /* I915_GEM_REQUEST_H */
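
Finally, to see how the declarations above fit together, here is a hedged end-to-end sketch of the request lifecycle as I read it (struct_mutex held throughout); the wrapper example_submit_and_wait() and the omitted command emission are placeholders, not code from the patch:

/* Illustrative lifecycle only; not part of the patch. */
static int example_submit_and_wait(struct intel_engine_cs *ring,
                                   struct intel_context *ctx)
{
        struct drm_i915_gem_request *req;
        int ret;

        ret = i915_gem_request_alloc(ring, ctx, &req);
        if (ret)
                return ret;

        /* ... emit commands into req->ringbuf here ... */

        /* Keep a local reference across the wait: as I read it, the
         * initial reference from the allocation is handed over to the
         * request list and dropped when the request is retired. */
        i915_gem_request_reference(req);

        /* Close and submit the request, flushing caches. */
        i915_add_request(req);

        /* Block until completion and retire on success. */
        ret = i915_wait_request(req);

        i915_gem_request_unreference(req);
        return ret;
}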

