On Fri, Nov 20, 2015 at 12:43:44PM +0000, Chris Wilson wrote:
> Combine the near identical implementations of intel_logical_ring_begin()
> and intel_ring_begin() - the only difference is that the logical wait
> has to check for a matching ring (which is assumed by legacy).
> 
> Signed-off-by: Chris Wilson <[email protected]>

Hm, I originally punted on this one since OCD-me wanted to move
engine->request_list to ring->request_list first. But hey, just adding the
check works too and gives us the immediate improvement faster!

Reviewed-by: Daniel Vetter <[email protected]>
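
For anyone skimming the diff: the only semantic delta the shared path picks
up is the per-ringbuffer filter in the wait loop. Roughly (just a sketch,
reusing the field names from the patch below):

	list_for_each_entry(target, &ring->request_list, list) {
		/* Skip requests emitted to a different ringbuffer on this engine. */
		if (target->ringbuf != ringbuf)
			continue;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ringbuf->tail,
					   ringbuf->size);
		if (space >= bytes)
			break;
	}

On the legacy path every engine has exactly one ringbuffer, so the extra
check is a no-op there and both submission modes can share wait_for_space().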

> ---
>  drivers/gpu/drm/i915/intel_lrc.c        | 149 ++------------------------------
>  drivers/gpu/drm/i915/intel_lrc.h        |   1 -
>  drivers/gpu/drm/i915/intel_mocs.c       |  12 +--
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 115 ++++++++++++------------
>  4 files changed, 71 insertions(+), 206 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 0db23c474045..02f798e4c726 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -646,48 +646,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>       return 0;
>  }
>  
> -static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
> -                                    int bytes)
> -{
> -     struct intel_ringbuffer *ringbuf = req->ringbuf;
> -     struct intel_engine_cs *ring = req->ring;
> -     struct drm_i915_gem_request *target;
> -     unsigned space;
> -     int ret;
> -
> -     if (intel_ring_space(ringbuf) >= bytes)
> -             return 0;
> -
> -     /* The whole point of reserving space is to not wait! */
> -     WARN_ON(ringbuf->reserved_in_use);
> -
> -     list_for_each_entry(target, &ring->request_list, list) {
> -             /*
> -              * The request queue is per-engine, so can contain requests
> -              * from multiple ringbuffers. Here, we must ignore any that
> -              * aren't from the ringbuffer we're considering.
> -              */
> -             if (target->ringbuf != ringbuf)
> -                     continue;
> -
> -             /* Would completion of this request free enough space? */
> -             space = __intel_ring_space(target->postfix, ringbuf->tail,
> -                                        ringbuf->size);
> -             if (space >= bytes)
> -                     break;
> -     }
> -
> -     if (WARN_ON(&target->list == &ring->request_list))
> -             return -ENOSPC;
> -
> -     ret = i915_wait_request(target);
> -     if (ret)
> -             return ret;
> -
> -     ringbuf->space = space;
> -     return 0;
> -}
> -
>  /*
>   * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
>   * @request: Request to advance the logical ringbuffer of.
> @@ -708,97 +666,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>       execlists_context_queue(request);
>  }
>  
> -static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
> -{
> -     int rem = ringbuf->size - ringbuf->tail;
> -     memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
> -
> -     ringbuf->tail = 0;
> -     intel_ring_update_space(ringbuf);
> -}
> -
> -static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
> -{
> -     struct intel_ringbuffer *ringbuf = req->ringbuf;
> -     int remain_usable = ringbuf->effective_size - ringbuf->tail;
> -     int remain_actual = ringbuf->size - ringbuf->tail;
> -     int ret, total_bytes, wait_bytes = 0;
> -     bool need_wrap = false;
> -
> -     if (ringbuf->reserved_in_use)
> -             total_bytes = bytes;
> -     else
> -             total_bytes = bytes + ringbuf->reserved_size;
> -
> -     if (unlikely(bytes > remain_usable)) {
> -             /*
> -              * Not enough space for the basic request. So need to flush
> -              * out the remainder and then wait for base + reserved.
> -              */
> -             wait_bytes = remain_actual + total_bytes;
> -             need_wrap = true;
> -     } else {
> -             if (unlikely(total_bytes > remain_usable)) {
> -                     /*
> -                      * The base request will fit but the reserved space
> -                      * falls off the end. So only need to wait for the
> -                      * reserved size after flushing out the remainder.
> -                      */
> -                     wait_bytes = remain_actual + ringbuf->reserved_size;
> -                     need_wrap = true;
> -             } else if (total_bytes > ringbuf->space) {
> -                     /* No wrapping required, just waiting. */
> -                     wait_bytes = total_bytes;
> -             }
> -     }
> -
> -     if (wait_bytes) {
> -             ret = logical_ring_wait_for_space(req, wait_bytes);
> -             if (unlikely(ret))
> -                     return ret;
> -
> -             if (need_wrap)
> -                     __wrap_ring_buffer(ringbuf);
> -     }
> -
> -     return 0;
> -}
> -
> -/**
> - * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
> - *
> - * @request: The request to start some new work for
> - * @ctx: Logical ring context whose ringbuffer is being prepared.
> - * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
> - *
> - * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
> - * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
> - * and also preallocates a request (every workload submission is still mediated through
> - * requests, same as it did with legacy ringbuffer submission).
> - *
> - * Return: non-zero if the ringbuffer is not ready to be written to.
> - */
> -int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> -{
> -     struct drm_i915_private *dev_priv;
> -     int ret;
> -
> -     WARN_ON(req == NULL);
> -     dev_priv = req->i915;
> -
> -     ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> -                                dev_priv->mm.interruptible);
> -     if (ret)
> -             return ret;
> -
> -     ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
> -     if (ret)
> -             return ret;
> -
> -     req->ringbuf->space -= num_dwords * sizeof(uint32_t);
> -     return 0;
> -}
> -
>  int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
>  {
>       /*
> @@ -811,7 +678,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
>        */
>       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
>  
> -     return intel_logical_ring_begin(request, 0);
> +     return intel_ring_begin(request, 0);
>  }
>  
>  /**
> @@ -881,7 +748,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  
>       if (ring == &dev_priv->ring[RCS] &&
>           instp_mode != dev_priv->relative_constants_mode) {
> -             ret = intel_logical_ring_begin(params->request, 4);
> +             ret = intel_ring_begin(params->request, 4);
>               if (ret)
>                       return ret;
>  
> @@ -1035,7 +902,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>       if (ret)
>               return ret;
>  
> -     ret = intel_logical_ring_begin(req, w->count * 2 + 2);
> +     ret = intel_ring_begin(req, w->count * 2 + 2);
>       if (ret)
>               return ret;
>  
> @@ -1472,7 +1339,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>       const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
>       int i, ret;
>  
> -     ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
> +     ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
>       if (ret)
>               return ret;
>  
> @@ -1516,7 +1383,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
>       }
>  
> -     ret = intel_logical_ring_begin(req, 4);
> +     ret = intel_ring_begin(req, 4);
>       if (ret)
>               return ret;
>  
> @@ -1577,7 +1444,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>       uint32_t cmd;
>       int ret;
>  
> -     ret = intel_logical_ring_begin(request, 4);
> +     ret = intel_ring_begin(request, 4);
>       if (ret)
>               return ret;
>  
> @@ -1644,7 +1511,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>       vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
>                     flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
>  
> -     ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
> +     ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
>       if (ret)
>               return ret;
>  
> @@ -1690,7 +1557,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>        * used as a workaround for not being allowed to do lite
>        * restore with HEAD==TAIL (WaIdleLiteRestore).
>        */
> -     ret = intel_logical_ring_begin(request, 8);
> +     ret = intel_ring_begin(request, 8);
>       if (ret)
>               return ret;
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index 861668919e5a..5402eca78a07 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -42,7 +42,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
>  void intel_logical_ring_stop(struct intel_engine_cs *ring);
>  void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
>  int intel_logical_rings_init(struct drm_device *dev);
> -int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
>  
>  int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
>  
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index 399a131a94b6..ac0a982bbf55 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -181,11 +181,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
>               return -ENODEV;
>  
> -     ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
> -     if (ret) {
> -             DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
> +     ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
> +     if (ret)
>               return ret;
> -     }
>  
>       intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
>  
> @@ -238,11 +236,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
>               return -ENODEV;
>  
> -     ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
> -     if (ret) {
> -             DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
> +     ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
> +     if (ret)
>               return ret;
> -     }
>  
>       intel_ring_emit(ringbuf,
>                       MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index b3de8b1fde2f..fbee790ddaf0 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2143,46 +2143,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
>       ring->buffer = NULL;
>  }
>  
> -static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
> -{
> -     struct intel_ringbuffer *ringbuf = ring->buffer;
> -     struct drm_i915_gem_request *request;
> -     unsigned space;
> -     int ret;
> -
> -     if (intel_ring_space(ringbuf) >= n)
> -             return 0;
> -
> -     /* The whole point of reserving space is to not wait! */
> -     WARN_ON(ringbuf->reserved_in_use);
> -
> -     list_for_each_entry(request, &ring->request_list, list) {
> -             space = __intel_ring_space(request->postfix, ringbuf->tail,
> -                                        ringbuf->size);
> -             if (space >= n)
> -                     break;
> -     }
> -
> -     if (WARN_ON(&request->list == &ring->request_list))
> -             return -ENOSPC;
> -
> -     ret = i915_wait_request(request);
> -     if (ret)
> -             return ret;
> -
> -     ringbuf->space = space;
> -     return 0;
> -}
> -
> -static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
> -{
> -     int rem = ringbuf->size - ringbuf->tail;
> -     memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
> -
> -     ringbuf->tail = 0;
> -     intel_ring_update_space(ringbuf);
> -}
> -
>  int intel_ring_idle(struct intel_engine_cs *ring)
>  {
>       struct drm_i915_gem_request *req;
> @@ -2270,9 +2230,59 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
>       ringbuf->reserved_in_use = false;
>  }
>  
> -static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
> +static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  {
> -     struct intel_ringbuffer *ringbuf = ring->buffer;
> +     struct intel_ringbuffer *ringbuf = req->ringbuf;
> +     struct intel_engine_cs *ring = req->ring;
> +     struct drm_i915_gem_request *target;
> +     unsigned space;
> +     int ret;
> +
> +     if (intel_ring_space(ringbuf) >= bytes)
> +             return 0;
> +
> +     /* The whole point of reserving space is to not wait! */
> +     WARN_ON(ringbuf->reserved_in_use);
> +
> +     list_for_each_entry(target, &ring->request_list, list) {
> +             /*
> +              * The request queue is per-engine, so can contain requests
> +              * from multiple ringbuffers. Here, we must ignore any that
> +              * aren't from the ringbuffer we're considering.
> +              */
> +             if (target->ringbuf != ringbuf)
> +                     continue;
> +
> +             /* Would completion of this request free enough space? */
> +             space = __intel_ring_space(target->postfix, ringbuf->tail,
> +                                        ringbuf->size);
> +             if (space >= bytes)
> +                     break;
> +     }
> +
> +     if (WARN_ON(&target->list == &ring->request_list))
> +             return -ENOSPC;
> +
> +     ret = i915_wait_request(target);
> +     if (ret)
> +             return ret;
> +
> +     ringbuf->space = space;
> +     return 0;
> +}
> +
> +static void ring_wrap(struct intel_ringbuffer *ringbuf)
> +{
> +     int rem = ringbuf->size - ringbuf->tail;
> +     memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
> +
> +     ringbuf->tail = 0;
> +     intel_ring_update_space(ringbuf);
> +}
> +
> +static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
> +{
> +     struct intel_ringbuffer *ringbuf = req->ringbuf;
>       int remain_usable = ringbuf->effective_size - ringbuf->tail;
>       int remain_actual = ringbuf->size - ringbuf->tail;
>       int ret, total_bytes, wait_bytes = 0;
> @@ -2306,38 +2316,31 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
>       }
>  
>       if (wait_bytes) {
> -             ret = ring_wait_for_space(ring, wait_bytes);
> +             ret = wait_for_space(req, wait_bytes);
>               if (unlikely(ret))
>                       return ret;
>  
>               if (need_wrap)
> -                     __wrap_ring_buffer(ringbuf);
> +                     ring_wrap(ringbuf);
>       }
>  
>       return 0;
>  }
>  
> -int intel_ring_begin(struct drm_i915_gem_request *req,
> -                  int num_dwords)
> +int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  {
> -     struct intel_engine_cs *ring;
> -     struct drm_i915_private *dev_priv;
>       int ret;
>  
> -     WARN_ON(req == NULL);
> -     ring = req->ring;
> -     dev_priv = req->i915;
> -
> -     ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> -                                dev_priv->mm.interruptible);
> +     ret = i915_gem_check_wedge(&req->i915->gpu_error,
> +                                req->i915->mm.interruptible);
>       if (ret)
>               return ret;
>  
> -     ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
> +     ret = ring_prepare(req, num_dwords * sizeof(uint32_t));
>       if (ret)
>               return ret;
>  
> -     ring->buffer->space -= num_dwords * sizeof(uint32_t);
> +     req->ringbuf->space -= num_dwords * sizeof(uint32_t);
>       return 0;
>  }
>  
> -- 
> 2.6.2
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch