Re: [Intel-gfx] [PATCH v5 1/6] drm/i915: Track per-context engine busyness

2018-01-19 Thread Chris Wilson
Quoting Tvrtko Ursulin (2018-01-19 16:26:16)
> From: Tvrtko Ursulin 
> 
> Some customers want to know how much of the GPU time their clients are
> using in order to make dynamic load balancing decisions.
> 
> With the hooks already in place which track the overall engine busyness,
> we can extend that slightly to split that time between contexts.
> 
> v2: Fix accounting for tail updates.
> v3: Rebase.
> v4: Mark currently running contexts as active on stats enable.
> v5: Include some headers to fix the build.
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: gordon.ke...@intel.com
> ---
>  drivers/gpu/drm/i915/i915_gem_context.h |  8 ++
>  drivers/gpu/drm/i915/intel_engine_cs.c  | 32 +
>  drivers/gpu/drm/i915/intel_lrc.c        | 14 +
>  drivers/gpu/drm/i915/intel_ringbuffer.h | 50 +
>  4 files changed, 93 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
> index 4bfb72f8e1cb..7f5eebb67167 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/i915_gem_context.h
> @@ -29,6 +29,9 @@
>  #include 
>  #include 
>  
> +#include "i915_gem.h"
> +#include "i915_gem_request.h"

Yup, we need a patch for tip for

#include "i915_gem.h"

struct drm_i915_gem_request;
-Chris
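
For reference, the forward declaration Chris suggests is sufficient because the
header only ever refers to requests through pointers. A minimal, illustrative
sketch of the idea (the struct and field names below are invented for the
example, they are not from the tree):

/*
 * Illustration only: a forward declaration tells the compiler the type
 * exists, which is all that pointer members and pointer parameters need.
 */
struct drm_i915_gem_request;

struct example_tracker {
	struct drm_i915_gem_request *last_req;	/* pointer to incomplete type is fine */
};

/*
 * Dereferencing the pointer (e.g. last_req->fence) requires the full
 * definition, so i915_gem_request.h would still be included from the .c
 * files that actually touch the request.
 */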


[Intel-gfx] [PATCH v5 1/6] drm/i915: Track per-context engine busyness

2018-01-19 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Some customers want to know how much of the GPU time their clients are
using in order to make dynamic load balancing decisions.

With the hooks already in place which track the overall engine busyness,
we can extend that slightly to split that time between contexts.
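
For illustration only (not part of the patch): a load balancer could sample
the new intel_context_engine_get_busy_time() helper twice and derive a
per-context utilisation percentage for an engine. The function name and the
fixed sampling interval below are made up for the example; the series itself
only exports the raw ktime_t value.

#include <linux/delay.h>	/* msleep() */
#include <linux/ktime.h>	/* ktime_t helpers */
#include <linux/math64.h>	/* div64_u64() */

/* Sketch only: busy time accrued vs. wall time elapsed over one interval. */
static u32 context_engine_busy_pct(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine,
				   unsigned int sample_ms)
{
	ktime_t wall_start, wall_end, busy_start, busy_end;

	wall_start = ktime_get();
	busy_start = intel_context_engine_get_busy_time(ctx, engine);

	msleep(sample_ms);

	wall_end = ktime_get();
	busy_end = intel_context_engine_get_busy_time(ctx, engine);

	/* Scale busy/elapsed to 0-100; elapsed is non-zero thanks to msleep(). */
	return div64_u64(100 * ktime_to_ns(ktime_sub(busy_end, busy_start)),
			 ktime_to_ns(ktime_sub(wall_end, wall_start)));
}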

v2: Fix accounting for tail updates.
v3: Rebase.
v4: Mark currently running contexts as active on stats enable.
v5: Include some headers to fix the build.

Signed-off-by: Tvrtko Ursulin 
Cc: gordon.ke...@intel.com
---
 drivers/gpu/drm/i915/i915_gem_context.h |  8 ++
 drivers/gpu/drm/i915/intel_engine_cs.c  | 32 +
 drivers/gpu/drm/i915/intel_lrc.c        | 14 +
 drivers/gpu/drm/i915/intel_ringbuffer.h | 50 +
 4 files changed, 93 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4bfb72f8e1cb..7f5eebb67167 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -29,6 +29,9 @@
 #include 
 #include 
 
+#include "i915_gem.h"
+#include "i915_gem_request.h"
+
 struct pid;
 
 struct drm_device;
@@ -157,6 +160,11 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+   struct {
+   bool active;
+   ktime_t start;
+   ktime_t total;
+   } stats;
} engine[I915_NUM_ENGINES];
 
/** ring_size: size for allocating the per-engine ring buffer */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d572b18d39eb..9907ceedfa90 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1966,6 +1966,16 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
engine->stats.enabled_at = ktime_get();
 
+   /* Mark currently running context as active. */
+   if (port_isset(port)) {
+   struct drm_i915_gem_request *req = port_request(port);
+   struct intel_context *ce =
+   &req->ctx->engine[engine->id];
+
+   ce->stats.start = engine->stats.enabled_at;
+   ce->stats.active = true;
+   }
+
/* XXX submission method oblivious? */
while (num_ports-- && port_isset(port)) {
engine->stats.active++;
@@ -2038,6 +2048,28 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->stats.lock, flags);
 }
 
+ktime_t intel_context_engine_get_busy_time(struct i915_gem_context *ctx,
+  struct intel_engine_cs *engine)
+{
+   struct intel_context *ce;
+   unsigned long flags;
+   ktime_t total;
+
+   ce = &ctx->engine[engine->id];
+
+   spin_lock_irqsave(&engine->stats.lock, flags);
+
+   total = ce->stats.total;
+
+   if (ce->stats.active)
+   total = ktime_add(total,
+ ktime_sub(ktime_get(), ce->stats.start));
+
+   spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+   return total;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 24ce781d39b7..a82ad5da6090 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -380,16 +380,19 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 }
 
 static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct drm_i915_gem_request *rq,
+ unsigned int port)
 {
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-   intel_engine_context_in(rq->engine);
+   intel_engine_context_in(rq->engine,
+   &rq->ctx->engine[rq->engine->id],
+   port == 0);
 }
 
 static inline void
 execlists_context_schedule_out(struct drm_i915_gem_request *rq)
 {
-   intel_engine_context_out(rq->engine);
+   intel_engine_context_out(rq->engine, &rq->ctx->engine[rq->engine->id]);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 }
 
@@ -442,7 +445,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
-   execlists_context_schedule_in(rq);
+   execlists_context_schedule_in(rq, n);
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);