On 27/02/2019 23:09, Chris Wilson wrote:
We currently use a worker queued from an rcu callback to determine when
an RCU grace period has elapsed while we remained idle. We use this idle
delay to infer that we will remain idle for a while, and that this is a
suitable point at which we can trim our global memory caches.

Since we wrote that, this mechanism now exists as rcu_work, and having
converted the idle shrinkers over to using that, we can remove our own
variant.
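
For reference, the rcu_work helper the commit message refers to lives in
<linux/workqueue.h> and folds the old call_rcu() + queue_work() two-step
into one call. A minimal sketch of the pattern (the deferred_shrink names
below are illustrative, not taken from the patch):

  #include <linux/workqueue.h>

  static struct rcu_work deferred_shrink;

  static void deferred_shrink_fn(struct work_struct *wrk)
  {
          /*
           * Runs on the chosen workqueue only after an RCU grace period
           * has elapsed, i.e. after previously queued RCU-deferred frees
           * have had a chance to be processed.
           */
  }

  static void example_init(void)
  {
          INIT_RCU_WORK(&deferred_shrink, deferred_shrink_fn);
  }

  static void example_park(void)
  {
          /* Does nothing (returns false) if the work is already pending. */
          queue_rcu_work(system_wq, &deferred_shrink);
  }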

By the look of it, gt.epoch can be completely ripped out.

Regards,

Tvrtko

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
  drivers/gpu/drm/i915/i915_gem.c | 91 +++++----------------------------
  1 file changed, 12 insertions(+), 79 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8ded7e1756c9..8cf3429594d5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -101,7 +101,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
        spin_unlock(&dev_priv->mm.object_stat_lock);
  }
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
  {
        intel_wakeref_t wakeref;
@@ -112,7 +112,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
        GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
        if (!i915->gt.awake)
-               return I915_EPOCH_INVALID;
+               return;

        GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);

@@ -143,7 +143,15 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)

        intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

-       return i915->gt.epoch;
+       /*
+        * When we are idle, it is an opportune time to reap our caches.
+        * However, we have many objects that utilise RCU and the ordered
+        * i915->wq that this work is executing on. To try and flush any
+        * pending frees now we are idle, we first wait for an RCU grace
+        * period, and then queue a task (that will run last on the wq) to
+        * shrink and re-optimize the caches.
+        */
+       i915_globals_park();
  }

  void i915_gem_park(struct drm_i915_private *i915)
@@ -2877,62 +2885,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
                                   round_jiffies_up_relative(HZ));
  }
-static void shrink_caches(struct drm_i915_private *i915)
-{
-       /*
-        * kmem_cache_shrink() discards empty slabs and reorders partially
-        * filled slabs to prioritise allocating from the mostly full slabs,
-        * with the aim of reducing fragmentation.
-        */
-       i915_globals_park();
-}
-
-struct sleep_rcu_work {
-       union {
-               struct rcu_head rcu;
-               struct work_struct work;
-       };
-       struct drm_i915_private *i915;
-       unsigned int epoch;
-};
-
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
-       /*
-        * There is a small chance that the epoch wrapped since we started
-        * sleeping. If we assume that epoch is at least a u32, then it will
-        * take at least 2^32 * 100ms for it to wrap, or about 326 years.
-        */
-       return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
-       struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
-       struct drm_i915_private *i915 = s->i915;
-       unsigned int epoch = s->epoch;
-
-       kfree(s);
-       if (same_epoch(i915, epoch))
-               shrink_caches(i915);
-}
-
-static void __sleep_rcu(struct rcu_head *rcu)
-{
-       struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
-       struct drm_i915_private *i915 = s->i915;
-
-       destroy_rcu_head(&s->rcu);
-
-       if (same_epoch(i915, s->epoch)) {
-               INIT_WORK(&s->work, __sleep_work);
-               queue_work(i915->wq, &s->work);
-       } else {
-               kfree(s);
-       }
-}
-
  static inline bool
  new_requests_since_last_retire(const struct drm_i915_private *i915)
  {
@@ -2961,7 +2913,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
  {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), gt.idle_work.work);
-       unsigned int epoch = I915_EPOCH_INVALID;
        bool rearm_hangcheck;

        if (!READ_ONCE(dev_priv->gt.awake))
@@ -3016,7 +2967,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
        if (new_requests_since_last_retire(dev_priv))
                goto out_unlock;

-       epoch = __i915_gem_park(dev_priv);
+       __i915_gem_park(dev_priv);

        assert_kernel_context_is_current(dev_priv);

@@ -3029,24 +2980,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
                GEM_BUG_ON(!dev_priv->gt.awake);
                i915_queue_hangcheck(dev_priv);
        }
-
-       /*
-        * When we are idle, it is an opportune time to reap our caches.
-        * However, we have many objects that utilise RCU and the ordered
-        * i915->wq that this work is executing on. To try and flush any
-        * pending frees now we are idle, we first wait for an RCU grace
-        * period, and then queue a task (that will run last on the wq) to
-        * shrink and re-optimize the caches.
-        */
-       if (same_epoch(dev_priv, epoch)) {
-               struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
-               if (s) {
-                       init_rcu_head(&s->rcu);
-                       s->i915 = dev_priv;
-                       s->epoch = epoch;
-                       call_rcu(&s->rcu, __sleep_rcu);
-               }
-       }
  }

  void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
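
For anyone following along, the shape of the replacement on the
i915_globals_park() side is roughly the following. This is only a sketch
under assumptions: the actual function names, caches and locking in
i915_globals.c may differ, and example_cache here is hypothetical.

  #include <linux/slab.h>
  #include <linux/workqueue.h>

  static struct kmem_cache *example_cache;   /* hypothetical slab cache */
  static struct rcu_work globals_park_work;

  static void __globals_park(struct work_struct *wrk)
  {
          /*
           * kmem_cache_shrink() discards empty slabs and reorders
           * partially filled slabs to prioritise allocating from the
           * mostly full slabs, with the aim of reducing fragmentation.
           */
          kmem_cache_shrink(example_cache);
  }

  void example_globals_park(void)
  {
          /* Wait for an RCU grace period, then shrink on the system wq. */
          queue_rcu_work(system_wq, &globals_park_work);
  }

(INIT_RCU_WORK(&globals_park_work, __globals_park) would be done once at
init, and flush_rcu_work() before the caches are destroyed on unload.)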
