On Tue, 24 Jul 2012 23:33:45 +0200
Daniel Vetter <[email protected]> wrote:

> This way it's easier to see what belongs together, and what is used
> by the ilk ips code. Also add some comments that explain the locking.
> 
> v2: Missed one place that the dev_priv->ips change caught ...
> 
> Signed-off-by: Daniel Vetter <[email protected]>
Reviewed-by: Ben Widawsky <[email protected]>
With a few comments below

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c  |   11 +++----
>  drivers/gpu/drm/i915/i915_dma.c      |    2 +-
>  drivers/gpu/drm/i915/i915_drv.h      |   18 ++++++++++--
>  drivers/gpu/drm/i915/i915_irq.c      |   32 ++++++++++-----------
>  drivers/gpu/drm/i915/intel_display.c |    2 +-
>  drivers/gpu/drm/i915/intel_pm.c      |   52 +++++++++++++++++-----------------
>  6 files changed, 65 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 2499610..05087fa 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1287,7 +1287,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
>  
>       seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
>  
> -     for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
> +     for (gpu_freq = dev_priv->rps.min_delay;
> +          gpu_freq <= dev_priv->rps.max_delay;
>            gpu_freq++) {
>               I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
>               I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
> @@ -1715,7 +1716,7 @@ i915_max_freq_read(struct file *filp,
>  
>       mutex_lock(&dev->struct_mutex);
>       len = snprintf(buf, sizeof(buf),
> -                    "max freq: %d\n", dev_priv->max_delay * 50);
> +                    "max freq: %d\n", dev_priv->rps.max_delay * 50);
>       mutex_unlock(&dev->struct_mutex);
>  
>       if (len > sizeof(buf))
> @@ -1755,7 +1756,7 @@ i915_max_freq_write(struct file *filp,
>       /*
>        * Turbo will still be enabled, but won't go above the set value.
>        */
> -     dev_priv->max_delay = val / 50;
> +     dev_priv->rps.max_delay = val / 50;
>  
>       gen6_set_rps(dev, val / 50);
>       mutex_unlock(&dev->struct_mutex);
> @@ -1785,7 +1786,7 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
>  
>       mutex_lock(&dev->struct_mutex);
>       len = snprintf(buf, sizeof(buf),
> -                    "min freq: %d\n", dev_priv->min_delay * 50);
> +                    "min freq: %d\n", dev_priv->rps.min_delay * 50);
>       mutex_unlock(&dev->struct_mutex);
>  
>       if (len > sizeof(buf))
> @@ -1823,7 +1824,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
>       /*
>        * Turbo will still be enabled, but won't go below the set value.
>        */
> -     dev_priv->min_delay = val / 50;
> +     dev_priv->rps.min_delay = val / 50;
>  
>       gen6_set_rps(dev, val / 50);
>       mutex_unlock(&dev->struct_mutex);
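
Side note for anyone skimming the debugfs hunks above: the debugfs code
treats each delay unit as 50 MHz, which is why the read paths multiply
by 50 and the write paths divide by 50. A minimal sketch of that
conversion (illustrative helpers only; the driver open-codes the
arithmetic and these names are made up):

	/* delay steps treated as 50 MHz each, mirroring the "* 50" and
	 * "/ 50" arithmetic in the hunks above */
	static inline int rps_delay_to_mhz(u8 delay)
	{
		return delay * 50;
	}

	static inline u8 mhz_to_rps_delay(int freq_mhz)
	{
		return freq_mhz / 50;
	}
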
> diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> index 5e20f11..5f63638 100644
> --- a/drivers/gpu/drm/i915/i915_dma.c
> +++ b/drivers/gpu/drm/i915/i915_dma.c
> @@ -1586,7 +1586,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
>  
>       spin_lock_init(&dev_priv->irq_lock);
>       spin_lock_init(&dev_priv->error_lock);
> -     spin_lock_init(&dev_priv->rps_lock);
> +     spin_lock_init(&dev_priv->rps.lock);
>  
>       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
>               dev_priv->num_pipe = 3;
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f176589..2496d60 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -793,9 +793,21 @@ typedef struct drm_i915_private {
>  
>       bool mchbar_need_disable;
>  
> -     struct work_struct rps_work;
> -     spinlock_t rps_lock;
> -     u32 pm_iir;
> +     /* gen6+ rps state */
> +     struct {
> +             struct work_struct work;
> +             u32 pm_iir;
> +             /* lock - irqsave spinlock that protects the work_struct and
> +              * pm_iir. */
> +             spinlock_t lock;
> +
> +             /* The below variables and all the rps hw state are protected by
> +              * dev->struct_mutex. */
> +             u8 cur_delay;
> +             u8 min_delay;
> +             u8 max_delay;
> +     } rps;
> +
>  
>       u8 cur_delay;
>       u8 min_delay;

Could you add the reason for introducing the new cur/min/max delays while
keeping the old ones to the commit message? From just this hunk it would
seem we'd want to remove the old cur/min/max entirely.
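
To spell out the locking split those new comments describe: the irqsave
spinlock covers only what the interrupt handler touches (the work_struct
and pm_iir), while the delay fields stay under dev->struct_mutex. A rough
sketch of that pattern, with simplified stand-in names rather than the
actual i915 code:

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>
	#include <linux/types.h>

	struct example_rps {
		struct work_struct work;	/* queued from the IRQ handler */
		u32 pm_iir;			/* IIR bits accumulated in IRQ context */
		spinlock_t lock;		/* irqsave lock for work + pm_iir */

		/* slow-path state, protected by a sleeping mutex
		 * (dev->struct_mutex in this patch) */
		u8 cur_delay;
		u8 min_delay;
		u8 max_delay;
	};

	/* IRQ context: may not sleep, so only the spinlock is taken */
	static void example_queue(struct example_rps *rps, u32 iir,
				  struct workqueue_struct *wq)
	{
		unsigned long flags;

		spin_lock_irqsave(&rps->lock, flags);
		rps->pm_iir |= iir;
		spin_unlock_irqrestore(&rps->lock, flags);

		queue_work(wq, &rps->work);
	}

The worker then takes dev->struct_mutex before reading or writing
cur/min/max_delay, which matches what gen6_pm_rps_work does below.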

> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 41ed41d..6e3f43c 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -349,16 +349,16 @@ static void notify_ring(struct drm_device *dev,
>  static void gen6_pm_rps_work(struct work_struct *work)
>  {
>       drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
> -                                                 rps_work);
> +                                                 rps.work);
>       u32 pm_iir, pm_imr;
>       u8 new_delay;
>  
> -     spin_lock_irq(&dev_priv->rps_lock);
> -     pm_iir = dev_priv->pm_iir;
> -     dev_priv->pm_iir = 0;
> +     spin_lock_irq(&dev_priv->rps.lock);
> +     pm_iir = dev_priv->rps.pm_iir;
> +     dev_priv->rps.pm_iir = 0;
>       pm_imr = I915_READ(GEN6_PMIMR);
>       I915_WRITE(GEN6_PMIMR, 0);
> -     spin_unlock_irq(&dev_priv->rps_lock);
> +     spin_unlock_irq(&dev_priv->rps.lock);
>  
>       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
>               return;
> @@ -366,9 +366,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
>       mutex_lock(&dev_priv->dev->struct_mutex);
>  
>       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
> -             new_delay = dev_priv->cur_delay + 1;
> +             new_delay = dev_priv->rps.cur_delay + 1;
>       else
> -             new_delay = dev_priv->cur_delay - 1;
> +             new_delay = dev_priv->rps.cur_delay - 1;
>  
>       gen6_set_rps(dev_priv->dev, new_delay);
>  
> @@ -488,20 +488,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
>        * IIR bits should never already be set because IMR should
>        * prevent an interrupt from being shown in IIR. The warning
>        * displays a case where we've unsafely cleared
> -      * dev_priv->pm_iir. Although missing an interrupt of the same
> +      * dev_priv->rps.pm_iir. Although missing an interrupt of the same
>        * type is not a problem, it displays a problem in the logic.
>        *
> -      * The mask bit in IMR is cleared by rps_work.
> +      * The mask bit in IMR is cleared by dev_priv->rps.work.
>        */
>  
> -     spin_lock_irqsave(&dev_priv->rps_lock, flags);
> -     WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
> -     dev_priv->pm_iir |= pm_iir;
> -     I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
> +     spin_lock_irqsave(&dev_priv->rps.lock, flags);
> +     WARN(dev_priv->rps.pm_iir & pm_iir, "Missed a PM interrupt\n");
> +     dev_priv->rps.pm_iir |= pm_iir;
> +     I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
>       POSTING_READ(GEN6_PMIMR);
> -     spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
> +     spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
>  
> -     queue_work(dev_priv->wq, &dev_priv->rps_work);
> +     queue_work(dev_priv->wq, &dev_priv->rps.work);
>  }
>  
>  static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
> @@ -2649,7 +2649,7 @@ void intel_irq_init(struct drm_device *dev)
>  
>       INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
>       INIT_WORK(&dev_priv->error_work, i915_error_work_func);
> -     INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
> +     INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
>       INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
>  
>       dev->driver->get_vblank_counter = i915_get_vblank_counter;
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index be45b92..00a90e1 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -7220,7 +7220,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
>        * enqueue unpin/hotplug work. */
>       drm_irq_uninstall(dev);
>       cancel_work_sync(&dev_priv->hotplug_work);
> -     cancel_work_sync(&dev_priv->rps_work);
> +     cancel_work_sync(&dev_priv->rps.work);
>  
>       /* flush any delayed tasks or pending work */
>       flush_scheduled_work();
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index d5af41b..6c9925d 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -2275,17 +2275,17 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
>       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>  
>       limits = 0;
> -     if (val >= dev_priv->max_delay)
> -             val = dev_priv->max_delay;
> +     if (val >= dev_priv->rps.max_delay)
> +             val = dev_priv->rps.max_delay;
>       else
> -             limits |= dev_priv->max_delay << 24;
> +             limits |= dev_priv->rps.max_delay << 24;
>  
> -     if (val <= dev_priv->min_delay)
> -             val = dev_priv->min_delay;
> +     if (val <= dev_priv->rps.min_delay)
> +             val = dev_priv->rps.min_delay;
>       else
> -             limits |= dev_priv->min_delay << 16;
> +             limits |= dev_priv->rps.min_delay << 16;
>  
> -     if (val == dev_priv->cur_delay)
> +     if (val == dev_priv->rps.cur_delay)
>               return;
>  
>       I915_WRITE(GEN6_RPNSWREQ,
> @@ -2298,7 +2298,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
>        */
>       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
>  
> -     dev_priv->cur_delay = val;
> +     dev_priv->rps.cur_delay = val;
>  }
>  
>  static void gen6_disable_rps(struct drm_device *dev)
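
For readers of the gen6_set_rps hunk above: judging purely from the
shifts there, the interrupt-limits word carries the max delay in bits
31:24 and the min delay in bits 23:16, and each limit is only programmed
when val wasn't clamped against that bound. A hypothetical helper just to
make the bit layout explicit (not a suggested code change):

	/* packs both limits the way the shifts in gen6_set_rps do:
	 * max delay in bits 31:24, min delay in bits 23:16 */
	static u32 pack_rps_interrupt_limits(u8 max_delay, u8 min_delay)
	{
		return ((u32)max_delay << 24) | ((u32)min_delay << 16);
	}
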
> @@ -2314,9 +2314,9 @@ static void gen6_disable_rps(struct drm_device *dev)
>        * register (PMIMR) to mask PM interrupts. The only risk is in leaving
>        * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
>  
> -     spin_lock_irq(&dev_priv->rps_lock);
> -     dev_priv->pm_iir = 0;
> -     spin_unlock_irq(&dev_priv->rps_lock);
> +     spin_lock_irq(&dev_priv->rps.lock);
> +     dev_priv->rps.pm_iir = 0;
> +     spin_unlock_irq(&dev_priv->rps.lock);
>  
>       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
>  }
> @@ -2385,9 +2385,9 @@ static void gen6_enable_rps(struct drm_device *dev)
>       gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
>  
>       /* In units of 100MHz */
> -     dev_priv->max_delay = rp_state_cap & 0xff;
> -     dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
> -     dev_priv->cur_delay = 0;
> +     dev_priv->rps.max_delay = rp_state_cap & 0xff;
> +     dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
> +     dev_priv->rps.cur_delay = 0;
>  
>       /* disable the counters and set deterministic thresholds */
>       I915_WRITE(GEN6_RC_CONTROL, 0);
> @@ -2440,8 +2440,8 @@ static void gen6_enable_rps(struct drm_device *dev)
>  
>       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
>       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
> -                dev_priv->max_delay << 24 |
> -                dev_priv->min_delay << 16);
> +                dev_priv->rps.max_delay << 24 |
> +                dev_priv->rps.min_delay << 16);
>  
>       if (IS_HASWELL(dev)) {
>               I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
> @@ -2486,7 +2486,7 @@ static void gen6_enable_rps(struct drm_device *dev)
>                    500))
>               DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
>       if (pcu_mbox & (1<<31)) { /* OC supported */
> -             dev_priv->max_delay = pcu_mbox & 0xff;
> +             dev_priv->rps.max_delay = pcu_mbox & 0xff;
>               DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency 
> max to %dMHz\n", pcu_mbox * 50);
>       }
>  
> @@ -2494,10 +2494,10 @@ static void gen6_enable_rps(struct drm_device *dev)
>  
>       /* requires MSI enabled */
>       I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
> -     spin_lock_irq(&dev_priv->rps_lock);
> -     WARN_ON(dev_priv->pm_iir != 0);
> +     spin_lock_irq(&dev_priv->rps.lock);
> +     WARN_ON(dev_priv->rps.pm_iir != 0);
>       I915_WRITE(GEN6_PMIMR, 0);
> -     spin_unlock_irq(&dev_priv->rps_lock);
> +     spin_unlock_irq(&dev_priv->rps.lock);
>       /* enable all PM interrupts */
>       I915_WRITE(GEN6_PMINTRMSK, 0);
>  
> @@ -2529,9 +2529,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
>        * to use for memory access.  We do this by specifying the IA frequency
>        * the PCU should use as a reference to determine the ring frequency.
>        */
> -     for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
> +     for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
>            gpu_freq--) {
> -             int diff = dev_priv->max_delay - gpu_freq;
> +             int diff = dev_priv->rps.max_delay - gpu_freq;
>  
>               /*
>                * For GPU frequencies less than 750MHz, just use the lowest
> @@ -2974,7 +2974,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
>  
>       assert_spin_locked(&mchdev_lock);
>  
> -     pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
> +     pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
>       pxvid = (pxvid >> 24) & 0x7f;
>       ext_v = pvid_to_extvid(dev_priv, pxvid);
>  
> @@ -3772,11 +3772,11 @@ static void gen6_sanitize_pm(struct drm_device *dev)
>        * until we hit the minimum or maximum frequencies.
>        */
>       limits &= ~(0x3f << 16 | 0x3f << 24);
> -     delay = dev_priv->cur_delay;
> +     delay = dev_priv->rps.cur_delay;
>       if (delay < dev_priv->max_delay)
>               limits |= (dev_priv->max_delay & 0x3f) << 24;
> -     if (delay > dev_priv->min_delay)
> -             limits |= (dev_priv->min_delay & 0x3f) << 16;
> +     if (delay > dev_priv->rps.min_delay)
> +             limits |= (dev_priv->rps.min_delay & 0x3f) << 16;
>  
>       if (old != limits) {
>               /* Note that the known failure case is to read back 0. */



-- 
Ben Widawsky, Intel Open Source Technology Center