Add intel_irq_fini() as a place for the IRQ deinitialization code,
starting with freeing dev_priv->l3_parity.remap_info[].

Signed-off-by: Joonas Lahtinen <[email protected]>
Cc: Chris Wilson <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Mika Kuoppala <[email protected]>
---
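The remap_info[] entries are only allocated lazily, on the first sysfs
write to a given slice, so intel_irq_fini() frees them unconditionally and
relies on kfree(NULL) being a no-op. A minimal sketch of the resulting
lifecycle, simplified from the hunks below (assumes MAX_L3_SLICES and
GEN7_L3LOG_SIZE as already defined in the tree):

	/* i915_l3_write(): allocate the slice's remap buffer on first use */
	if (!dev_priv->l3_parity.remap_info[slice]) {
		dev_priv->l3_parity.remap_info[slice] =
			kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!dev_priv->l3_parity.remap_info[slice])
			return -ENOMEM; /* simplified; the real path unlocks first */
	}

	/* intel_irq_fini(): free every slice; kfree(NULL) is a no-op, so
	 * slices that were never written need no special casing.
	 */
	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(dev_priv->l3_parity.remap_info[i]);
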
 drivers/gpu/drm/i915/i915_drv.c   |  6 ++++--
 drivers/gpu/drm/i915/i915_drv.h   |  1 +
 drivers/gpu/drm/i915/i915_irq.c   | 20 +++++++++++++++++++-
 drivers/gpu/drm/i915/i915_sysfs.c | 23 ++++++++++++-----------
 4 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c7d68e7..8228b4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -856,7 +856,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_audio_hooks(dev_priv);
        ret = i915_gem_load_init(dev_priv);
        if (ret < 0)
-               goto err_workqueues;
+               goto err_irq;
 
        intel_display_crc_init(dev_priv);
 
@@ -868,7 +868,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
        return 0;
 
-err_workqueues:
+err_irq:
+       intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
 err_engines:
        i915_engines_cleanup(dev_priv);
@@ -883,6 +884,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
        i915_perf_fini(dev_priv);
        i915_gem_load_cleanup(dev_priv);
+       intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
        i915_engines_cleanup(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d1f7c48..618ea2d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3060,6 +3060,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
                       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_irq_fini(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fd97fe0..0e4dcbeb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1236,7 +1236,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 static void ivybridge_parity_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, l3_parity.error_work);
+               container_of(work, typeof(*dev_priv), l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
@@ -4233,11 +4233,15 @@ static void i965_irq_uninstall(struct drm_device * dev)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
+       int i;
 
        intel_hpd_init_work(dev_priv);
 
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+       for (i = 0; i < MAX_L3_SLICES; ++i)
+               dev_priv->l3_parity.remap_info[i] = NULL;
 
        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
@@ -4363,6 +4367,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 }
 
 /**
+ * intel_irq_fini - deinitializes IRQ support
+ * @i915: i915 device instance
+ *
+ * This function deinitializes all the IRQ support.
+ */
+void intel_irq_fini(struct drm_i915_private *i915)
+{
+       int i;
+
+       for (i = 0; i < MAX_L3_SLICES; ++i)
+               kfree(i915->l3_parity.remap_info[i]);
+}
+
+/**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
  *
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index f3fdfda..f9e009c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -181,8 +181,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_gem_context *ctx;
-       u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
+       u32 **remap_info;
        int ret;
 
        if (!HAS_HW_CONTEXTS(dev_priv))
@@ -196,11 +196,12 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        if (ret)
                return ret;
 
-       if (!dev_priv->l3_parity.remap_info[slice]) {
-               temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
-               if (!temp) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return -ENOMEM;
+       remap_info = &dev_priv->l3_parity.remap_info[slice];
+       if (!*remap_info) {
+               *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+               if (!*remap_info) {
+                       ret = -ENOMEM;
+                       goto out;
                }
        }
 
@@ -208,18 +209,18 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
        */
-       if (temp)
-               dev_priv->l3_parity.remap_info[slice] = temp;
-
-       memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
+       memcpy(*remap_info + (offset/4), buf, count);
 
        /* NB: We defer the remapping until we switch to the context */
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);
 
+       ret = count;
+
+out:
        mutex_unlock(&dev->struct_mutex);
 
-       return count;
+       return ret;
 }
 
 static struct bin_attribute dpf_attrs = {
-- 
2.7.4
