This spinlock only served debugging purposes in a time when we could not
be sure of the mutex ever being released upon a GPU hang. As we now
should be able to rely on hangcheck to do the job for us (and that error
reporting should not itself require the struct mutex) we can kill the
incomplete and misleading attempt at protection.
Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
---
drivers/gpu/drm/i915/i915_debugfs.c | 58 +---
drivers/gpu/drm/i915/i915_drv.h |2 -
drivers/gpu/drm/i915/i915_gem.c | 35 +--
drivers/gpu/drm/i915/i915_gem_evict.c |5 ---
4 files changed, 47 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9214119..08f279b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -71,12 +71,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
- spinlock_t *lock = NULL;
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
- lock = &dev_priv->mm.active_list_lock;
head = &dev_priv->render_ring.active_list;
break;
case INACTIVE_LIST:
@@ -89,11 +87,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
break;
default:
DRM_INFO("Ooops, unexpected list\n");
- return 0;
+ return -EINVAL;
}
- if (lock)
- spin_lock(lock);
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
+
list_for_each_entry(obj_priv, head, list)
{
seq_printf(m, "%p: %s %8zd %08x %08x %d%s%s",
@@ -116,8 +115,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "\n");
}
- if (lock)
- spin_unlock(lock);
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -128,6 +127,9 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
+
seq_printf(m, "Request:\n");
list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
list) {
@@ -135,6 +137,9 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
+
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -144,6 +149,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
+
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
i915_get_gem_seqno(dev, &dev_priv->render_ring));
@@ -153,6 +161,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -216,6 +227,9 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
+
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -241,6 +255,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
}
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -284,9 +300,10 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
- spin_lock(&dev_priv->mm.active_list_lock);
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
list) {
@@ -295,8 +312,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {