[Intel-gfx] [PATCH] drm/i915: Include instdone[1] in hangcheck

2010-06-06 Thread Chris Wilson
References:

  Bug 26691 - Spurious hangcheck whilst executing a long shader over a
  large vertex buffer
  https://bugs.freedesktop.org/show_bug.cgi?id=26691

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h |    2 ++
 drivers/gpu/drm/i915/i915_irq.c |   38 +++++++++++++++++++++++---------------
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2765831..27900cd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,8 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+   uint32_t last_instdone;
+   uint32_t last_instdone1;
 
struct drm_mm vram;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2479be0..4f2a85d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1225,16 +1225,21 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
-   uint32_t acthd;
+   uint32_t acthd, instdone, instdone1;
 
/* No reset support on this chip yet. */
if (IS_GEN6(dev))
return;
 
-   if (!IS_I965G(dev))
+   if (!IS_I965G(dev)) {
acthd = I915_READ(ACTHD);
-   else
+   instdone = I915_READ(INSTDONE);
+   instdone1 = 0;
+   } else {
acthd = I915_READ(ACTHD_I965);
+   instdone = I915_READ(INSTDONE_I965);
+   instdone1 = I915_READ(INSTDONE1);
+   }
 
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1245,21 +1250,24 @@ void i915_hangcheck_elapsed(unsigned long data)
return;
}
 
-   if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
-   DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-   i915_handle_error(dev, true);
-   return;
-   }
+   if (dev_priv->last_acthd == acthd &&
+   dev_priv->last_instdone == instdone &&
+   dev_priv->last_instdone1 == instdone1) {
+   if (dev_priv->hangcheck_count++ > 1) {
+   DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+   i915_handle_error(dev, true);
+   return;
+   }
+   } else {
+   dev_priv->hangcheck_count = 0;
+
+   dev_priv->last_acthd = acthd;
+   dev_priv->last_instdone = instdone;
+   dev_priv->last_instdone1 = instdone1;
+   }
 
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer, jiffies + 
DRM_I915_HANGCHECK_PERIOD);
-
-   if (acthd != dev_priv->last_acthd)
-   dev_priv->hangcheck_count = 0;
-   else
-   dev_priv->hangcheck_count++;
-
-   dev_priv->last_acthd = acthd;
 }
 
 /* drm_dma.h hooks
-- 
1.7.1
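
For readers following along, here is a minimal userspace sketch of the
detection scheme the patch introduces (illustrative code, not the kernel
source; the struct, function, and sample values are invented for the
example). A hang is only declared once ACTHD and both INSTDONE values have
been observed unchanged across consecutive hangcheck periods, so a long
shader that stalls ACTHD while the execution units keep making progress no
longer trips the timer:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the dev_priv hangcheck fields. */
struct hangcheck_state {
	int count;
	uint32_t last_acthd, last_instdone, last_instdone1;
};

/* Returns true once all three sampled registers have stayed static
 * for more than one hangcheck period. */
static bool
hangcheck_sample(struct hangcheck_state *hc,
		 uint32_t acthd, uint32_t instdone, uint32_t instdone1)
{
	if (hc->last_acthd == acthd &&
	    hc->last_instdone == instdone &&
	    hc->last_instdone1 == instdone1)
		return hc->count++ > 1;

	/* Some unit made progress: restart the countdown. */
	hc->count = 0;
	hc->last_acthd = acthd;
	hc->last_instdone = instdone;
	hc->last_instdone1 = instdone1;
	return false;
}

int main(void)
{
	struct hangcheck_state hc = { 0 };
	int tick;

	/* ACTHD parked but INSTDONE still changing: never declared hung. */
	for (tick = 0; tick < 4; tick++)
		printf("busy shader, tick %d: hung=%d\n", tick,
		       hangcheck_sample(&hc, 0x1000, 0xf000 + tick, 0));

	/* Everything static: declared hung after three static checks. */
	for (tick = 0; tick < 4; tick++)
		printf("wedged, tick %d: hung=%d\n", tick,
		       hangcheck_sample(&hc, 0x1000, 0xf000, 0));
	return 0;
}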


[Intel-gfx] [PATCH] drm/i915: Remove the WARN when failing to set tiling.

2010-06-06 Thread Chris Wilson
We generally issue an error message at the point of failure, so this
warning, with its fairly pointless stacktrace, is superfluous and ugly.
Needless to say, the common trigger for this WARN happens to be -EIO,
where it is pure noise.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_tiling.c |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d..155719e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_release_mmap(obj);
 
if (ret != 0) {
-   WARN(ret != -ERESTARTSYS,
-        "failed to reset object for tiling switch");
args->tiling_mode = obj_priv->tiling_mode;
args->stride = obj_priv->stride;
goto err;
-- 
1.7.1


[Intel-gfx] screen flickering on gateway NV 59.

2010-06-06 Thread yiyu jia
Hi there,

I installed Fedora 13 on my Gateway NV59 notebook, which has an i5 processor.
I checked that all the drivers are installed, but the screen flickers on the
login screen, the desktop background, and some application windows.

There is a bug report in the Red Hat Bugzilla; you might be interested in
having a look at it: https://bugzilla.redhat.com/show_bug.cgi?id=585673

thanks and regards,


Yiyu Jia

Re: [Intel-gfx] [PATCH] intel: Add more intermediate sizes of cache buckets between powers of 2.

2010-06-06 Thread Xiang, Haihao
On Sat, 2010-06-05 at 08:16 +0800, Eric Anholt wrote:
 We had two cases recently where the rounding to powers of two hurt
 badly: 4:2:0 YUV HD video frames would round up from 2.2MB to 4MB,
 and Urban Terror was hitting aperture size limitations.  Mipmap trees for
 power of two sizes will land right in the middle between two cache
 buckets.
 
 By giving a few more sizes between powers of two, Urban Terror on my
 945 ends up consuming 207MB of GEM objects instead of 272MB.
 ---
  intel/intel_bufmgr_gem.c |   64 +++++++++++++++++++++++++++++++++++++++++++++++++---------------
  1 files changed, 49 insertions(+), 15 deletions(-)
 
 diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
 index b76fd7e..c3e189e 100644
 --- a/intel/intel_bufmgr_gem.c
 +++ b/intel/intel_bufmgr_gem.c
 @@ -66,6 +66,8 @@
   fprintf(stderr, __VA_ARGS__);   \
  } while (0)
  
 +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 +
  typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
  
  struct drm_intel_gem_bo_bucket {
 @@ -73,10 +75,6 @@ struct drm_intel_gem_bo_bucket {
   unsigned long size;
  };
  
 -/* Only cache objects up to 64MB.  Bigger than that, and the rounding of the
 - * size makes many operations fail that wouldn't otherwise.
 - */
 -#define DRM_INTEL_GEM_BO_BUCKETS 14
  typedef struct _drm_intel_bufmgr_gem {
   drm_intel_bufmgr bufmgr;
  
 @@ -93,7 +91,8 @@ typedef struct _drm_intel_bufmgr_gem {
   int exec_count;
  
   /** Array of lists of cached gem objects of power-of-two sizes */
 - struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
 + struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
 + int num_buckets;
  
   uint64_t gtt_size;
   int available_fences;
 @@ -285,7 +284,7 @@ drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
  {
   int i;
  
 - for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
 + for (i = 0; i < bufmgr_gem->num_buckets; i++) {
  struct drm_intel_gem_bo_bucket *bucket =
  &bufmgr_gem->cache_bucket[i];
  if (bucket->size >= size) {
 @@ -822,7 +821,7 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
  {
   int i;
  
 - for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
 + for (i = 0; i < bufmgr_gem->num_buckets; i++) {
  struct drm_intel_gem_bo_bucket *bucket =
  &bufmgr_gem->cache_bucket[i];
  
 @@ -1250,7 +1249,7 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
  pthread_mutex_destroy(&bufmgr_gem->lock);
  
   /* Free any cached buffer objects we were going to reuse */
 - for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
 + for (i = 0; i < bufmgr_gem->num_buckets; i++) {
  struct drm_intel_gem_bo_bucket *bucket =
  &bufmgr_gem->cache_bucket[i];
   drm_intel_bo_gem *bo_gem;
 @@ -1960,6 +1959,46 @@ drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
   return 0;
  }
  
 +static void
 +add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
 +{
 + unsigned int i = bufmgr_gem->num_buckets;
 +
 + assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
 +
 + DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
 + bufmgr_gem->cache_bucket[i].size = size;
 + bufmgr_gem->num_buckets++;
 +}
 +
 +static void
 +init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
 +{
 + unsigned long size, cache_max_size = 64 * 1024 * 1024;
 +
 + /* Initialize the linked lists for BO reuse cache. */
 + for (size = 4096; size <= cache_max_size; size *= 2) {
 + add_bucket(bufmgr_gem, size);
 +
 + /* OK, so power of two buckets was too wasteful of
 +  * memory.  Give 3 other sizes between each power of
 +  * two, to hopefully cover things accurately enough.
 +  * (The alternative is probably to just go for exact
 +  * matching of sizes, and assume that for things like
 +  * composited window resize the tiled width/height
 +  * alignment and rounding of sizes to pages will get
 +  * us useful cache hit rates anyway)
 +  */
 + if (size == 8192) {
 + add_bucket(bufmgr_gem, size + size / 2);
 + } else if (size < cache_max_size) {
 + add_bucket(bufmgr_gem, size + size * 1 / 4);
 + add_bucket(bufmgr_gem, size + size * 2 / 4);
 + add_bucket(bufmgr_gem, size + size * 3 / 4);
 + }
 + }
 +}
 +
  /**
   * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
   * and manage map buffer objections.
 @@ -1972,8 +2011,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
   drm_intel_bufmgr_gem *bufmgr_gem;
   struct drm_i915_gem_get_aperture aperture;
   drm_i915_getparam_t gp;
 - int ret, i;
 - unsigned long size;
 + int ret;
   int exec2 = 0;
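
The effect of the new bucket ladder is easy to check outside the driver.
Below is a hypothetical standalone demo (only add_bucket() and
init_cache_buckets() mirror the logic quoted above; the harness around them
is invented for illustration) that prints where the ~2.2MB YUV frame from
the commit message now lands:

#include <stdio.h>

#define MAX_BUCKETS (14 * 4)	/* matches the cache_bucket[] size above */

static unsigned long buckets[MAX_BUCKETS];
static int num_buckets;

static void add_bucket(unsigned long size)
{
	buckets[num_buckets++] = size;
}

/* Same ladder as the patch: powers of two from 4KB to 64MB, a single
 * 12KB step, and 1.25x/1.5x/1.75x intermediates below the cap. */
static void init_cache_buckets(void)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	for (size = 4096; size <= cache_max_size; size *= 2) {
		add_bucket(size);

		if (size == 8192) {
			add_bucket(size + size / 2);
		} else if (size < cache_max_size) {
			add_bucket(size + size * 1 / 4);
			add_bucket(size + size * 2 / 4);
			add_bucket(size + size * 3 / 4);
		}
	}
}

/* First-fit lookup, as in drm_intel_gem_bo_bucket_for_size(). */
static unsigned long bucket_for_size(unsigned long size)
{
	int i;

	for (i = 0; i < num_buckets; i++)
		if (buckets[i] >= size)
			return buckets[i];
	return 0;	/* larger than the cache cap */
}

int main(void)
{
	unsigned long frame = 2200 * 1024;	/* ~2.2MB 4:2:0 HD frame */

	init_cache_buckets();
	printf("%d buckets; %luKB request -> %luKB bucket\n",
	       num_buckets, frame / 1024, bucket_for_size(frame) / 1024);
	return 0;
}

With power-of-two buckets alone, the same request would have been rounded
up to the 4MB bucket; here it lands in a 2.5MB one.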