Author: arekm                        Date: Fri Apr 15 08:36:31 2011 GMT
Module: packages                      Tag: HEAD
---- Log message:
- drm fixes from http://intellinuxgraphics.org/2011Q1.html

---- Files affected:
packages/kernel:
   kernel.spec (1.905 -> 1.906) , kernel-drm-intel-2011Q1.patch (NONE -> 1.1) (NEW)

---- Diffs:

================================================================
Index: packages/kernel/kernel.spec
diff -u packages/kernel/kernel.spec:1.905 packages/kernel/kernel.spec:1.906
--- packages/kernel/kernel.spec:1.905   Fri Apr 15 10:16:51 2011
+++ packages/kernel/kernel.spec Fri Apr 15 10:36:25 2011
@@ -276,6 +276,8 @@
 
 Patch2200:     kernel-xfs-delaylog.patch
 
+Patch2300:     kernel-drm-intel-2011Q1.patch
+
 # kill some thousands of warnings
 # (only warnings, so just remove parts of this patch if conflics)
 Patch2500:     kernel-warnings.patch
@@ -766,6 +768,8 @@
 
 %patch2200 -p1
 
+%patch2300 -p1
+
 %patch2500 -p1
 
 %if %{with rescuecd}
@@ -1528,6 +1532,9 @@
 All persons listed below can be reached at <cvs_login>@pld-linux.org
 
 $Log$
+Revision 1.906  2011/04/15 08:36:25  arekm
+- drm fixes from http://intellinuxgraphics.org/2011Q1.html
+
 Revision 1.905  2011/04/15 08:16:51  arekm
 - 2.6.38.3 (not ready); vserver up to patch-2.6.38.3-vs2.3.0.37-rc12.diff
 

================================================================
Index: packages/kernel/kernel-drm-intel-2011Q1.patch
diff -u /dev/null packages/kernel/kernel-drm-intel-2011Q1.patch:1.1
--- /dev/null   Fri Apr 15 10:36:31 2011
+++ packages/kernel/kernel-drm-intel-2011Q1.patch       Fri Apr 15 10:36:25 2011
@@ -0,0 +1,531 @@
+commit c94249d2a6911daf74f329e05c42e076af2cd024
+Author: Chris Wilson <[email protected]>
+Date:   Thu Mar 24 20:34:41 2011 +0000
+
+    drm/i915: Enable GPU semaphores by default
+    
+    It looks like we have fixed the underlying bugs, so enable GPU
+    semaphores by default again.
+    
+    Signed-off-by: Chris Wilson <[email protected]>
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 22ec066..b1fbb09 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -46,7 +46,7 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+ unsigned int i915_powersave = 1;
+ module_param_named(powersave, i915_powersave, int, 0600);
+ 
+-unsigned int i915_semaphores = 0;
++unsigned int i915_semaphores = 1;
+ module_param_named(semaphores, i915_semaphores, int, 0600);
+ 
+ unsigned int i915_enable_rc6 = 0;
+
+commit 8d6eefa3e760be5cfb71ae11ce1714fd8b26801a
+Author: Chris Wilson <[email protected]>
+Date:   Sun Mar 20 21:09:12 2011 +0000
+
+    drm/i915: Avoid unmapping pages from a NULL address space
+    
+    Found by gem_stress.
+    
+    As we perform retirement from a workqueue, it is possible for us to free
+    and unbind objects after the last close on the device, and so after the
+    address space has been torn down and reset to NULL:
+    
+    BUG: unable to handle kernel NULL pointer dereference at 00000054
+    IP: [<c1295a20>] mutex_lock+0xf/0x27
+    *pde = 00000000
+    Oops: 0002 [#1] SMP
+    last sysfs file: /sys/module/vt/parameters/default_utf8
+    
+    Pid: 5, comm: kworker/u:0 Not tainted 2.6.38+ #214
+    EIP: 0060:[<c1295a20>] EFLAGS: 00010206 CPU: 1
+    EIP is at mutex_lock+0xf/0x27
+    EAX: 00000054 EBX: 00000054 ECX: 00000000 EDX: 00012fff
+    ESI: 00000028 EDI: 00000000 EBP: f706fe20 ESP: f706fe18
+     DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
+    Process kworker/u:0 (pid: 5, ti=f706e000 task=f7060d00 task.ti=f706e000)
+    Stack:
+     f5aa3c60 00000000 f706fe74 c107e7df 00000246 dea55380 00000054 f5aa3c60
+     f706fe44 00000061 f70b4000 c13fff84 00000008 f706fe54 00000000 00000000
+     00012f00 00012fff 00000028 c109e575 f6b36700 00100000 00000000 f706fe90
+    Call Trace:
+     [<c107e7df>] unmap_mapping_range+0x7d/0x1e6
+     [<c109e575>] ? mntput_no_expire+0x52/0xb6
+     [<c11c12f6>] i915_gem_release_mmap+0x49/0x58
+     [<c11c3449>] i915_gem_object_unbind+0x4c/0x125
+     [<c11c353f>] i915_gem_free_object_tail+0x1d/0xdb
+     [<c11c55a2>] i915_gem_free_object+0x3d/0x41
+     [<c11a6be2>] ? drm_gem_object_free+0x0/0x27
+     [<c11a6c07>] drm_gem_object_free+0x25/0x27
+     [<c113c3ca>] kref_put+0x39/0x42
+     [<c11c0a59>] drm_gem_object_unreference+0x16/0x18
+     [<c11c0b15>] i915_gem_object_move_to_inactive+0xba/0xbe
+     [<c11c0c87>] i915_gem_retire_requests_ring+0x16e/0x1a5
+     [<c11c3645>] i915_gem_retire_requests+0x48/0x63
+     [<c11c36ac>] i915_gem_retire_work_handler+0x4c/0x117
+     [<c10385d1>] process_one_work+0x140/0x21b
+     [<c103734c>] ? __need_more_worker+0x13/0x2a
+     [<c10373b1>] ? need_to_create_worker+0x1c/0x35
+     [<c11c3660>] ? i915_gem_retire_work_handler+0x0/0x117
+     [<c1038faf>] worker_thread+0xd4/0x14b
+     [<c1038edb>] ? worker_thread+0x0/0x14b
+     [<c103be1b>] kthread+0x68/0x6d
+     [<c103bdb3>] ? kthread+0x0/0x6d
+     [<c12970f6>] kernel_thread_helper+0x6/0x10
+    Code: 00 e8 98 fe ff ff 5d c3 55 89 e5 3e 8d 74 26 00 ba 01 00 00 00 e8
+    84 fe ff ff 5d c3 55 89 e5 53 8d 64 24 fc 3e 8d 74 26 00 89 c3 <f0> ff
+    08 79 05 e8 ab ff ff ff 89 e0 25 00 e0 ff ff 89 43 10 58
+    EIP: [<c1295a20>] mutex_lock+0xf/0x27 SS:ESP 0068:f706fe18
+    CR2: 0000000000000054
+    
+    Signed-off-by: Chris Wilson <[email protected]>
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 882a82f..6c435a3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1321,9 +1321,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+       if (!obj->fault_mappable)
+               return;
+ 
+-      unmap_mapping_range(obj->base.dev->dev_mapping,
+-                          (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+-                          obj->base.size, 1);
++      if (obj->base.dev->dev_mapping)
++              unmap_mapping_range(obj->base.dev->dev_mapping,
++                                  (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
++                                  obj->base.size, 1);
+ 
+       obj->fault_mappable = false;
+ }
+
+commit ca5b7809a236b337c2b51ed30660ab92052fe648
+Author: Chris Wilson <[email protected]>
+Date:   Wed Mar 23 08:26:43 2011 +0000
+
+    drm/i915: Restore missing command flush before interrupt on BLT ring
+    
+    We always skipped flushing the BLT ring if the request flush did not
+    include the RENDER domain. However, this neglects that we try to flush
+    the COMMAND domain after every batch and before the breadcrumb interrupt
+    (to make sure the batch is indeed completed prior to the interrupt
+    firing and so insuring CPU coherency). As a result of the missing flush,
+    incoherency did indeed creep in, most notable when using lots of command
+    buffers and so potentially rewritting an active command buffer (i.e.
+    the GPU was still executing from it even though the following interrupt
+    had already fired and the request/buffer retired).
+    
+    As all ring->flush routines now have the same preconditions, de-duplicate
+    and move those checks up into i915_gem_flush_ring().
+    
+    Fixes gem_linear_blit.
+    
+    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=35284
+    Signed-off-by: Chris Wilson <[email protected]>
+    Reviewed-by: Daniel Vetter <[email protected]>
+    Tested-by: [email protected]
+    [ickle: backported to 2.6.38]
+    
+    Conflicts:
+    
+       drivers/gpu/drm/i915/i915_gem.c
+       drivers/gpu/drm/i915/intel_ringbuffer.c
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 07cd0a3..882a82f 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1669,9 +1669,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+ }
+ 
+ static void
+-i915_gem_process_flushing_list(struct drm_device *dev,
+-                             uint32_t flush_domains,
+-                             struct intel_ring_buffer *ring)
++i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
++                             uint32_t flush_domains)
+ {
+       struct drm_i915_gem_object *obj, *next;
+ 
+@@ -1684,7 +1683,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
+                       obj->base.write_domain = 0;
+                       list_del_init(&obj->gpu_write_list);
+                       i915_gem_object_move_to_active(obj, ring,
+-                                                     i915_gem_next_request_seqno(dev, ring));
++                                                     i915_gem_next_request_seqno(ring->dev, ring));
+ 
+                       trace_i915_gem_object_change_domain(obj,
+                                                           obj->base.read_domains,
+@@ -2178,11 +2177,16 @@ i915_gem_flush_ring(struct drm_device *dev,
+ {
+       int ret;
+ 
++      if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
++              return 0;
++
+       ret = ring->flush(ring, invalidate_domains, flush_domains);
+       if (ret)
+               return ret;
+ 
+-      i915_gem_process_flushing_list(dev, flush_domains, ring);
++      if (flush_domains & I915_GEM_GPU_DOMAINS)
++              i915_gem_process_flushing_list(ring, flush_domains);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 445f27e..9fde32e 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -62,77 +62,63 @@ render_ring_flush(struct intel_ring_buffer *ring,
+                 u32   flush_domains)
+ {
+       struct drm_device *dev = ring->dev;
+-      drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 cmd;
+       int ret;
+ 
+-#if WATCH_EXEC
+-      DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+-                invalidate_domains, flush_domains);
+-#endif
+-
+-      trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
+-                                   invalidate_domains, flush_domains);
++      /*
++       * read/write caches:
++       *
++       * I915_GEM_DOMAIN_RENDER is always invalidated, but is
++       * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
++       * also flushed at 2d versus 3d pipeline switches.
++       *
++       * read-only caches:
++       *
++       * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
++       * MI_READ_FLUSH is set, and is always flushed on 965.
++       *
++       * I915_GEM_DOMAIN_COMMAND may not exist?
++       *
++       * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
++       * invalidated when MI_EXE_FLUSH is set.
++       *
++       * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
++       * invalidated with every MI_FLUSH.
++       *
++       * TLBs:
++       *
++       * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
++       * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
++       * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
++       * are flushed at any MI_FLUSH.
++       */
+ 
+-      if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
++      cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++      if ((invalidate_domains|flush_domains) &
++          I915_GEM_DOMAIN_RENDER)
++              cmd &= ~MI_NO_WRITE_FLUSH;
++      if (INTEL_INFO(dev)->gen < 4) {
+               /*
+-               * read/write caches:
+-               *
+-               * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+-               * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+-               * also flushed at 2d versus 3d pipeline switches.
+-               *
+-               * read-only caches:
+-               *
+-               * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+-               * MI_READ_FLUSH is set, and is always flushed on 965.
+-               *
+-               * I915_GEM_DOMAIN_COMMAND may not exist?
+-               *
+-               * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+-               * invalidated when MI_EXE_FLUSH is set.
+-               *
+-               * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+-               * invalidated with every MI_FLUSH.
+-               *
+-               * TLBs:
+-               *
+-               * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+-               * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+-               * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+-               * are flushed at any MI_FLUSH.
++               * On the 965, the sampler cache always gets flushed
++               * and this bit is reserved.
+                */
++              if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
++                      cmd |= MI_READ_FLUSH;
++      }
++      if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
++              cmd |= MI_EXE_FLUSH;
+ 
+-              cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+-              if ((invalidate_domains|flush_domains) &
+-                  I915_GEM_DOMAIN_RENDER)
+-                      cmd &= ~MI_NO_WRITE_FLUSH;
+-              if (INTEL_INFO(dev)->gen < 4) {
+-                      /*
+-                       * On the 965, the sampler cache always gets flushed
+-                       * and this bit is reserved.
+-                       */
+-                      if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+-                              cmd |= MI_READ_FLUSH;
+-              }
+-              if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+-                      cmd |= MI_EXE_FLUSH;
+-
+-              if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+-                  (IS_G4X(dev) || IS_GEN5(dev)))
+-                      cmd |= MI_INVALIDATE_ISP;
++      if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
++          (IS_G4X(dev) || IS_GEN5(dev)))
++              cmd |= MI_INVALIDATE_ISP;
+ 
+-#if WATCH_EXEC
+-              DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+-#endif
+-              ret = intel_ring_begin(ring, 2);
+-              if (ret)
+-                      return ret;
++      ret = intel_ring_begin(ring, 2);
++      if (ret)
++              return ret;
+ 
+-              intel_ring_emit(ring, cmd);
+-              intel_ring_emit(ring, MI_NOOP);
+-              intel_ring_advance(ring);
+-      }
++      intel_ring_emit(ring, cmd);
++      intel_ring_emit(ring, MI_NOOP);
++      intel_ring_advance(ring);
+ 
+       return 0;
+ }
+@@ -580,9 +566,6 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
+ {
+       int ret;
+ 
+-      if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+-              return 0;
+-
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+@@ -1064,9 +1047,6 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
+       uint32_t cmd;
+       int ret;
+ 
+-      if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
+-              return 0;
+-
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+@@ -1238,9 +1218,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
+       uint32_t cmd;
+       int ret;
+ 
+-      if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
+-              return 0;
+-
+       ret = blt_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+commit fc33e71579d6c1faf3326a94f5f6d1ffad47ecdd
+Author: Chris Wilson <[email protected]>
+Date:   Sat Feb 12 10:33:12 2011 +0000
+
+    drm/i915/dp: Sanity check eDP existence
+    
+    Some hardware claims to have both an LVDS panel and an eDP output.
+    Whilst this may be true in a rare case, more often it is just broken
+    hardware. If we see an eDP device we know that it must be connected and
+    so we can confirm its existence with a simple probe.
+    
+    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=34165
+    Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=24822
+    Signed-off-by: Chris Wilson <[email protected]>
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 51cb4e3..27e2088 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1917,7 +1917,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+                               dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+                                       DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+               } else {
+-                      DRM_ERROR("failed to retrieve link info\n");
++                      /* if this fails, presume the device is a ghost */
++                      DRM_INFO("failed to retrieve link info, disabling eDP\n");
++                      intel_dp_encoder_destroy(&intel_dp->base.base);
++                      intel_dp_destroy(&intel_connector->base);
++                      return;
+               }
+               if (!was_on)
+                       ironlake_edp_panel_off(dev);
+
+commit 1c9fa75a74b584952ef406291e99a48a9d1476fd
+Author: Chris Wilson <[email protected]>
+Date:   Tue Mar 15 11:04:41 2011 +0000
+
+    drm: Retry i2c transfer of EDID block after failure
+    
+    Usually EDID retrieval is fine. However, sometimes, especially when the
+    machine is loaded, it fails, but succeeds after a few retries.
+    
+    Based on a patch by Michael Buesch.
+    
+    Reported-by: Michael Buesch <[email protected]>
+    Signed-off-by: Chris Wilson <[email protected]>
+    Reviewed-by: Alex Deucher <[email protected]>
+    Signed-off-by: Dave Airlie <[email protected]>
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index a245d17..ddc3da9 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+                     int block, int len)
+ {
+       unsigned char start = block * EDID_LENGTH;
+-      struct i2c_msg msgs[] = {
+-              {
+-                      .addr   = DDC_ADDR,
+-                      .flags  = 0,
+-                      .len    = 1,
+-                      .buf    = &start,
+-              }, {
+-                      .addr   = DDC_ADDR,
+-                      .flags  = I2C_M_RD,
+-                      .len    = len,
+-                      .buf    = buf,
+-              }
+-      };
++      int ret, retries = 5;
+ 
+-      if (i2c_transfer(adapter, msgs, 2) == 2)
+-              return 0;
++      /* The core i2c driver will automatically retry the transfer if the
++       * adapter reports EAGAIN. However, we find that bit-banging transfers
++       * are susceptible to errors under a heavily loaded machine and
++       * generate spurious NAKs and timeouts. Retrying the transfer
++       * of the individual block a few times seems to overcome this.
++       */
++      do {
++              struct i2c_msg msgs[] = {
++                      {
++                              .addr   = DDC_ADDR,
++                              .flags  = 0,
++                              .len    = 1,
++                              .buf    = &start,
++                      }, {
++                              .addr   = DDC_ADDR,
++                              .flags  = I2C_M_RD,
++                              .len    = len,
++                              .buf    = buf,
++                      }
++              };
++              ret = i2c_transfer(adapter, msgs, 2);
++      } while (ret != 2 && --retries);
+ 
+-      return -1;
++      return ret == 2 ? 0 : -1;
+ }
+ 
+ static u8 *
+
+commit f48629cff5bf3a0df923ce0314ace584212afbe7
+Author: Chris Wilson <[email protected]>
+Date:   Thu Mar 17 15:23:22 2011 +0000
+
+    drm/i915: Fix tiling corruption from pipelined fencing
+    
+    ... even though it was disabled. A mistake in the handling of fence reuse
+    caused us to skip the vital delay of waiting for the object to finish
+    rendering before changing the register. This resulted in us changing the
+    fence register whilst the bo was active and so causing the blits to
+    complete using the wrong stride or even the wrong tiling. (Visually the
+    effect is that small blocks of the screen look like they have been
+    interlaced). The fix is to wait for the GPU to finish using the memory
+    region pointed to by the fence before changing it.
+    
+    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=34584
+    Cc: Andy Whitcroft <[email protected]>
+    Cc: Daniel Vetter <[email protected]>
+    Reviewed-by: Daniel Vetter <[email protected]>
+    [Note for 2.6.38-stable, we need to reintroduce the interruptible passing]
+    Signed-off-by: Chris Wilson <[email protected]>
+    Tested-by: Dave Airlie <[email protected]>
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 36e66cc..10378a3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2544,8 +2544,25 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+               reg = &dev_priv->fence_regs[obj->fence_reg];
+               list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ 
+-              if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+-                      pipelined = NULL;
++              if (obj->tiling_changed) {
++                      ret = i915_gem_object_flush_fence(obj,
++                                                        pipelined,
++                                                        interruptible);
++                      if (ret)
++                              return ret;
++
++                      if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
++                              pipelined = NULL;
++
++                      if (pipelined) {
++                              reg->setup_seqno =
++                                      i915_gem_next_request_seqno(dev, pipelined);
++                              obj->last_fenced_seqno = reg->setup_seqno;
++                              obj->last_fenced_ring = pipelined;
++                      }
++
++                      goto update;
++              }
+ 
+               if (!pipelined) {
+                       if (reg->setup_seqno) {
+@@ -2568,31 +2585,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+                                                         interruptible);
+                       if (ret)
+                               return ret;
+-              } else if (obj->tiling_changed) {
+-                      if (obj->fenced_gpu_access) {
+-                              if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+-                                      ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+-                                                                0, obj->base.write_domain);
+-                                      if (ret)
+-                                              return ret;
+-                              }
+-
+-                              obj->fenced_gpu_access = false;
+-                      }
+-              }
+-
+-              if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+-                      pipelined = NULL;
+-              BUG_ON(!pipelined && reg->setup_seqno);
+-
+-              if (obj->tiling_changed) {
+-                      if (pipelined) {
+-                              reg->setup_seqno =
+-                                      i915_gem_next_request_seqno(dev, pipelined);
+-                              obj->last_fenced_seqno = reg->setup_seqno;
+-                              obj->last_fenced_ring = pipelined;
+-                      }
+-                      goto update;
+               }
+ 
+               return 0;
================================================================

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel.spec?r1=1.905&r2=1.906&f=u
