As we may be using a partial vma for fence allocation, rather than
revoke the mmap of all vma on the object only revoke the overlapping
mmap for this fence.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |  4 ++--
 drivers/gpu/drm/i915/i915_vma.c           | 25 +++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_vma.h           |  1 +
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 52d5696f9e49..c5cd4dcc54ba 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -240,7 +240,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
                /* Ensure that all userspace CPU access is completed before
                 * stealing the fence.
                 */
-               i915_gem_release_mmap(fence->vma->obj);
+               i915_vma_revoke_mmap(fence->vma);
 
                fence->vma->fence = NULL;
                fence->vma = NULL;
@@ -379,7 +379,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
                struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
 
                if (fence->vma)
-                       i915_gem_release_mmap(fence->vma->obj);
+                       i915_vma_revoke_mmap(fence->vma);
        }
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fd671d493ca6..c4817ad7c965 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -642,6 +642,28 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
        vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+       struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       if (!i915_vma_has_userfault(vma))
+               return;
+
+       GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+       GEM_BUG_ON(list_empty(&vma->obj->userfault_link));
+
+       unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+                           drm_vma_node_offset_addr(node) + vma->ggtt_view.partial.offset,
+                           vma->size,
+                           1);
+       vma->flags &= ~I915_VMA_USERFAULT;
+
+       if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+               list_del_init(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
@@ -705,8 +727,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                        return ret;
 
                /* Force a pagefault for domain tracking on next user access */
-               if (i915_vma_has_userfault(vma))
-                       i915_gem_release_mmap(obj);
+               i915_vma_revoke_mmap(vma);
 
                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 78afbac2bca8..218f9d48e7c6 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -247,6 +247,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
-- 
2.13.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to