Re: [Intel-gfx] [RFC v4 06/14] drm/i915/vm_bind: Handle persistent vmas

2022-09-26 Thread Niranjana Vishwanathapura

Re: [Intel-gfx] [RFC v4 06/14] drm/i915/vm_bind: Handle persistent vmas

2022-09-26 Thread Zeng, Oak

[Intel-gfx] [RFC v4 06/14] drm/i915/vm_bind: Handle persistent vmas

2022-09-21 Thread Niranjana Vishwanathapura
Treat VM_BIND vmas as persistent across execbuf ioctl calls and handle
them during request submission in the execbuf path.

Support eviction by maintaining a list of evicted persistent vmas
for rebinding during the next submission.

Signed-off-by: Niranjana Vishwanathapura 
Signed-off-by: Andi Shyti 
---
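
(Not part of the patch: a minimal sketch of how a submission path might
drain the rebind list introduced below. Evicted persistent vmas are parked
on vm->vm_rebind_list under vm->vm_rebind_lock and rebound on the next
execbuf. The helper rebind_one() is a hypothetical placeholder for whatever
binding call the execbuf path actually uses; only vm_rebind_list,
vm_rebind_lock and vm_rebind_link come from this patch.)

static int rebind_evicted_vmas(struct i915_address_space *vm)
{
        struct i915_vma *vma;
        int err = 0;

        spin_lock(&vm->vm_rebind_lock);
        while (!list_empty(&vm->vm_rebind_list)) {
                vma = list_first_entry(&vm->vm_rebind_list,
                                       struct i915_vma, vm_rebind_link);
                /* Detach first so a concurrent unbind sees an empty link */
                list_del_init(&vma->vm_rebind_link);
                spin_unlock(&vm->vm_rebind_lock);

                /* Hypothetical: re-create the GPU binding; may sleep */
                err = rebind_one(vma);

                spin_lock(&vm->vm_rebind_lock);
                if (err)
                        break;
        }
        spin_unlock(&vm->vm_rebind_lock);

        return err;
}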
 .../drm/i915/gem/i915_gem_vm_bind_object.c|  7 +++
 drivers/gpu/drm/i915/gt/intel_gtt.c   |  2 +
 drivers/gpu/drm/i915/gt/intel_gtt.h   |  4 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 39 
 drivers/gpu/drm/i915/i915_gem_gtt.h   |  3 ++
 drivers/gpu/drm/i915/i915_vma.c   | 46 +++
 drivers/gpu/drm/i915/i915_vma.h   | 45 +-
 drivers/gpu/drm/i915/i915_vma_types.h | 17 +++
 8 files changed, 151 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
index 7ca6a41fc981..236f901b8b9c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
@@ -91,6 +91,12 @@ static void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
 {
lockdep_assert_held(&vma->vm->vm_bind_lock);
 
+   spin_lock(&vma->vm->vm_rebind_lock);
+   if (!list_empty(&vma->vm_rebind_link))
+   list_del_init(&vma->vm_rebind_link);
+   i915_vma_set_purged(vma);
+   spin_unlock(&vma->vm->vm_rebind_lock);
+
list_del_init(&vma->vm_bind_link);
list_del_init(&vma->non_priv_vm_bind_link);
i915_vm_bind_it_remove(vma, &vma->vm->va);
@@ -181,6 +187,7 @@ static struct i915_vma *vm_bind_get_vma(struct i915_address_space *vm,
 
vma->start = va->start;
vma->last = va->start + va->length - 1;
+   i915_vma_set_persistent(vma);
 
return vma;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index da4f9dee0397..6db31197fa87 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -296,6 +296,8 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->non_priv_vm_bind_list);
vm->root_obj = i915_gem_object_create_internal(vm->i915, PAGE_SIZE);
GEM_BUG_ON(IS_ERR(vm->root_obj));
+   INIT_LIST_HEAD(&vm->vm_rebind_list);
+   spin_lock_init(&vm->vm_rebind_lock);
 }
 
 void *__px_vaddr(struct drm_i915_gem_object *p)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 3f2e87d3bf34..b73d35b4e05d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -273,6 +273,10 @@ struct i915_address_space {
struct list_head vm_bind_list;
/** @vm_bound_list: List of vm_binding completed */
struct list_head vm_bound_list;
+   /* @vm_rebind_list: list of vmas to be rebound */
+   struct list_head vm_rebind_list;
+   /* @vm_rebind_lock: protects vm_rebind_list */
+   spinlock_t vm_rebind_lock;
/* @va: tree of persistent vmas */
struct rb_root_cached va;
struct list_head non_priv_vm_bind_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..b7d0844de561 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -25,6 +25,45 @@
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 
+/**
+ * i915_vm_sync() - Wait until address space is not in use
+ * @vm: address space
+ *
+ * Waits until all requests using the address space are complete.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+int i915_vm_sync(struct i915_address_space *vm)
+{
+   int ret;
+
+   /* Wait for all requests under this vm to finish */
+   ret = dma_resv_wait_timeout(vm->root_obj->base.resv,
+   DMA_RESV_USAGE_BOOKKEEP, false,
+   MAX_SCHEDULE_TIMEOUT);
+   if (ret < 0)
+   return ret;
+   else if (ret > 0)
+   return 0;
+   else
+   return -ETIMEDOUT;
+}
+
+/**
+ * i915_vm_is_active() - Check if address space is being used
+ * @vm: address space
+ *
+ * Check if any request using the specified address space is
+ * active.
+ *
+ * Returns: true if address space is active, false otherwise.
+ */
+bool i915_vm_is_active(const struct i915_address_space *vm)
+{
+   return !dma_resv_test_signaled(vm->root_obj->base.resv,
+  DMA_RESV_USAGE_BOOKKEEP);
+}
+
 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
   struct sg_table *pages)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8c2f57eb5dda..a5bbdc59d9df 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -51,4 +51,7 @@ int
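
(Also not from this series: a hedged sketch of how a caller might combine
the two helpers added to i915_gem_gtt.c above, checking for activity
cheaply before paying for a full wait. The wrapper name vm_idle_example()
is hypothetical.)

static int vm_idle_example(struct i915_address_space *vm)
{
        /* Fast path: nothing is tracked against the VM's root object */
        if (!i915_vm_is_active(vm))
                return 0;

        /* Otherwise block until all requests using the VM complete */
        return i915_vm_sync(vm);
}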