Re: [PATCH v3 04/37] drm/i915/region: support contiguous allocations

2019-08-13 Thread Daniel Vetter
On Fri, Aug 09, 2019 at 11:26:10PM +0100, Matthew Auld wrote:
> Some objects may need to be allocated as a contiguous block; thinking
> ahead, the various kernel io_mapping interfaces seem to expect it.

Not really, we can vmalloc for iomappings too.
-Daniel
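
For illustration, a minimal sketch of the point above (not code from this
series): vmap() can give scattered pages a single kernel mapping, so a
physically contiguous allocation is not strictly required. The helper name
and the page array are hypothetical; write-combining is chosen only to mimic
io-mapping semantics.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map a discontiguous page array into one virtually contiguous range. */
static void *map_pages_noncontig(struct page **pages, unsigned int n_pages)
{
	/* No physically contiguous backing store is needed here. */
	return vmap(pages, n_pages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}

The mapping is later torn down with vunmap() on the returned address.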

> 
> Signed-off-by: Matthew Auld 
> Cc: Joonas Lahtinen 
> Cc: Abdiel Janulgue 
> ---
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
>  drivers/gpu/drm/i915/gem/i915_gem_region.c|  10 +-
>  drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
>  .../drm/i915/selftests/intel_memory_region.c  | 152 +-
>  drivers/gpu/drm/i915/selftests/mock_region.c  |   5 +-
>  5 files changed, 166 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 5e2fa37e9bc0..eb92243d473b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -116,6 +116,10 @@ struct drm_i915_gem_object {
>  
>   I915_SELFTEST_DECLARE(struct list_head st_link);
>  
> + unsigned long flags;
> +#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
> +#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
> +
>   /*
>* Is the object to be mapped as read-only to the GPU
>* Only honoured if hardware has relevant pte bit
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
> index be126e70c90f..d9cd722b5dbf 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
> @@ -42,6 +42,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
>   return -ENOMEM;
>   }
>  
> + if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
> + flags = I915_ALLOC_CONTIGUOUS;
> +
>   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
>   if (ret)
>   goto err_free_sg;
> @@ -98,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
>  }
>  
>  void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
> - struct intel_memory_region *mem)
> + struct intel_memory_region *mem,
> + unsigned long flags)
>  {
>   INIT_LIST_HEAD(&obj->mm.blocks);
>   obj->mm.region = mem;
> + obj->flags = flags;
>  
>   mutex_lock(&mem->obj_lock);
>   list_add(&obj->mm.region_link, &mem->objects);
> @@ -125,6 +130,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
>   if (!mem)
>   return ERR_PTR(-ENODEV);
>  
> + if (flags & ~I915_BO_ALLOC_FLAGS)
> + return ERR_PTR(-EINVAL);
> +
>   size = round_up(size, mem->min_page_size);
>  
>   GEM_BUG_ON(!size);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
> index ebddc86d78f7..f2ff6f8bff74 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
> @@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
>struct sg_table *pages);
>  
>  void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
> - struct intel_memory_region *mem);
> + struct intel_memory_region *mem,
> + unsigned long flags);
>  void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
>  
>  struct drm_i915_gem_object *
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index 2f13e4c1d999..70b467d4e811 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -81,17 +81,17 @@ static int igt_mock_fill(void *arg)
>  
>  static void igt_mark_evictable(struct drm_i915_gem_object *obj)
>  {
> - i915_gem_object_unpin_pages(obj);
> + if (i915_gem_object_has_pinned_pages(obj))
> + i915_gem_object_unpin_pages(obj);
>   obj->mm.madv = I915_MADV_DONTNEED;
>   list_move(&obj->mm.region_link, &obj->mm.region->purgeable);
>  }
>  
> -static int igt_mock_shrink(void *arg)
> +static int igt_frag_region(struct intel_memory_region *mem,
> +struct list_head *objects)
>  {
> - struct intel_memory_region *mem = arg;
>   struct drm_i915_gem_object *obj;
>   unsigned long n_objects;
> - LIST_HEAD(objects);
>   resource_size_t target;
>   resource_size_t total;
>   int err = 0;
> @@ -109,7 +109,7 @@ static int igt_mock_shrink(void *arg)
>   goto err_close_objects;
>   }
>  
> - list_add(&obj->st_link, &objects);
> + list_add(&obj->st_link, objects);
>  
>   err = i915_gem_object_pin_pages(obj);

Re: [PATCH v3 04/37] drm/i915/region: support contiguous allocations

2019-08-10 Thread Chris Wilson
Quoting Matthew Auld (2019-08-09 23:26:10)
> Some objects may need to be allocated as a contiguous block; thinking
> ahead, the various kernel io_mapping interfaces seem to expect it.

But we could always use scattergather over top...
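
As a minimal sketch of that idea (an assumption, not part of this patch), a
scatter-gather table can be built over discontiguous pages so that consumers
walk the chunks instead of demanding one physically contiguous block; the
helper name is hypothetical.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Wrap an arbitrary page array in an sg_table. */
static struct sg_table *sg_over_pages(struct page **pages,
				      unsigned int n_pages, size_t size)
{
	struct sg_table *sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	int ret;

	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* Physically adjacent pages are coalesced into fewer sg entries. */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}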

> @@ -98,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
>  }
>  
>  void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
> -   struct intel_memory_region *mem)
> +   struct intel_memory_region *mem,
> +   unsigned long flags)
>  {
> INIT_LIST_HEAD(&obj->mm.blocks);
> obj->mm.region = mem;
> +   obj->flags = flags;
>  
> mutex_lock(&mem->obj_lock);
> list_add(&obj->mm.region_link, &mem->objects);
> @@ -125,6 +130,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
> if (!mem)
> return ERR_PTR(-ENODEV);
>  
> +   if (flags & ~I915_BO_ALLOC_FLAGS)
> +   return ERR_PTR(-EINVAL);

This is a programmer error, not a user error.

> +
> size = round_up(size, mem->min_page_size);
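
A sketch of the alternative being suggested here (an assumption about the
follow-up, not code from this series): callers of
i915_gem_object_create_region() are all in-kernel, so an unknown flag can be
treated as a programming bug rather than a runtime error:

	/* Internal API misuse is a bug, not something to report to a user. */
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

A typical caller would then simply pass the new flag, for example
i915_gem_object_create_region(mem, size, I915_BO_ALLOC_CONTIGUOUS).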

[PATCH v3 04/37] drm/i915/region: support contiguous allocations

2019-08-09 Thread Matthew Auld
Some objects may need to be allocated as a contiguous block; thinking
ahead, the various kernel io_mapping interfaces seem to expect it.

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 152 +-
 drivers/gpu/drm/i915/selftests/mock_region.c  |   5 +-
 5 files changed, 166 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5e2fa37e9bc0..eb92243d473b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -116,6 +116,10 @@ struct drm_i915_gem_object {
 
I915_SELFTEST_DECLARE(struct list_head st_link);
 
+   unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
/*
 * Is the object to be mapped as read-only to the GPU
 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index be126e70c90f..d9cd722b5dbf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -42,6 +42,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
return -ENOMEM;
}
 
+   if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+   flags = I915_ALLOC_CONTIGUOUS;
+
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
@@ -98,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem)
+   struct intel_memory_region *mem,
+   unsigned long flags)
 {
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = mem;
+   obj->flags = flags;
 
	mutex_lock(&mem->obj_lock);
	list_add(&obj->mm.region_link, &mem->objects);
@@ -125,6 +130,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (!mem)
return ERR_PTR(-ENODEV);
 
+   if (flags & ~I915_BO_ALLOC_FLAGS)
+   return ERR_PTR(-EINVAL);
+
size = round_up(size, mem->min_page_size);
 
GEM_BUG_ON(!size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem);
+   struct intel_memory_region *mem,
+   unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2f13e4c1d999..70b467d4e811 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -81,17 +81,17 @@ static int igt_mock_fill(void *arg)
 
 static void igt_mark_evictable(struct drm_i915_gem_object *obj)
 {
-   i915_gem_object_unpin_pages(obj);
+   if (i915_gem_object_has_pinned_pages(obj))
+   i915_gem_object_unpin_pages(obj);
obj->mm.madv = I915_MADV_DONTNEED;
	list_move(&obj->mm.region_link, &obj->mm.region->purgeable);
 }
 
-static int igt_mock_shrink(void *arg)
+static int igt_frag_region(struct intel_memory_region *mem,
+  struct list_head *objects)
 {
-   struct intel_memory_region *mem = arg;
struct drm_i915_gem_object *obj;
unsigned long n_objects;
-   LIST_HEAD(objects);
resource_size_t target;
resource_size_t total;
int err = 0;
@@ -109,7 +109,7 @@ static int igt_mock_shrink(void *arg)
goto err_close_objects;
}
 
-   list_add(&obj->st_link, &objects);
+   list_add(&obj->st_link, objects);
 
err = i915_gem_object_pin_pages(obj);
if (err)
@@ -123,6 +123,39 @@ static int igt_mock_shrink(void *arg)
igt_mark_evictable(obj);
}
 
+   return 0;
+
+err_close_objects:
+   close_objects(objects);
+   return err;
+}
+
+static