[Spice-devel] [PATCH v5 05/10] drm/ttm: Add vmap/vunmap to TTM and TTM GEM helpers

2020-10-20 Thread Thomas Zimmermann
The new functions ttm_bo_{vmap,vunmap}() map and unmap a TTM BO in kernel
address space. The mapping's address is returned as struct dma_buf_map.
Each function is a simplified version of TTM's existing kmap code. Both
functions respect the memory's location and/or write-combine flags.
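
As an illustration (not part of this patch), a caller with a pinned and
reserved BO would use the new interface roughly as below, assuming the
usual TTM and dma-buf-map headers; mydrv_upload(), src and len are
made-up names:

  static int mydrv_upload(struct ttm_buffer_object *bo,
                          const void *src, size_t len)
  {
          struct dma_buf_map map;
          int ret;

          ret = ttm_bo_vmap(bo, &map);
          if (ret)
                  return ret;

          if (map.is_iomem)       /* BO lives in I/O memory, e.g. VRAM */
                  memcpy_toio(map.vaddr_iomem, src, len);
          else                    /* BO lives in system memory */
                  memcpy(map.vaddr, src, len);

          ttm_bo_vunmap(bo, &map);

          return 0;
  }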

On top of TTM's functions, the GEM TTM helpers gain drm_gem_ttm_{vmap,vunmap}(),
two helpers that convert a GEM object into the TTM BO and forward the call
to TTM's vmap/vunmap. These helpers can be dropped into the respective GEM
object callbacks.
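
For instance, a driver built on these helpers could wire up its GEM object
callbacks as sketched below; the mydrv_* names are placeholders, only the
drm_gem_ttm_*() helpers are real (drm_gem_ttm_mmap() predates this patch):

  static const struct drm_gem_object_funcs mydrv_gem_object_funcs = {
          .free   = mydrv_gem_object_free, /* driver-specific cleanup */
          .vmap   = drm_gem_ttm_vmap,      /* kernel mapping via ttm_bo_vmap() */
          .vunmap = drm_gem_ttm_vunmap,    /* unmap via ttm_bo_vunmap() */
          .mmap   = drm_gem_ttm_mmap,      /* userspace mappings */
  };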

v5:
* use size_t for storing mapping size (Christian)
* ignore premapped memory areas correctly in ttm_bo_vunmap()
* rebase onto latest TTM interfaces (Christian)
* remove BUG() from ttm_bo_vmap() (Christian)
v4:
* drop ttm_kmap_obj_to_dma_buf() in favor of vmap helpers (Daniel,
  Christian)

Signed-off-by: Thomas Zimmermann 
Acked-by: Daniel Vetter 
Tested-by: Sam Ravnborg 
---
 drivers/gpu/drm/drm_gem_ttm_helper.c | 38 +++
 drivers/gpu/drm/ttm/ttm_bo_util.c| 72 
 include/drm/drm_gem_ttm_helper.h |  6 +++
 include/drm/ttm/ttm_bo_api.h | 28 +++
 include/linux/dma-buf-map.h  | 20 
 5 files changed, 164 insertions(+)

diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c 
b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 0e4fb9ba43ad..db4c14d78a30 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -49,6 +49,44 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned 
int indent,
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);
 
+/**
+ * drm_gem_ttm_vmap() - vmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @map: [out] returns the dma-buf mapping.
+ *
+ * Maps a GEM object with ttm_bo_vmap(). This function can be used as
+ * &drm_gem_object_funcs.vmap callback.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+struct dma_buf_map *map)
+{
+   struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+   return ttm_bo_vmap(bo, map);
+
+}
+EXPORT_SYMBOL(drm_gem_ttm_vmap);
+
+/**
+ * drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @map: dma-buf mapping.
+ *
+ * Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as
+ * &drm_gem_object_funcs.vunmap callback.
+ */
+void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+   struct dma_buf_map *map)
+{
+   struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+   ttm_bo_vunmap(bo, map);
+}
+EXPORT_SYMBOL(drm_gem_ttm_vunmap);
+
 /**
 * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
  * @gem: GEM object.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c 
b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba7ab5ed85d0..5c79418405ea 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/dma-buf-map.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -527,6 +528,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+   struct ttm_resource *mem = &bo->mem;
+   int ret;
+
+   ret = ttm_mem_io_reserve(bo->bdev, mem);
+   if (ret)
+   return ret;
+
+   if (mem->bus.is_iomem) {
+   void __iomem *vaddr_iomem;
+   size_t size = bo->num_pages << PAGE_SHIFT;
+
+   if (mem->bus.addr)
+   vaddr_iomem = (void __iomem *)mem->bus.addr;
+   else if (mem->bus.caching == ttm_write_combined)
+   vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+   else
+   vaddr_iomem = ioremap(mem->bus.offset, size);
+
+   if (!vaddr_iomem)
+   return -ENOMEM;
+
+   dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
+
+   } else {
+   struct ttm_operation_ctx ctx = {
+   .interruptible = false,
+   .no_wait_gpu = false
+   };
+   struct ttm_tt *ttm = bo->ttm;
+   pgprot_t prot;
+   void *vaddr;
+
+   ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+   if (ret)
+   return ret;
+
+   /*
+* We need to use vmap to get the desired page protection
+* or to make the buffer object look contiguous.
+*/
+   prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+   vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+   if (!vaddr)
+   return -ENOMEM;
+
+   dma_buf_map_set_vaddr(map, vaddr);
+   }
+
+   return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vmap);
+
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+   struct ttm_resource *mem = &bo->mem;
+
+   if 

Re: [Spice-devel] [PATCH v5 05/10] drm/ttm: Add vmap/vunmap to TTM and TTM GEM helpers

2020-10-20 Thread Christian König

On 20.10.20 14:20, Thomas Zimmermann wrote:

The new functions ttm_bo_{vmap,vunmap}() map and unmap a TTM BO in kernel
address space. The mapping's address is returned as struct dma_buf_map.
Each function is a simplified version of TTM's existing kmap code. Both
functions respect the memory's location and/or write-combine flags.

On top of TTM's functions, the GEM TTM helpers gain drm_gem_ttm_{vmap,vunmap}(),
two helpers that convert a GEM object into the TTM BO and forward the call
to TTM's vmap/vunmap. These helpers can be dropped into the respective GEM
object callbacks.

v5:
* use size_t for storing mapping size (Christian)
* ignore premapped memory areas correctly in ttm_bo_vunmap()
* rebase onto latest TTM interfaces (Christian)
* remove BUG() from ttm_bo_vmap() (Christian)
v4:
* drop ttm_kmap_obj_to_dma_buf() in favor of vmap helpers (Daniel,
  Christian)

Signed-off-by: Thomas Zimmermann 
Acked-by: Daniel Vetter 
Tested-by: Sam Ravnborg 


Reviewed-by: Christian König 

