Implement a virtgpu-specific map_dma_buf callback to support mapping
exported vram object dma-bufs. The dma_buf_ops callback is implemented
directly, rather than via the drm_gem_map_dma_buf helper, because vram
objects don't have backing pages and thus can't implement the
drm_gem_object_funcs.get_sg_table callback.

Signed-off-by: David Stevens <steve...@chromium.org>
---
v1 -> v2:
 - reflow line to fix strict checkpatch warning
 - replace else with return for consistency between functions
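
For reference only, not part of the patch itself: below is a minimal
sketch of how an importing driver might consume such an exported vram
dma-buf through the standard dma-buf API (locking and error paths
simplified). The function name example_import_vram_dmabuf() and the
parameters "buf" (the imported dma-buf) and "dev" (the importing
device) are made-up placeholders for illustration.

  #include <linux/device.h>
  #include <linux/dma-buf.h>
  #include <linux/dma-direction.h>
  #include <linux/err.h>
  #include <linux/scatterlist.h>

  static int example_import_vram_dmabuf(struct dma_buf *buf, struct device *dev)
  {
          struct dma_buf_attachment *attach;
          struct sg_table *sgt;

          attach = dma_buf_attach(buf, dev);
          if (IS_ERR(attach))
                  return PTR_ERR(attach);

          /*
           * For a mappable vram object this ends up in
           * virtio_gpu_vram_map_dma_buf() and yields a single-entry
           * sg_table describing the host-visible vram range.
           */
          sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
          if (IS_ERR(sgt)) {
                  dma_buf_detach(buf, attach);
                  return PTR_ERR(sgt);
          }

          /*
           * ... program sg_dma_address(sgt->sgl) / sg_dma_len(sgt->sgl)
           * into the importing device here ...
           */

          dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
          dma_buf_detach(buf, attach);
          return 0;
  }

A virtio importer of a non-mappable blob would instead resolve the
resource via its UUID (virtio_dma_buf_get_uuid()) and only receives the
stub sg_table from the new code path.
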
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  8 ++++
 drivers/gpu/drm/virtio/virtgpu_prime.c | 32 +++++++++++++-
 drivers/gpu/drm/virtio/virtgpu_vram.c  | 61 ++++++++++++++++++++++++++
 3 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d4e610a44e12..0c4810982530 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
 #ifndef VIRTIO_DRV_H
 #define VIRTIO_DRV_H
 
+#include <linux/dma-direction.h>
 #include <linux/virtio.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
@@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
 int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
                           struct virtio_gpu_object_params *params,
                           struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+                                            struct device *dev,
+                                            enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+                                  struct sg_table *sgt,
+                                  enum dma_data_direction dir);
+
 #endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 807a27a16365..7b940be3323f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
        return 0;
 }
 
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+                       enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = attach->dmabuf->priv;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+       if (virtio_gpu_is_vram(bo))
+               return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+       return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+                                     struct sg_table *sgt,
+                                     enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = attach->dmabuf->priv;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+       if (virtio_gpu_is_vram(bo)) {
+               virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+               return;
+       }
+
+       drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
        .ops = {
                .cache_sgt_mapping = true,
                .attach = virtio_dma_buf_attach,
                .detach = drm_gem_map_detach,
-               .map_dma_buf = drm_gem_map_dma_buf,
-               .unmap_dma_buf = drm_gem_unmap_dma_buf,
+               .map_dma_buf = virtgpu_gem_map_dma_buf,
+               .unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
                .release = drm_gem_dmabuf_release,
                .mmap = drm_gem_dmabuf_mmap,
                .vmap = drm_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..6b45b0429fef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "virtgpu_drv.h"
 
+#include <linux/dma-mapping.h>
+
 static void virtio_gpu_vram_free(struct drm_gem_object *obj)
 {
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
        return ret;
 }
 
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+                                            struct device *dev,
+                                            enum dma_data_direction dir)
+{
+       struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       struct sg_table *sgt;
+       dma_addr_t addr;
+       int ret;
+
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
+
+       if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+               // Virtio devices can access the dma-buf via its UUID. Return a stub
+               // sg_table so the dma-buf API still works.
+               if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+                       ret = -EIO;
+                       goto out;
+               }
+               return sgt;
+       }
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (ret)
+               goto out;
+
+       addr = dma_map_resource(dev, vram->vram_node.start,
+                               vram->vram_node.size, dir,
+                               DMA_ATTR_SKIP_CPU_SYNC);
+       ret = dma_mapping_error(dev, addr);
+       if (ret)
+               goto out;
+
+       sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+       sg_dma_address(sgt->sgl) = addr;
+       sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+       return sgt;
+out:
+       sg_free_table(sgt);
+       kfree(sgt);
+       return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+                                  struct sg_table *sgt,
+                                  enum dma_data_direction dir)
+{
+       if (sgt->nents) {
+               dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+                                  sg_dma_len(sgt->sgl), dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       }
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
 static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,
-- 
2.33.0.rc1.237.g0d66db33f3-goog
