The caching of the sg_table mapping is now done by the DMA-buf helpers instead.

Signed-off-by: Christian König <christian.koe...@amd.com>
---
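
Note for reviewers: with .cache_sgt_mapping set, the sg_table caching that used
to live in struct drm_prime_attachment is handled by the DMA-buf core instead.
Roughly sketched below (simplified and paraphrased, not copied verbatim; see
drivers/dma-buf/dma-buf.c for the authoritative implementation), this is what
dma_buf_map_attachment() does for exporters that opt in:

        /*
         * Simplified sketch only -- locking and sanity checks omitted; the
         * real code lives in drivers/dma-buf/dma-buf.c.
         */
        struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                                enum dma_data_direction direction)
        {
                struct sg_table *sg_table;

                /* Return the cached mapping when possible. */
                if (attach->sgt) {
                        /*
                         * Two mappings with different directions for the
                         * same attachment are not allowed.
                         */
                        if (attach->dir != direction &&
                            attach->dir != DMA_BIDIRECTIONAL)
                                return ERR_PTR(-EBUSY);

                        return attach->sgt;
                }

                sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
                if (!sg_table)
                        sg_table = ERR_PTR(-ENOMEM);

                /* Cache the mapping when the exporter opted in. */
                if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
                        attach->sgt = sg_table;
                        attach->dir = direction;
                }

                return sg_table;
        }

At dma_buf_detach() time the core is then expected to hand the cached sg_table
back to &dma_buf_ops.unmap_dma_buf, which is why drm_gem_unmap_dma_buf() below
now performs the dma_unmap_sg_attrs()/sg_free_table()/kfree() that
drm_gem_map_detach() used to do.
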
 drivers/gpu/drm/drm_prime.c | 80 +++++++++-----------------------------
 1 file changed, 20 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..a6132ded7ec4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -86,11 +86,6 @@ struct drm_prime_member {
        struct rb_node handle_rb;
 };
 
-struct drm_prime_attachment {
-       struct sg_table *sgt;
-       enum dma_data_direction dir;
-};
-
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
 {
@@ -188,25 +183,16 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
  * @dma_buf: buffer to attach device to
  * @attach: buffer attachment data
  *
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
+ * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback.
  *
  * Returns 0 on success, negative error code on failure.
  */
 int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach)
 {
-       struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
 
-       prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
-       if (!prime_attach)
-               return -ENOMEM;
-
-       prime_attach->dir = DMA_NONE;
-       attach->priv = prime_attach;
-
        return drm_gem_pin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_attach);
@@ -222,26 +208,8 @@ EXPORT_SYMBOL(drm_gem_map_attach);
 void drm_gem_map_detach(struct dma_buf *dma_buf,
                        struct dma_buf_attachment *attach)
 {
-       struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
 
-       if (prime_attach) {
-               struct sg_table *sgt = prime_attach->sgt;
-
-               if (sgt) {
-                       if (prime_attach->dir != DMA_NONE)
-                               dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-                                                  sgt->nents,
-                                                  prime_attach->dir,
-                                                  DMA_ATTR_SKIP_CPU_SYNC);
-                       sg_free_table(sgt);
-               }
-
-               kfree(sgt);
-               kfree(prime_attach);
-               attach->priv = NULL;
-       }
-
        drm_gem_unpin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_detach);
@@ -286,39 +254,25 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
 struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                     enum dma_data_direction dir)
 {
-       struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;
 
-       if (WARN_ON(dir == DMA_NONE || !prime_attach))
+       if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);
 
-       /* return the cached mapping when possible */
-       if (prime_attach->dir == dir)
-               return prime_attach->sgt;
-
-       /*
-        * two mappings with different directions for the same attachment are
-        * not allowed
-        */
-       if (WARN_ON(prime_attach->dir != DMA_NONE))
-               return ERR_PTR(-EBUSY);
-
        if (obj->funcs)
                sgt = obj->funcs->get_sg_table(obj);
        else
                sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
-       if (!IS_ERR(sgt)) {
-               if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                                     DMA_ATTR_SKIP_CPU_SYNC)) {
-                       sg_free_table(sgt);
-                       kfree(sgt);
-                       sgt = ERR_PTR(-ENOMEM);
-               } else {
-                       prime_attach->sgt = sgt;
-                       prime_attach->dir = dir;
-               }
+       if (IS_ERR(sgt))
+               return sgt;
+
+       if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+                             DMA_ATTR_SKIP_CPU_SYNC)) {
+               sg_free_table(sgt);
+               kfree(sgt);
+               sgt = ERR_PTR(-ENOMEM);
        }
 
        return sgt;
@@ -331,14 +282,19 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
  * @sgt: scatterlist info of the buffer to unmap
  * @dir: direction of DMA transfer
  *
- * Not implemented. The unmap is done at drm_gem_map_detach().  This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
  */
 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           struct sg_table *sgt,
                           enum dma_data_direction dir)
 {
-       /* nothing to be done here */
+       if (!sgt)
+               return;
+
+       dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+                          DMA_ATTR_SKIP_CPU_SYNC);
+       sg_free_table(sgt);
+       kfree(sgt);
 }
 EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 
@@ -452,6 +408,7 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
+       .cache_sgt_mapping = true,
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
-- 
2.17.1
