From: Sergey Senozhatsky <[email protected]>

Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
callbacks for cache synchronisation on exported buffers.

V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
dma-sg allocates memory using the page allocator directly, so
there is no memory consistency guarantee.

Signed-off-by: Sergey Senozhatsky <[email protected]>
---
 .../media/common/videobuf2/videobuf2-dma-sg.c | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 92072a08af25..595137e358e7 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -120,6 +120,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
        buf->num_pages = size >> PAGE_SHIFT;
        buf->dma_sgt = &buf->sg_table;
 
+       /*
+        * NOTE: dma-sg allocates memory using the page allocator directly, so
+        * there is no memory consistency guarantee, hence dma-sg ignores DMA
+        * attributes passed from the upper layer. That means that
+        * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
+        */
        buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!buf->pages)
@@ -469,6 +475,28 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
        vb2_dma_sg_put(dbuf->priv);
 }
 
+static int
+vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+                                      enum dma_data_direction direction)
+{
+       struct vb2_dma_sg_buf *buf = dbuf->priv;
+       struct sg_table *sgt = buf->dma_sgt;
+
+       dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+       return 0;
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+                                    enum dma_data_direction direction)
+{
+       struct vb2_dma_sg_buf *buf = dbuf->priv;
+       struct sg_table *sgt = buf->dma_sgt;
+
+       dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+       return 0;
+}
+
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
        struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -487,6 +515,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .detach = vb2_dma_sg_dmabuf_ops_detach,
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+       .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+       .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
-- 
2.26.2
