Re: [RFCv5 PATCH 04/15] vb2-dma-sg: add dmabuf import support

2014-11-08 Thread Pawel Osciak
On Sat, Nov 8, 2014 at 7:20 PM, Pawel Osciak  wrote:
> Hi Hans,
> Thank you for the patch.
>
> On Fri, Nov 7, 2014 at 5:50 PM, Hans Verkuil  wrote:
>> From: Hans Verkuil 
>>
>> Add support for dmabuf to vb2-dma-sg.
>
> importing dmabuf into videobuf2-dma-sg.
>

One thing I missed in the review: I think vb2_dma_sg_vaddr() needs to
be updated in this patch to take into account that an attachment may
be present, just as dma-contig does, i.e. if !vaddr and an attachment
is present, call the dma_buf_vmap() dmabuf op instead of
vm_map_ram().
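
Something along these lines, perhaps (a rough, untested sketch
modeled on vb2_dc_vaddr() in videobuf2-dma-contig.c; exact details
are of course up to you):

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			/* imported buffer: let the exporter map it */
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
						buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}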

-- 
Best regards,
Pawel Osciak


Re: [RFCv5 PATCH 04/15] vb2-dma-sg: add dmabuf import support

2014-11-08 Thread Pawel Osciak
Hi Hans,
Thank you for the patch.

On Fri, Nov 7, 2014 at 5:50 PM, Hans Verkuil  wrote:
> From: Hans Verkuil 
>
> Add support for dmabuf to vb2-dma-sg.

importing dmabuf into videobuf2-dma-sg.

>
> Signed-off-by: Hans Verkuil 
> ---
>  drivers/media/v4l2-core/videobuf2-dma-sg.c | 126 +++--
>  1 file changed, 119 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
> index 7375923..2795c27 100644
> --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
> +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
> @@ -42,11 +42,15 @@ struct vb2_dma_sg_buf {
> int offset;
> enum dma_data_direction dma_dir;
> struct sg_table sg_table;
> +   struct sg_table *dma_sgt;

Perhaps it would be useful to add a comment explaining when this
points to sg_table above and when it doesn't...
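
For example (just a suggestion for the wording; I'm assuming the
DMABUF case will point it at the sg_table returned by
dma_buf_map_attachment(), as dma-contig does):

	struct sg_table sg_table;
	/*
	 * Points to &sg_table above for MMAP and USERPTR buffers, or
	 * to the sg_table returned by the exporter for imported
	 * DMABUF buffers.
	 */
	struct sg_table *dma_sgt;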

> size_t  size;
> unsigned intnum_pages;
> atomic_trefcount;
> struct vb2_vmarea_handler   handler;
> struct vm_area_struct   *vma;
> +
> +   /* DMABUF related */

I'd remove this comment.

> +   struct dma_buf_attachment   *db_attach;
>  };
>
>  static void vb2_dma_sg_put(void *buf_priv);
> @@ -114,6 +118,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, int write,
> /* size is already page aligned */
> buf->num_pages = size >> PAGE_SHIFT;
> buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +   buf->dma_sgt = &buf->sg_table;
>
> buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
>  GFP_KERNEL);
> @@ -124,7 +129,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, int write,
> if (ret)
> goto fail_pages_alloc;
>
> -   ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
> +   ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
> buf->num_pages, 0, size, GFP_KERNEL);
> if (ret)
> goto fail_table_alloc;
> @@ -173,7 +178,7 @@ static void vb2_dma_sg_put(void *buf_priv)
> dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> if (buf->vaddr)
> vm_unmap_ram(buf->vaddr, buf->num_pages);
> -   sg_free_table(&buf->sg_table);
> +   sg_free_table(buf->dma_sgt);
> while (--i >= 0)
> __free_page(buf->pages[i]);
> kfree(buf->pages);
> @@ -185,7 +190,11 @@ static void vb2_dma_sg_put(void *buf_priv)
>  static void vb2_dma_sg_prepare(void *buf_priv)
>  {
> struct vb2_dma_sg_buf *buf = buf_priv;
> -   struct sg_table *sgt = &buf->sg_table;
> +   struct sg_table *sgt = buf->dma_sgt;
> +
> +   /* DMABUF exporter will flush the cache for us */
> +   if (buf->db_attach)
> +   return;
>
> dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>  }
> @@ -193,7 +202,11 @@ static void vb2_dma_sg_prepare(void *buf_priv)
>  static void vb2_dma_sg_finish(void *buf_priv)
>  {
> struct vb2_dma_sg_buf *buf = buf_priv;
> -   struct sg_table *sgt = &buf->sg_table;
> +   struct sg_table *sgt = buf->dma_sgt;
> +
> +   /* DMABUF exporter will flush the cache for us */
> +   if (buf->db_attach)
> +   return;
>
> dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>  }
> @@ -222,6 +235,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> buf->offset = vaddr & ~PAGE_MASK;
> buf->size = size;
> buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +   buf->dma_sgt = &buf->sg_table;
>
> first = (vaddr   & PAGE_MASK) >> PAGE_SHIFT;
> last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
> @@ -274,7 +288,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> if (num_pages_from_user != buf->num_pages)
> goto userptr_fail_get_user_pages;
>
> -   if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
> +   if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
> buf->num_pages, buf->offset, size, 0))
> goto userptr_fail_alloc_table_from_pages;
>
> @@ -319,7 +333,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
> dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> if (buf->vaddr)
> vm_unmap_ram(buf->vaddr, buf->num_pages);
> -   sg_free_table(&buf->sg_table);
> +   sg_free_table(buf->dma_sgt);
> while (--i >= 0) {
> if (buf->write)
> set_page_dirty_lock(buf->pages[i]);
> @@ -392,11 +406,105 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)

[RFCv5 PATCH 04/15] vb2-dma-sg: add dmabuf import support

2014-11-07 Thread Hans Verkuil
From: Hans Verkuil 

Add support for dmabuf to vb2-dma-sg.

Signed-off-by: Hans Verkuil 
---
 drivers/media/v4l2-core/videobuf2-dma-sg.c | 126 +++--
 1 file changed, 119 insertions(+), 7 deletions(-)

diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7375923..2795c27 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -42,11 +42,15 @@ struct vb2_dma_sg_buf {
int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table;
+   struct sg_table *dma_sgt;
size_t  size;
unsigned intnum_pages;
atomic_trefcount;
struct vb2_vmarea_handler   handler;
struct vm_area_struct   *vma;
+
+   /* DMABUF related */
+   struct dma_buf_attachment   *db_attach;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -114,6 +118,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, int write,
/* size is already page aligned */
buf->num_pages = size >> PAGE_SHIFT;
buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+   buf->dma_sgt = &buf->sg_table;
 
buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
 GFP_KERNEL);
@@ -124,7 +129,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, int write,
if (ret)
goto fail_pages_alloc;
 
-   ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+   ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, 0, size, GFP_KERNEL);
if (ret)
goto fail_table_alloc;
@@ -173,7 +178,7 @@ static void vb2_dma_sg_put(void *buf_priv)
dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
-   sg_free_table(&buf->sg_table);
+   sg_free_table(buf->dma_sgt);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
@@ -185,7 +190,11 @@ static void vb2_dma_sg_put(void *buf_priv)
 static void vb2_dma_sg_prepare(void *buf_priv)
 {
struct vb2_dma_sg_buf *buf = buf_priv;
-   struct sg_table *sgt = &buf->sg_table;
+   struct sg_table *sgt = buf->dma_sgt;
+
+   /* DMABUF exporter will flush the cache for us */
+   if (buf->db_attach)
+   return;
 
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
@@ -193,7 +202,11 @@ static void vb2_dma_sg_prepare(void *buf_priv)
 static void vb2_dma_sg_finish(void *buf_priv)
 {
struct vb2_dma_sg_buf *buf = buf_priv;
-   struct sg_table *sgt = &buf->sg_table;
+   struct sg_table *sgt = buf->dma_sgt;
+
+   /* DMABUF exporter will flush the cache for us */
+   if (buf->db_attach)
+   return;
 
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
@@ -222,6 +235,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+   buf->dma_sgt = &buf->sg_table;
 
first = (vaddr   & PAGE_MASK) >> PAGE_SHIFT;
last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -274,7 +288,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
 
-   if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+   if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, buf->offset, size, 0))
goto userptr_fail_alloc_table_from_pages;
 
@@ -319,7 +333,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
-   sg_free_table(&buf->sg_table);
+   sg_free_table(buf->dma_sgt);
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
@@ -392,11 +406,105 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
 }
 
+/*/
+/*   callbacks for DMABUF buffers*/
+/*/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+   struct vb2_dma_sg_buf *buf = mem_priv;
+   struct sg_table *sgt;
+
+   if (WARN_ON(!buf->db_attach)) {
+   pr_err("trying to pin a non attached buffer\n");
+