Re: [PATCH v5 6/8] iio: buffer-dma: Enable support for DMABUFs

2023-12-26 Thread Jonathan Cameron


> > > +struct iio_dma_buffer_block *
> > > +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
> > > +    struct dma_buf_attachment *attach)
> > > +{
> > > +   struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> > > +   struct iio_dma_buffer_block *block;
> > > +   int err;
> > > +
> > > +   mutex_lock(&queue->lock);
> > 
> > guard(mutex)(&queue->lock);
> 
> I'm also a big fan of this new stuff but shouldn't we have a prep patch 
> making the
> transition to that? Otherwise we'll end up with a mix of styles. I'm happy to 
> clean
> up stuff with follow up patches (even some coding style could be improved 
> IIRC) but
> typically that's not how we handle things like this I believe...

Ideally yes, I think a prep patch would make sense - but I'm not that fussed
about it and follow up patches would be fine here. 

There are cases for using this that are much easier to justify than
others, so I don't really mind if simple

mutex_lock(&lock);
do_something();
mutex_unlock(&lock);

cases are left forever not using guard(), though I also don't mind if people
want to use scoped_guard() or guard() for these just to be consistent.  Either way is pretty
easy to read.  We'll probably also continue to gain new uses of this logic such
as the conditional guard stuff that is queued up for the next merge window and
whilst that is going on we are going to have a bit of mixed style.
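
For reference, a minimal sketch of the two cleanup.h styles being discussed
(the lock name is just illustrative):

	/* lock is held until the end of the enclosing scope */
	guard(mutex)(&queue->lock);

or, with an explicit scope:

	scoped_guard(mutex, &queue->lock) {
		/* critical section; unlock runs on every exit path */
		do_something();
	}

Both acquire the mutex immediately and release it automatically, which is
what lets the error paths drop their explicit mutex_unlock() calls.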

Jonathan


> 
> - Nuno Sá
> >   
> 



Re: [PATCH v5 6/8] iio: buffer-dma: Enable support for DMABUFs

2023-12-22 Thread Nuno Sá
On Thu, 2023-12-21 at 16:04 +0000, Jonathan Cameron wrote:
> On Tue, 19 Dec 2023 18:50:07 +0100
> Paul Cercueil wrote:
> 
> > Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> > and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> > DMA buffer implementations.
> > 
> > Signed-off-by: Paul Cercueil 
> > 
> Hi Paul,
> 
> A few comments in here. Mostly places where the cleanup.h guard() stuff
> can simplify error paths.
> 
> Overall this looks reasonable to me.
> 
> Jonathan
> 
> > ---
> > v3: Update code to provide the functions that will be used as callbacks
> >     for the new IOCTLs.
> > ---
> >  drivers/iio/buffer/industrialio-buffer-dma.c | 157 +--
> >  include/linux/iio/buffer-dma.h   |  26 +++
> >  2 files changed, 170 insertions(+), 13 deletions(-)
> > 
> > diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c
> > b/drivers/iio/buffer/industrialio-buffer-dma.c
> > index 5610ba67925e..adb64f975064 100644
> > --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> > +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> > @@ -14,6 +14,7 @@
> >  #include <linux/poll.h>
> >  #include <linux/iio/buffer_impl.h>
> >  #include <linux/iio/buffer-dma.h>
> > +#include <linux/dma-buf.h>
> >  #include <linux/dma-mapping.h>
> >  #include <linux/sizes.h>
> >  
> > @@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
> >  {
> > struct iio_dma_buffer_block *block = container_of(kref,
> > struct iio_dma_buffer_block, kref);
> > +   struct iio_dma_buffer_queue *queue = block->queue;
> >  
> > -   WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
> > +   WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
> >  
> > -   dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
> > -   block->vaddr, block->phys_addr);
> > +   mutex_lock(&queue->lock);
> >  
> > -   iio_buffer_put(&block->queue->buffer);
> > +   if (block->fileio) {
> > +   dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
> > + block->vaddr, block->phys_addr);
> > +   queue->num_fileio_blocks--;
> > +   }
> > +
> > +   queue->num_blocks--;
> > kfree(block);
> > +
> > +   mutex_unlock(&queue->lock);
> > +
> > +   iio_buffer_put(&queue->buffer);
> >  }
> >  
> >  static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
> > @@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue
> > *iio_buffer_to_queue(struct iio_buffer *buf)
> >  }
> >  
> >  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> > -   struct iio_dma_buffer_queue *queue, size_t size)
> > +   struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
> >  {
> > struct iio_dma_buffer_block *block;
> >  
> > @@ -171,13 +182,16 @@ static struct iio_dma_buffer_block
> > *iio_dma_buffer_alloc_block(
> > if (!block)
> > return NULL;
> >  
> > -   block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > -   &block->phys_addr, GFP_KERNEL);
> > -   if (!block->vaddr) {
> > -   kfree(block);
> > -   return NULL;
> > +   if (fileio) {
> > +   block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > + &block->phys_addr, GFP_KERNEL);
> > +   if (!block->vaddr) {
> > +   kfree(block);
> > +   return NULL;
> > +   }
> > }
> >  
> > +   block->fileio = fileio;
> > block->size = size;
> > block->state = IIO_BLOCK_STATE_DONE;
> > block->queue = queue;
> > @@ -186,6 +200,9 @@ static struct iio_dma_buffer_block
> > *iio_dma_buffer_alloc_block(
> >  
> > iio_buffer_get(&queue->buffer);
> >  
> > +   queue->num_blocks++;
> > +   queue->num_fileio_blocks += fileio;
> Adding a boolean is non-intuitive.
> 
> if (fileio)
> 	queue->num_fileio_blocks++;
> 
> is probably easier to read, and the compiler should be able to figure out
> how to optimise the condition away.
> 
> > +
> > return block;
> >  }
> >  
> > @@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct 
> > iio_dma_buffer_block
> > *block)
> > _iio_dma_buffer_block_done(block);
> > spin_unlock_irqrestore(&queue->list_lock, flags);
> >  
> > +   if (!block->fileio)
> > +   iio_buffer_signal_dmabuf_done(block->attach, 0);
> > +
> > iio_buffer_block_put_atomic(block);
> > wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> >  }
> > @@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct
> > iio_dma_buffer_queue *queue,
> > list_del(&block->head);
> > block->bytes_used = 0;
> > _iio_dma_buffer_block_done(block);
> > +
> > +   if (!block->fileio)
> > +   iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
> > 

Re: [PATCH v5 6/8] iio: buffer-dma: Enable support for DMABUFs

2023-12-21 Thread Jonathan Cameron
On Tue, 19 Dec 2023 18:50:07 +0100
Paul Cercueil wrote:

> Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> DMA buffer implementations.
> 
> Signed-off-by: Paul Cercueil 
> 
Hi Paul,

A few comments in here. Mostly places where the cleanup.h guard() stuff
can simplify error paths.

Overall this looks reasonable to me.

Jonathan

> ---
> v3: Update code to provide the functions that will be used as callbacks
> for the new IOCTLs.
> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 157 +--
>  include/linux/iio/buffer-dma.h   |  26 +++
>  2 files changed, 170 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c 
> b/drivers/iio/buffer/industrialio-buffer-dma.c
> index 5610ba67925e..adb64f975064 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -14,6 +14,7 @@
>  #include <linux/poll.h>
>  #include <linux/iio/buffer_impl.h>
>  #include <linux/iio/buffer-dma.h>
> +#include <linux/dma-buf.h>
>  #include <linux/dma-mapping.h>
>  #include <linux/sizes.h>
>  
> @@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
>  {
>   struct iio_dma_buffer_block *block = container_of(kref,
>   struct iio_dma_buffer_block, kref);
> + struct iio_dma_buffer_queue *queue = block->queue;
>  
> - WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
> + WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
>  
> - dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
> - block->vaddr, block->phys_addr);
> + mutex_lock(&queue->lock);
>  
> - iio_buffer_put(&block->queue->buffer);
> + if (block->fileio) {
> + dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
> +   block->vaddr, block->phys_addr);
> + queue->num_fileio_blocks--;
> + }
> +
> + queue->num_blocks--;
>   kfree(block);
> +
> + mutex_unlock(&queue->lock);
> +
> + iio_buffer_put(&queue->buffer);
>  }
>  
>  static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
> @@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue 
> *iio_buffer_to_queue(struct iio_buffer *buf)
>  }
>  
>  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> - struct iio_dma_buffer_queue *queue, size_t size)
> + struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
>  {
>   struct iio_dma_buffer_block *block;
>  
> @@ -171,13 +182,16 @@ static struct iio_dma_buffer_block 
> *iio_dma_buffer_alloc_block(
>   if (!block)
>   return NULL;
>  
> - block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> - &block->phys_addr, GFP_KERNEL);
> - if (!block->vaddr) {
> - kfree(block);
> - return NULL;
> + if (fileio) {
> + block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> +   &block->phys_addr, GFP_KERNEL);
> + if (!block->vaddr) {
> + kfree(block);
> + return NULL;
> + }
>   }
>  
> + block->fileio = fileio;
>   block->size = size;
>   block->state = IIO_BLOCK_STATE_DONE;
>   block->queue = queue;
> @@ -186,6 +200,9 @@ static struct iio_dma_buffer_block 
> *iio_dma_buffer_alloc_block(
>  
>   iio_buffer_get(&queue->buffer);
>  
> + queue->num_blocks++;
> + queue->num_fileio_blocks += fileio;
Adding a boolean is non-intuitive.

if (fileio)
	queue->num_fileio_blocks++;

is probably easier to read, and the compiler should be able to figure out
how to optimise the condition away.

> +
>   return block;
>  }
>  
> @@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct 
> iio_dma_buffer_block *block)
>   _iio_dma_buffer_block_done(block);
>   spin_unlock_irqrestore(&queue->list_lock, flags);
>  
> + if (!block->fileio)
> + iio_buffer_signal_dmabuf_done(block->attach, 0);
> +
>   iio_buffer_block_put_atomic(block);
>   wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
>  }
> @@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct 
> iio_dma_buffer_queue *queue,
>   list_del(&block->head);
>   block->bytes_used = 0;
>   _iio_dma_buffer_block_done(block);
> +
> + if (!block->fileio)
> + iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
>   iio_buffer_block_put_atomic(block);
>   }
>   spin_unlock_irqrestore(&queue->list_lock, flags);
>  
> + queue->fileio.enabled = false;

While this obviously doesn't need to be conditional if it can already be false,
it might be easier to follow the code flow if it didn't check whether we were
doing fileio or not before disabling it.

>   wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
>  }
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
> @@ -261,6 +285,12 @@ 

[PATCH v5 6/8] iio: buffer-dma: Enable support for DMABUFs

2023-12-19 Thread Paul Cercueil
Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
DMA buffer implementations.

Signed-off-by: Paul Cercueil 

---
v3: Update code to provide the functions that will be used as callbacks
for the new IOCTLs.
---
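For context, a rough sketch of how a DMA buffer implementation would expose
these functions through its iio_buffer_access_funcs so the new IOCTLs can
reach them (illustrative only - the "my_" names are placeholders, and the
actual hookup belongs to a later patch in this series):

	static const struct iio_buffer_access_funcs my_dma_buffer_ops = {
		/* ... existing fileio callbacks (read, write, etc.) ... */
		.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
		.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
		/* enqueue goes through the driver's own DMA submission path */
		.enqueue_dmabuf = my_enqueue_dmabuf,
	};
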
 drivers/iio/buffer/industrialio-buffer-dma.c | 157 +--
 include/linux/iio/buffer-dma.h   |  26 +++
 2 files changed, 170 insertions(+), 13 deletions(-)

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c 
b/drivers/iio/buffer/industrialio-buffer-dma.c
index 5610ba67925e..adb64f975064 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
 #include <linux/poll.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
 
@@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
 {
struct iio_dma_buffer_block *block = container_of(kref,
struct iio_dma_buffer_block, kref);
+   struct iio_dma_buffer_queue *queue = block->queue;
 
-   WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+   WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
 
-   dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
-   block->vaddr, block->phys_addr);
+   mutex_lock(&queue->lock);
 
-   iio_buffer_put(&block->queue->buffer);
+   if (block->fileio) {
+   dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+ block->vaddr, block->phys_addr);
+   queue->num_fileio_blocks--;
+   }
+
+   queue->num_blocks--;
kfree(block);
+
+   mutex_unlock(&queue->lock);
+
+   iio_buffer_put(&queue->buffer);
 }
 
 static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
@@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue 
*iio_buffer_to_queue(struct iio_buffer *buf)
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-   struct iio_dma_buffer_queue *queue, size_t size)
+   struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
 {
struct iio_dma_buffer_block *block;
 
@@ -171,13 +182,16 @@ static struct iio_dma_buffer_block 
*iio_dma_buffer_alloc_block(
if (!block)
return NULL;
 
-   block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
-   &block->phys_addr, GFP_KERNEL);
-   if (!block->vaddr) {
-   kfree(block);
-   return NULL;
+   if (fileio) {
+   block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+ &block->phys_addr, GFP_KERNEL);
+   if (!block->vaddr) {
+   kfree(block);
+   return NULL;
+   }
}
 
+   block->fileio = fileio;
block->size = size;
block->state = IIO_BLOCK_STATE_DONE;
block->queue = queue;
@@ -186,6 +200,9 @@ static struct iio_dma_buffer_block 
*iio_dma_buffer_alloc_block(
 
iio_buffer_get(&queue->buffer);
 
+   queue->num_blocks++;
+   queue->num_fileio_blocks += fileio;
+
return block;
 }
 
@@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block 
*block)
_iio_dma_buffer_block_done(block);
spin_unlock_irqrestore(&queue->list_lock, flags);
 
+   if (!block->fileio)
+   iio_buffer_signal_dmabuf_done(block->attach, 0);
+
iio_buffer_block_put_atomic(block);
wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
@@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct 
iio_dma_buffer_queue *queue,
list_del(&block->head);
block->bytes_used = 0;
_iio_dma_buffer_block_done(block);
+
+   if (!block->fileio)
+   iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
iio_buffer_block_put_atomic(block);
}
spin_unlock_irqrestore(&queue->list_lock, flags);
 
+   queue->fileio.enabled = false;
wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
@@ -261,6 +285,12 @@ static bool iio_dma_block_reusable(struct 
iio_dma_buffer_block *block)
}
 }
 
+static bool iio_dma_buffer_fileio_mode(struct iio_dma_buffer_queue *queue)
+{
+   return queue->fileio.enabled ||
+   queue->num_blocks == queue->num_fileio_blocks;
+}
+
 /**
  * iio_dma_buffer_request_update() - DMA buffer request_update callback
  * @buffer: The buffer which to request an update
@@ -287,6 +317,12 @@ int iio_dma_buffer_request_update(struct iio_buffer 
*buffer)
 
mutex_lock(&queue->lock);
 
+   queue->fileio.enabled = iio_dma_buffer_fileio_mode(queue);
+
+   /* If DMABUFs were created, disable fileio interface */
+   if (!queue->fileio.enabled)
+   goto out_unlock;
+
/* Allocations are page