Re: [PATCH v2 01/12] iio: buffer-dma: Get rid of outgoing queue

2022-03-28 Thread Jonathan Cameron
On Mon,  7 Feb 2022 12:59:22 +
Paul Cercueil  wrote:

> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
> 
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
> 
> Since the new DMABUF based API wouldn't use the outgoing queue anyway,
> getting rid of it now makes the upcoming changes simpler.
> 
> With this change, the IIO_BLOCK_STATE_DEQUEUED is now useless, and can
> be removed.
> 
> v2: - Only remove the outgoing queue, and keep the incoming queue, as we
>   want the buffer to start streaming data as soon as it is enabled.
> - Remove IIO_BLOCK_STATE_DEQUEUED, since it is now functionally the
>   same as IIO_BLOCK_STATE_DONE.
> 
> Signed-off-by: Paul Cercueil 

Hi Paul,

In the interests of moving things forward / simplifying what people need
to look at: this change looks good to me on its own.

Lars had some comments on v1.  Lars, could you take a look at this and
verify whether this version addresses the points you raised? (I think it
does, but they were your comments, so you're the better judge.)

Thanks,

Jonathan
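
As a quick illustration of the approach the quoted commit message describes
(per-block state plus a next_dequeue counter instead of an outgoing list),
here is a minimal, self-contained C sketch. It is a userspace model only,
not the driver code: block_state, fileio_state, NUM_BLOCKS and dequeue()
are simplified stand-ins for the structures in industrialio-buffer-dma.c.

/*
 * Minimal model: two blocks tracked by their own state plus a
 * next_dequeue index, with no outgoing list. Illustrative names only.
 */
#include <stddef.h>
#include <stdio.h>

enum block_state { BLOCK_QUEUED, BLOCK_ACTIVE, BLOCK_DONE, BLOCK_DEAD };

struct block {
	enum block_state state;
	size_t size;
};

#define NUM_BLOCKS 2

struct fileio_state {
	struct block blocks[NUM_BLOCKS];
	unsigned int next_dequeue;
};

/* Return the next completed block in order, or NULL if it is not done yet. */
static struct block *dequeue(struct fileio_state *f)
{
	struct block *b = &f->blocks[f->next_dequeue];

	if (b->state != BLOCK_DONE)
		return NULL;

	/* In the driver the block becomes fileio.active_block instead. */
	b->state = BLOCK_ACTIVE;
	f->next_dequeue = (f->next_dequeue + 1) % NUM_BLOCKS;
	return b;
}

int main(void)
{
	struct fileio_state f = {
		.blocks = { { BLOCK_DONE, 4096 }, { BLOCK_QUEUED, 4096 } },
	};
	struct block *b;

	while ((b = dequeue(&f)))
		printf("dequeued a block of %zu bytes\n", b->size);

	return 0;
}

Because the counter only advances past blocks that are already DONE, the
walk over the fixed two-entry array hands blocks back in the same order the
old outgoing list did.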

> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 44 ++--
>  include/linux/iio/buffer-dma.h   |  7 ++--
>  2 files changed, 26 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index d348af8b9705..1fc91467d1aa 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -179,7 +179,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>   }
>  
>   block->size = size;
> - block->state = IIO_BLOCK_STATE_DEQUEUED;
> + block->state = IIO_BLOCK_STATE_DONE;
>   block->queue = queue;
>   INIT_LIST_HEAD(&block->head);
>   kref_init(&block->kref);
> @@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>  
>  static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  {
> - struct iio_dma_buffer_queue *queue = block->queue;
> -
> - /*
> -  * The buffer has already been freed by the application, just drop the
> -  * reference.
> -  */
> - if (block->state != IIO_BLOCK_STATE_DEAD) {
> + if (block->state != IIO_BLOCK_STATE_DEAD)
>   block->state = IIO_BLOCK_STATE_DONE;
> - list_add_tail(&block->head, &queue->outgoing);
> - }
>  }
>  
>  /**
> @@ -261,7 +253,6 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
> * not support abort and has not given back the block yet.
> */
>   switch (block->state) {
> - case IIO_BLOCK_STATE_DEQUEUED:
>   case IIO_BLOCK_STATE_QUEUED:
>   case IIO_BLOCK_STATE_DONE:
>   return true;
> @@ -317,7 +308,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> * dead. This means we can reset the lists without having to fear
> * corrution.
> */
> - INIT_LIST_HEAD(&queue->outgoing);
>   spin_unlock_irq(&queue->list_lock);
>  
>   INIT_LIST_HEAD(&queue->incoming);
> @@ -456,14 +446,20 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
>   struct iio_dma_buffer_queue *queue)
>  {
>   struct iio_dma_buffer_block *block;
> + unsigned int idx;
>  
>   spin_lock_irq(&queue->list_lock);
> - block = list_first_entry_or_null(&queue->outgoing, struct
> - iio_dma_buffer_block, head);
> - if (block != NULL) {
> - list_del(&block->head);
> - block->state = IIO_BLOCK_STATE_DEQUEUED;
> +
> + idx = queue->fileio.next_dequeue;
> + block = queue->fileio.blocks[idx];
> +
> + if (block->state == IIO_BLOCK_STATE_DONE) {
> + idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
> + queue->fileio.next_dequeue = idx;
> + } else {
> + block = NULL;
>   }
> +
>   spin_unlock_irq(&queue->list_lock);
>  
>   return block;
> @@ -539,6 +535,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>   struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
>   struct iio_dma_buffer_block *block;
>   size_t data_available = 0;
> + unsigned int i;
>  
>   /*
> * For counting the available bytes we'll use the size of the block not
> @@ -552,8 +549,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>   data_available += queue->fileio.active_block->size;
>  
>   spin_lock_irq(&queue->list_lock);
> - list_for_each_entry(block, &queue->outgoing, head)
> - data_available += block->size;
> +
> + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> + block = queue->fileio.blocks[i];
> +
> + if (block != queue->fileio.active_block
> + && block->state == IIO_BLOCK_STATE_DONE)
> + data_available += block->size;
> + }
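
The archive clips the rest of this message; the full hunk is in the original
posting below. As a rough model of the accounting it implements, the
available byte count is the active block's size plus the size of every other
block already marked DONE. A short C sketch with simplified stand-in types
(block, block_state, NUM_BLOCKS are not the driver's names) follows:

#include <stddef.h>
#include <stdio.h>

enum block_state { BLOCK_QUEUED, BLOCK_ACTIVE, BLOCK_DONE };

struct block {
	enum block_state state;
	size_t size;
};

#define NUM_BLOCKS 2

/* Count the active block plus every other block that is already DONE. */
static size_t data_available(const struct block *blocks,
			     const struct block *active)
{
	size_t avail = 0;
	unsigned int i;

	if (active)
		avail += active->size;

	for (i = 0; i < NUM_BLOCKS; i++) {
		if (&blocks[i] != active && blocks[i].state == BLOCK_DONE)
			avail += blocks[i].size;
	}

	return avail;
}

int main(void)
{
	struct block blocks[NUM_BLOCKS] = {
		{ .state = BLOCK_DONE,   .size = 4096 },
		{ .state = BLOCK_ACTIVE, .size = 4096 },
	};

	/* 4096 (active) + 4096 (done) = 8192 bytes available. */
	printf("%zu bytes available\n", data_available(blocks, &blocks[1]));
	return 0;
}

Walking the fixed block array here replaces the old list_for_each_entry()
over the outgoing list while producing the same total.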

Re: [PATCH v2 01/12] iio: buffer-dma: Get rid of outgoing queue

2022-02-13 Thread Paul Cercueil

Hi Jonathan,

On Sun, Feb 13 2022 at 18:57:40 +, Jonathan Cameron wrote:

On Mon,  7 Feb 2022 12:59:22 +
Paul Cercueil  wrote:


 The buffer-dma code was using two queues, incoming and outgoing, to
 manage the state of the blocks in use.

 While this totally works, it adds some complexity to the code,
 especially since the code only manages 2 blocks. It is much easier to
 just check each block's state manually, and keep a counter for the next
 block to dequeue.

 Since the new DMABUF based API wouldn't use the outgoing queue anyway,
 getting rid of it now makes the upcoming changes simpler.

 With this change, the IIO_BLOCK_STATE_DEQUEUED is now useless, and can
 be removed.

 v2: - Only remove the outgoing queue, and keep the incoming queue, as we
   want the buffer to start streaming data as soon as it is enabled.
 - Remove IIO_BLOCK_STATE_DEQUEUED, since it is now functionally the
   same as IIO_BLOCK_STATE_DONE.

 Signed-off-by: Paul Cercueil 
 ---


Trivial process thing but change log should be here, not above as we don't
want it to end up in the main git log.


I'm kinda used to doing this now; it's the policy for sending patches to
the DRM tree. I like it because "git notes" disappear after rebases, and
that's a pain. At least this way I don't lose the changelog.


But okay, I'll change it for v3, if there's a v3.

Cheers,
-Paul

  drivers/iio/buffer/industrialio-buffer-dma.c | 44 ++--
  include/linux/iio/buffer-dma.h   |  7 ++--
  2 files changed, 26 insertions(+), 25 deletions(-)






Re: [PATCH v2 01/12] iio: buffer-dma: Get rid of outgoing queue

2022-02-13 Thread Jonathan Cameron
On Mon,  7 Feb 2022 12:59:22 +
Paul Cercueil  wrote:

> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
> 
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
> 
> Since the new DMABUF based API wouldn't use the outgoing queue anyway,
> getting rid of it now makes the upcoming changes simpler.
> 
> With this change, the IIO_BLOCK_STATE_DEQUEUED is now useless, and can
> be removed.
> 
> v2: - Only remove the outgoing queue, and keep the incoming queue, as we
>   want the buffer to start streaming data as soon as it is enabled.
> - Remove IIO_BLOCK_STATE_DEQUEUED, since it is now functionally the
>   same as IIO_BLOCK_STATE_DONE.
> 
> Signed-off-by: Paul Cercueil 
> ---

Trivial process thing but change log should be here, not above as we don't
want it to end up in the main git log.


>  drivers/iio/buffer/industrialio-buffer-dma.c | 44 ++--
>  include/linux/iio/buffer-dma.h   |  7 ++--
>  2 files changed, 26 insertions(+), 25 deletions(-)
> 


[PATCH v2 01/12] iio: buffer-dma: Get rid of outgoing queue

2022-02-07 Thread Paul Cercueil
The buffer-dma code was using two queues, incoming and outgoing, to
manage the state of the blocks in use.

While this totally works, it adds some complexity to the code,
especially since the code only manages 2 blocks. It is much easier to
just check each block's state manually, and keep a counter for the next
block to dequeue.

Since the new DMABUF based API wouldn't use the outgoing queue anyway,
getting rid of it now makes the upcoming changes simpler.

With this change, the IIO_BLOCK_STATE_DEQUEUED is now useless, and can
be removed.

v2: - Only remove the outgoing queue, and keep the incoming queue, as we
  want the buffer to start streaming data as soon as it is enabled.
- Remove IIO_BLOCK_STATE_DEQUEUED, since it is now functionally the
  same as IIO_BLOCK_STATE_DONE.

Signed-off-by: Paul Cercueil 
---
 drivers/iio/buffer/industrialio-buffer-dma.c | 44 ++--
 include/linux/iio/buffer-dma.h   |  7 ++--
 2 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index d348af8b9705..1fc91467d1aa 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -179,7 +179,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
}
 
block->size = size;
-   block->state = IIO_BLOCK_STATE_DEQUEUED;
+   block->state = IIO_BLOCK_STATE_DONE;
block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);
@@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
-   struct iio_dma_buffer_queue *queue = block->queue;
-
-   /*
- * The buffer has already been freed by the application, just drop the
- * reference.
- */
-   if (block->state != IIO_BLOCK_STATE_DEAD) {
+   if (block->state != IIO_BLOCK_STATE_DEAD)
block->state = IIO_BLOCK_STATE_DONE;
-   list_add_tail(&block->head, &queue->outgoing);
-   }
 }
 
 /**
@@ -261,7 +253,6 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 * not support abort and has not given back the block yet.
 */
switch (block->state) {
-   case IIO_BLOCK_STATE_DEQUEUED:
case IIO_BLOCK_STATE_QUEUED:
case IIO_BLOCK_STATE_DONE:
return true;
@@ -317,7 +308,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 * dead. This means we can reset the lists without having to fear
 * corrution.
 */
-   INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);
 
	INIT_LIST_HEAD(&queue->incoming);
@@ -456,14 +446,20 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
struct iio_dma_buffer_queue *queue)
 {
struct iio_dma_buffer_block *block;
+   unsigned int idx;
 
	spin_lock_irq(&queue->list_lock);
-   block = list_first_entry_or_null(&queue->outgoing, struct
-   iio_dma_buffer_block, head);
-   if (block != NULL) {
-   list_del(&block->head);
-   block->state = IIO_BLOCK_STATE_DEQUEUED;
+
+   idx = queue->fileio.next_dequeue;
+   block = queue->fileio.blocks[idx];
+
+   if (block->state == IIO_BLOCK_STATE_DONE) {
+   idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
+   queue->fileio.next_dequeue = idx;
+   } else {
+   block = NULL;
}
+
	spin_unlock_irq(&queue->list_lock);
 
return block;
@@ -539,6 +535,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
struct iio_dma_buffer_block *block;
size_t data_available = 0;
+   unsigned int i;
 
/*
 * For counting the available bytes we'll use the size of the block not
@@ -552,8 +549,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
data_available += queue->fileio.active_block->size;
 
	spin_lock_irq(&queue->list_lock);
-   list_for_each_entry(block, &queue->outgoing, head)
-   data_available += block->size;
+
+   for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+   block = queue->fileio.blocks[i];
+
+   if (block != queue->fileio.active_block
+   && block->state == IIO_BLOCK_STATE_DONE)
+   data_available += block->size;
+   }
+
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);
 
@@ -617,7 +621,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
queue->ops = ops;
 
	INIT_LIST_HEAD(&queue->incoming);
-   INIT_LIST_HEAD(&queue->outgoing);
 
	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);
@@ -645,7 +648,6 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
continue;