This is an automated email from the ASF dual-hosted git repository.

pkarashchenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 9e4a1be8d482119f004549e1bb076227b1e58279
Author: Xiang Xiao <xiaoxi...@xiaomi.com>
AuthorDate: Sat Oct 21 17:20:29 2023 +0800

    mm/iob: Replace the critical section with spin lock
    
    Based on discussion: https://github.com/apache/nuttx/issues/10981
    
    Signed-off-by: Xiang Xiao <xiaoxi...@xiaomi.com>
---
 mm/iob/iob.h                   |  3 +++
 mm/iob/iob_add_queue.c         |  4 ++--
 mm/iob/iob_alloc.c             | 10 +++++-----
 mm/iob/iob_alloc_qentry.c      |  8 ++++----
 mm/iob/iob_free.c              |  6 +++---
 mm/iob/iob_free_qentry.c       |  5 +++--
 mm/iob/iob_free_queue_qentry.c |  8 +++++---
 mm/iob/iob_initialize.c        |  2 ++
 mm/iob/iob_remove_queue.c      |  9 +++++++--
 9 files changed, 34 insertions(+), 21 deletions(-)
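
For context: enter_critical_section() takes a single, system-wide lock
(on SMP it serializes against every other critical section in the
kernel), while a dedicated spinlock serializes only the IOB list
operations against each other.  The conversion pattern repeated through
this patch is, roughly (a minimal sketch, not the literal code;
g_iob_freelist stands in for whichever list a given function touches):

    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    /* Before: global critical section */

    irqstate_t flags = enter_critical_section();
    /* ... manipulate g_iob_freelist ... */
    leave_critical_section(flags);

    /* After: subsystem-local spinlock.  Interrupts are still masked
     * on the local CPU, but other CPUs stall only if they contend
     * for g_iob_lock, not for every critical section in the system.
     */

    flags = spin_lock_irqsave(&g_iob_lock);
    /* ... manipulate g_iob_freelist ... */
    spin_unlock_irqrestore(&g_iob_lock, flags);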

diff --git a/mm/iob/iob.h b/mm/iob/iob.h
index 3181385d34..14947915de 100644
--- a/mm/iob/iob.h
+++ b/mm/iob/iob.h
@@ -30,6 +30,7 @@
 #include <debug.h>
 
 #include <nuttx/mm/iob.h>
+#include <nuttx/spinlock.h>
 #include <nuttx/semaphore.h>
 
 #ifdef CONFIG_MM_IOB
@@ -80,6 +81,8 @@ extern sem_t g_throttle_sem;  /* Counts available I/O buffers when throttled */
 extern sem_t g_qentry_sem;    /* Counts free I/O buffer queue containers */
 #endif
 
+extern spinlock_t g_iob_lock;
+
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
diff --git a/mm/iob/iob_add_queue.c b/mm/iob/iob_add_queue.c
index 4a5ffea2d0..2593d2a14f 100644
--- a/mm/iob/iob_add_queue.c
+++ b/mm/iob/iob_add_queue.c
@@ -61,7 +61,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
 
   qentry->qe_flink = NULL;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   if (!iobq->qh_head)
     {
       iobq->qh_head = qentry;
@@ -74,7 +74,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
       iobq->qh_tail = qentry;
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return 0;
 }
diff --git a/mm/iob/iob_alloc.c b/mm/iob/iob_alloc.c
index 3cb7dff71c..5edb369e34 100644
--- a/mm/iob/iob_alloc.c
+++ b/mm/iob/iob_alloc.c
@@ -73,7 +73,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -92,7 +92,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -272,7 +272,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
 #if CONFIG_IOB_THROTTLE > 0
   /* If there are free I/O buffers for this allocation */
@@ -314,7 +314,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
           g_throttle_sem.semcount--;
 #endif
 
-          leave_critical_section(flags);
+          spin_unlock_irqrestore(&g_iob_lock, flags);
 
           /* Put the I/O buffer in a known state */
 
@@ -326,6 +326,6 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return NULL;
 }
diff --git a/mm/iob/iob_alloc_qentry.c b/mm/iob/iob_alloc_qentry.c
index 567b900534..0f00019ab9 100644
--- a/mm/iob/iob_alloc_qentry.c
+++ b/mm/iob/iob_alloc_qentry.c
@@ -57,7 +57,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -73,7 +73,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
@@ -199,7 +199,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
   iobq  = g_iob_freeqlist;
   if (iobq)
     {
@@ -225,7 +225,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
diff --git a/mm/iob/iob_free.c b/mm/iob/iob_free.c
index 232e28ff4e..f2c04be917 100644
--- a/mm/iob/iob_free.c
+++ b/mm/iob/iob_free.c
@@ -118,7 +118,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -137,6 +137,8 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_freelist  = iob;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an IOB is available.  If there is a thread blocked,
    * waiting for an IOB, this will wake up exactly one thread.  The
    * semaphore count will correctly indicated that the awakened task
@@ -168,8 +170,6 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
     }
 #endif
 
-  leave_critical_section(flags);
-
   /* And return the I/O buffer after the one that was freed */
 
   return next;
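
Note the reordering in iob_free(): the lock is now dropped before the
semaphore is posted.  nxsem_post() can wake a waiter that runs right
away (on another CPU, or by preemption once interrupts are restored),
and that waiter may itself try to take g_iob_lock in iob_alloc();
posting while still holding the spinlock would add latency at best and
risk deadlock at worst.  A sketch of the safe ordering (assuming the
semaphore in the elided hunk is g_iob_sem, the free-buffer counting
semaphore):

    /* Unlock first, then post: the thread woken by nxsem_post() may
     * immediately call iob_alloc() and spin on g_iob_lock.
     */

    spin_unlock_irqrestore(&g_iob_lock, flags);
    nxsem_post(&g_iob_sem);
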
diff --git a/mm/iob/iob_free_qentry.c b/mm/iob/iob_free_qentry.c
index 527861afd7..5709ec73b0 100644
--- a/mm/iob/iob_free_qentry.c
+++ b/mm/iob/iob_free_qentry.c
@@ -58,7 +58,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -77,6 +77,8 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       g_iob_freeqlist  = iobq;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an I/O buffer chain container is available.  If there
    * is a thread waiting for an I/O buffer chain container, this will
    * wake up exactly one thread.  The semaphore count will correctly
@@ -85,7 +87,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    */
 
   nxsem_post(&g_qentry_sem);
-  leave_critical_section(flags);
 
   /* And return the I/O buffer chain container after the one that was freed */
 
diff --git a/mm/iob/iob_free_queue_qentry.c b/mm/iob/iob_free_queue_qentry.c
index 3801efe9c1..88d34ffbf9 100644
--- a/mm/iob/iob_free_queue_qentry.c
+++ b/mm/iob/iob_free_queue_qentry.c
@@ -51,7 +51,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
   FAR struct iob_qentry_s *prev = NULL;
   FAR struct iob_qentry_s *qentry;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   for (qentry = iobq->qh_head; qentry != NULL;
        prev = qentry, qentry = qentry->qe_flink)
     {
@@ -73,6 +73,8 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
               iobq->qh_tail = prev;
             }
 
+          spin_unlock_irqrestore(&g_iob_lock, flags);
+
           /* Remove the queue container */
 
           iob_free_qentry(qentry);
@@ -81,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
 
           iob_free_chain(iob);
 
-          break;
+          return;
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 }
 
 #endif /* CONFIG_IOB_NCHAINS > 0 */
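
Two details in iob_free_queue_qentry(): the lock must be released
before iob_free_qentry() and iob_free_chain() are called, because both
now acquire g_iob_lock themselves (directly, or via iob_free()) and
NuttX spinlocks are not recursive; and the old "break" becomes a
"return", since falling out of the loop after the early unlock would
reach the final spin_unlock_irqrestore() and release the lock twice.
Condensed (a sketch of the branch that found the matching entry):

    spin_unlock_irqrestore(&g_iob_lock, flags); /* drop the lock first */

    iob_free_qentry(qentry); /* retakes g_iob_lock internally */
    iob_free_chain(iob);     /* likewise, via iob_free() */

    return;                  /* don't fall through to the unlock below */
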
diff --git a/mm/iob/iob_initialize.c b/mm/iob/iob_initialize.c
index 08b30e5165..d232f1e7bf 100644
--- a/mm/iob/iob_initialize.c
+++ b/mm/iob/iob_initialize.c
@@ -102,6 +102,8 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
 sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
 #endif
 
+spinlock_t g_iob_lock = SP_UNLOCKED;
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
diff --git a/mm/iob/iob_remove_queue.c b/mm/iob/iob_remove_queue.c
index 04953867c1..8ccbabcdea 100644
--- a/mm/iob/iob_remove_queue.c
+++ b/mm/iob/iob_remove_queue.c
@@ -56,7 +56,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
 
   /* Remove the I/O buffer chain from the head of the queue */
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   qentry = iobq->qh_head;
   if (qentry)
     {
@@ -66,6 +66,8 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
           iobq->qh_tail = NULL;
         }
 
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+
       /* Extract the I/O buffer chain from the container and free the
        * container.
        */
@@ -73,8 +75,11 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
       iob = qentry->qe_head;
       iob_free_qentry(qentry);
     }
+  else
+    {
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+    }
 
-  leave_critical_section(flags);
   return iob;
 }
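
iob_remove_queue() follows the same discipline: every path out of the
function releases g_iob_lock exactly once, and the release happens
before iob_free_qentry() retakes the lock.  The resulting control flow,
condensed (sketch):

    irqstate_t flags = spin_lock_irqsave(&g_iob_lock);

    qentry = iobq->qh_head;
    if (qentry)
      {
        /* ... detach qentry from the head of the queue ... */

        spin_unlock_irqrestore(&g_iob_lock, flags);

        iob = qentry->qe_head;
        iob_free_qentry(qentry); /* retakes g_iob_lock */
      }
    else
      {
        spin_unlock_irqrestore(&g_iob_lock, flags);
      }

    return iob;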
 
