xiaoxiang781216 commented on code in PR #14764:
URL: https://github.com/apache/nuttx/pull/14764#discussion_r1845720916


##########
mm/iob/iob_alloc.c:
##########
@@ -97,10 +97,82 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
+static FAR struct iob_s *iob_tryalloc_internal(bool throttled)
+{
+  FAR struct iob_s *iob;
+#if CONFIG_IOB_THROTTLE > 0
+  int16_t count;
+#endif
+
+#if CONFIG_IOB_THROTTLE > 0
+  /* Select the count to check. */
+
+  count = (throttled ? g_throttle_count : g_iob_count);
+#endif
+
+  /* We don't know what context we are called from so we use extreme measures
+   * to protect the free list:  We disable interrupts very briefly.
+   */
+
+#if CONFIG_IOB_THROTTLE > 0

Review Comment:
   merge with line 111



##########
mm/iob/iob_alloc.c:
##########
@@ -97,10 +97,82 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
+static FAR struct iob_s *iob_tryalloc_internal(bool throttled)
+{
+  FAR struct iob_s *iob;
+#if CONFIG_IOB_THROTTLE > 0
+  int16_t count;
+#endif
+
+#if CONFIG_IOB_THROTTLE > 0
+  /* Select the count to check. */
+
+  count = (throttled ? g_throttle_count : g_iob_count);
+#endif
+
+  /* We don't know what context we are called from so we use extreme measures

Review Comment:
   remove



##########
mm/iob/iob_alloc.c:
##########
@@ -97,10 +97,82 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
+static FAR struct iob_s *iob_tryalloc_internal(bool throttled)
+{
+  FAR struct iob_s *iob;
+#if CONFIG_IOB_THROTTLE > 0
+  int16_t count;
+#endif
+
+#if CONFIG_IOB_THROTTLE > 0
+  /* Select the count to check. */
+
+  count = (throttled ? g_throttle_count : g_iob_count);

Review Comment:
   merge to line 108



##########
mm/iob/iob_alloc.c:
##########
@@ -121,8 +194,10 @@ static FAR struct iob_s *iob_allocwait(bool throttled, 
unsigned int timeout)
 #if CONFIG_IOB_THROTTLE > 0
   /* Select the semaphore count to check. */
 
+  count = (throttled ? &g_throttle_count : &g_iob_count);

Review Comment:
   merge to line 188



##########
mm/iob/iob.h:
##########
@@ -76,14 +77,29 @@ extern FAR struct iob_qentry_s *g_iob_qcommitted;
 
 /* Counting semaphores that tracks the number of free IOBs/qentries */
 
-extern sem_t g_iob_sem;       /* Counts free I/O buffers */
+extern sem_t g_iob_sem;
+
+/* Counts free I/O buffers */
+
+extern volatile int16_t g_iob_count;

Review Comment:
   let's remove ALL volatile qualifiers, since the variables are already protected by the spinlock



##########
mm/iob/iob_alloc_qentry.c:
##########
@@ -101,21 +137,23 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
    * re-enabled while we are waiting for I/O buffers to become free.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer chain container.  If successful, the semaphore
    * count will bedecremented atomically.

Review Comment:
   remove the wrong comment here and in other similar places



##########
mm/iob/iob_alloc.c:
##########
@@ -97,10 +97,82 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
+static FAR struct iob_s *iob_tryalloc_internal(bool throttled)
+{
+  FAR struct iob_s *iob;
+#if CONFIG_IOB_THROTTLE > 0
+  int16_t count;
+#endif
+
+#if CONFIG_IOB_THROTTLE > 0
+  /* Select the count to check. */
+
+  count = (throttled ? g_throttle_count : g_iob_count);
+#endif
+
+  /* We don't know what context we are called from so we use extreme measures
+   * to protect the free list:  We disable interrupts very briefly.
+   */
+
+#if CONFIG_IOB_THROTTLE > 0
+  /* If there are free I/O buffers for this allocation */
+
+  if (count > 0)
+#endif
+    {
+      /* Take the I/O buffer from the head of the free list */
+
+      iob = g_iob_freelist;
+      if (iob != NULL)
+        {
+          /* Remove the I/O buffer from the free list and decrement the
+           * counting semaphore(s) that tracks the number of available
+           * IOBs.
+           */
+
+          g_iob_freelist = iob->io_flink;
+
+          /* Take a semaphore count.  Note that we cannot do this in

Review Comment:
   remove the mismatch comment



##########
mm/iob/iob_navail.c:
##########
@@ -46,34 +46,27 @@
 
 int iob_navail(bool throttled)
 {
-  int navail = 0;
   int ret;
 
 #if CONFIG_IOB_NBUFFERS > 0
-  /* Get the value of the IOB counting semaphores */

Review Comment:
   add spinlock protection



##########
mm/iob/iob_navail.c:
##########
@@ -89,24 +82,18 @@ int iob_navail(bool throttled)
 
 int iob_qentry_navail(void)
 {
-  int navail = 0;
   int ret;
 
 #if CONFIG_IOB_NCHAINS > 0
   /* Get the value of the IOB chain qentry counting semaphores */
 
-  ret = nxsem_get_value(&g_qentry_sem, &navail);

Review Comment:
   add the protection too



##########
mm/iob/iob_initialize.c:
##########
@@ -91,23 +91,32 @@ FAR struct iob_qentry_s *g_iob_freeqlist;
 FAR struct iob_qentry_s *g_iob_qcommitted;
 #endif
 
+sem_t g_iob_sem = SEM_INITIALIZER(0);
+
 /* Counting semaphores that tracks the number of free IOBs/qentries */
 
-sem_t g_iob_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS);
+volatile int16_t g_iob_count = CONFIG_IOB_NBUFFERS;
 
 #if CONFIG_IOB_THROTTLE > 0
+
+sem_t g_throttle_sem = SEM_INITIALIZER(0);
+
 /* Counts available I/O buffers when throttled */
 
-sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
-                                       CONFIG_IOB_THROTTLE);
+volatile int16_t g_throttle_count = CONFIG_IOB_NBUFFERS -

Review Comment:
   remove volatile and merge into one line



##########
mm/iob/iob_alloc_qentry.c:
##########
@@ -75,7 +75,43 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+  return iobq;
+}
+
+static FAR struct iob_qentry_s *iob_tryalloc_qentry_internal(void)
+{
+  FAR struct iob_qentry_s *iobq;
+
+  /* We don't know what context we are called from so we use extreme measures

Review Comment:
   remove



##########
mm/iob/iob_alloc_qentry.c:
##########
@@ -101,21 +137,23 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
    * re-enabled while we are waiting for I/O buffers to become free.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer chain container.  If successful, the semaphore
    * count will bedecremented atomically.
    */
 
-  qentry = iob_tryalloc_qentry();
-  while (ret == OK && qentry == NULL)
+  qentry = iob_tryalloc_qentry_internal();
+  if (qentry == NULL)
     {
       /* If not successful, then the semaphore count was less than or equal

Review Comment:
   remove semaphore from the comment



##########
mm/iob/iob_free.c:
##########
@@ -145,80 +142,58 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    */
 
 #if CONFIG_IOB_THROTTLE > 0
-  if ((g_iob_sem.semcount < 0) ||
-      ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
-       (g_throttle_sem.semcount < 0)))
+  if ((g_iob_count < 0) ||

Review Comment:
   remove the unnecessary ()



##########
mm/iob/iob_free.c:
##########
@@ -145,80 +142,58 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    */
 
 #if CONFIG_IOB_THROTTLE > 0
-  if ((g_iob_sem.semcount < 0) ||
-      ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
-       (g_throttle_sem.semcount < 0)))
+  if ((g_iob_count < 0) ||
+      ((g_iob_count >= CONFIG_IOB_THROTTLE) &&
+       (g_throttle_count < 0)))

Review Comment:
   impossible to be smaller than zero in the new code?



##########
mm/iob/iob_free_qentry.c:
##########
@@ -60,35 +60,30 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct 
iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
    * it is reserved for that allocation (and not available to
    * iob_tryalloc_qentry()).
    */
 
-  if (g_qentry_sem.semcount < 0)
+  if (g_qentry_count < 0)

Review Comment:
   g_qentry_count++
   and remove line 75 and 81



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to